"""
__author__ = "Antonio Ospite"
-__url__ = ["http://vrm.projects.blender.org"]
+__url__ = ["http://projects.blender.org/projects/vrm"]
__version__ = "0.3"
__bpydoc__ = """\
# ---------------------------------------------------------------------
#
# Things TODO for a next release:
-# - Switch to the Mesh structure, should be considerably faster
-# (partially done, but with Mesh we cannot sort faces, yet)
+# - Use multiple lighting sources in color calculation (this is part of
+# the "shading refactor"), and use the light color!
+# - FIX the issue with negative scales in object transformations!
# - Use a better depth sorting algorithm
+# - Implement clipping of primitives and handle object intersections.
+# (for now only clipping away whole objects is supported).
# - Review how selections are made (this script uses selection states of
# primitives to represent visibility info)
-# - Implement clipping of primitives and do handle object intersections.
-# (for now only clipping for whole objects is supported).
-# - Implement Edge Styles (silhouettes, contours, etc.) (partially done).
-# - Use multiple lighting sources in color calculation
-# - Implement Shading Styles? (for now we use Flat Shading).
# - Use a data structure other than Mesh to represent the 2D image?
-# Think to a way to merge adjacent polygons that have the same color.
+# Think of a way to merge (adjacent) polygons that have the same color.
# Or a way to use paths for silhouettes and contours.
-# - Add Vector Writers other that SVG.
# - Consider SMIL for animation handling instead of ECMAScript? (Firefox does
# not support SMIL animations)
-# - FIX the issue with negative scales in object tranformations!
+# - Switch to the Mesh structure, which should be considerably faster
+# (partially done, but with Mesh we cannot sort faces yet)
+# - Implement Edge Styles (silhouettes, contours, etc.) (partially done).
+# - Implement Shading Styles? (for now we use Flat Shading) (partially done).
+# - Add Vector Writers other than SVG.
#
# ---------------------------------------------------------------------
#
fovy = fovy * 360.0/pi
# What projection do we want?
- if camera.type:
- #mP = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale)
- mP = self._calcOrthoMatrix(fovy, aspect, near, far, scale)
- else:
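+ # camera.type is 0 for a perspective camera, 1 for an orthographic one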
+ if camera.type == 0:
mP = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+ elif camera.type == 1:
+ mP = self._calcOrthoMatrix(fovy, aspect, near, far, scale)
# View transformation
cam = Matrix(cameraObj.getInverseMatrix())
# Note that we have to work on the vertex using homogeneous coordinates
p = self.projectionMatrix * Vector(v).resize4D()
- if p[3]>0:
+ # Perspective division
+ if p[3] != 0:
p[0] = p[0]/p[3]
p[1] = p[1]/p[3]
+ p[2] = p[2]/p[3]
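+ # the z coordinate is normalized too; it is used later for depth sorting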
# restore the size
p[3] = 1.0
if not face.sel:
continue
- self.file.write("<polygon points=\"")
+ self.file.write("<path d=\"")
+
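+ # move (M) to the first vertex, then draw line segments (L) to the others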
+ p = self._calcCanvasCoord(face.verts[0])
+ self.file.write("M %g,%g L " % (p[0], p[1]))
- for v in face:
+ for v in face.verts[1:]:
p = self._calcCanvasCoord(v)
self.file.write("%g,%g " % (p[0], p[1]))
self.file.write("\tstyle=\"fill:" + str_col + ";")
self.file.write(opacity_string)
if config.polygons['EXPANSION_TRICK']:
- self.file.write(" stroke:" + str_col + ";")
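+ # stroke the polygon outline so thin gaps do not show up between adjacent faces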
self.file.write(" stroke-width:" + str(stroke_width) + ";\n")
self.file.write(" stroke-linecap:round;stroke-linejoin:round")
self.file.write("\"/>\n")
# Use some temporary workspace, a full copy of the scene
inputScene = self._SCENE.copy(2)
+ # And set our camera accordingly
+ self.cameraObj = inputScene.getCurrentCamera()
try:
renderedScene = self.doRenderScene(inputScene)
# clear the rendered scene
self._SCENE.makeCurrent()
- Scene.unlink(renderedScene)
- del renderedScene
+ #Scene.unlink(renderedScene)
+ #del renderedScene
outputWriter.close()
print "Done!"
# global processing of the scene
- self._doConvertGeometricObjToMesh(workScene)
+ self._doSceneClipping(workScene)
- #self._doSceneClipping(workScene)
+ self._doConvertGeometricObjToMesh(workScene)
if config.output['JOIN_OBJECTS']:
self._joinMeshObjectsInScene(workScene)
Objects = workScene.getChildren()
for obj in Objects:
-
if obj.getType() != 'Mesh':
print "Only Mesh supported! - Skipping type:", obj.getType()
continue
mesh = obj.getData(mesh=1)
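+ # Per-object pipeline: modeling transformation, backface culling,
+ # per-vertex lighting, projection, view frustum clipping,
+ # depth sorting, edge styling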
- self._doModelToWorldCoordinates(mesh, obj.matrix)
+ self._doModelingTransformation(mesh, obj.matrix)
self._doBackFaceCulling(mesh)
-
- self._doObjectDepthSorting(mesh)
-
- self._doColorAndLighting(mesh)
- self._doEdgesStyle(mesh, edgeStyles[config.edges['STYLE']])
+ self._doPerVertexLighting(mesh)
+ # Do "projection" now so we perform further processing
+ # in Normalized View Coordinates
self._doProjection(mesh, self.proj)
+
+ self._doViewFrustumClipping(mesh)
+
+ self._doMeshDepthSorting(mesh)
+
+ self._doEdgesStyle(mesh, edgeStyles[config.edges['STYLE']])
+
# Update the object data, important! :)
mesh.update()
"""
return obj.matrix.translationPart()
- def _cameraViewDirection(self):
+ def _cameraViewVector(self):
"""Get the View Direction form the camera matrix.
"""
return Vector(self.cameraObj.matrix[2]).resize3D()
# The View Vector in orthographic projections is the view direction of
# the camera
if self.cameraObj.data.getType() == 1:
- view_vect = self._cameraViewDirection()
+ view_vect = self._cameraViewVector()
# View vector in perspective projections can be considered as
# the difference between the camera position and one point of
vv = max( [ ((camPos - Vector(v.co)).length, (camPos - Vector(v.co))) for v in face] )
view_vect = vv[1]
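+ # (the face point used is the vertex farthest from the camera)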
+
# if d > 0 the face is visible from the camera
d = view_vect * normal
# Scene methods
- def _doConvertGeometricObjToMesh(self, scene):
- """Convert all "geometric" objects to mesh ones.
- """
- geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text']
-
- Objects = scene.getChildren()
- objList = [ o for o in Objects if o.getType() in geometricObjTypes ]
- for obj in objList:
- old_obj = obj
- obj = self._convertToRawMeshObj(obj)
- scene.link(obj)
- scene.unlink(old_obj)
-
-
- # XXX Workaround for Text and Curve which have some normals
- # inverted when they are converted to Mesh, REMOVE that when
- # blender will fix that!!
- if old_obj.getType() in ['Curve', 'Text']:
- me = obj.getData(mesh=1)
- for f in me.faces: f.sel = 1;
- for v in me.verts: v.sel = 1;
- me.remDoubles(0)
- me.triangleToQuad()
- me.recalcNormals()
- me.update()
-
def _doSceneClipping(self, scene):
- """Clip objects against the View Frustum.
+ """Clip whole objects against the View Frustum.
For now only whole objects are clipped away, based on their center position.
"""
cpos = self._getObjPosition(self.cameraObj)
- view_vect = self._cameraViewDirection()
+ view_vect = self._cameraViewVector()
near = self.cameraObj.data.clipStart
far = self.cameraObj.data.clipEnd
if (d < near) or (d > far) or (theta > fovy):
scene.unlink(o)
+ def _doConvertGeometricObjToMesh(self, scene):
+ """Convert all "geometric" objects to mesh ones.
+ """
+ geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text']
+
+ Objects = scene.getChildren()
+ objList = [ o for o in Objects if o.getType() in geometricObjTypes ]
+ for obj in objList:
+ old_obj = obj
+ obj = self._convertToRawMeshObj(obj)
+ scene.link(obj)
+ scene.unlink(old_obj)
+
+
+ # XXX Workaround for Text and Curve objects, which can have some normals
+ # inverted when converted to Mesh. Remove this when Blender fixes it!
+ if old_obj.getType() in ['Curve', 'Text']:
+ me = obj.getData(mesh=1)
+ for f in me.faces: f.sel = 1;
+ for v in me.verts: v.sel = 1;
+ me.remDoubles(0)
+ me.triangleToQuad()
+ me.recalcNormals()
+ me.update()
+
+
def _doSceneDepthSorting(self, scene):
"""Sort objects in the scene.
"""Merge all the Mesh Objects in a scene into a single Mesh Object.
"""
- if Blender.mode == 'background':
- print "\nWARNING! Joining objects not supported in background mode!\n"
- return
-
oList = [o for o in scene.getChildren() if o.getType()=='Mesh']
# FIXME: Object.join() does not work if the list contains 1 object
try:
bigObj.join(oList)
except RuntimeError:
- print "Can't Join Objects"
+ print "\nCan't Join Objects\n"
scene.unlink(bigObj)
return
except TypeError:
return newObject
- def _doModelToWorldCoordinates(self, mesh, matrix):
+ def _doModelingTransformation(self, mesh, matrix):
"""Transform object coordinates to world coordinates.
This step is done by simply applying to the object its transformation
mesh.transform(matrix, True)
- def _doObjectDepthSorting(self, mesh):
- """Sort faces in an object.
-
- The faces in the object are sorted following the distance of the
- vertices from the camera position.
- """
- if len(mesh.faces) == 0:
- return
-
- c = self._getObjPosition(self.cameraObj)
-
- # hackish sorting of faces
-
- # Sort faces according to the max distance from the camera
- by_max_vert_dist = (lambda f1, f2:
- cmp(max([(Vector(v.co)-Vector(c)).length for v in f1]),
- max([(Vector(v.co)-Vector(c)).length for v in f2])))
-
- # Sort faces according to the min distance from the camera
- by_min_vert_dist = (lambda f1, f2:
- cmp(min([(Vector(v.co)-Vector(c)).length for v in f1]),
- min([(Vector(v.co)-Vector(c)).length for v in f2])))
-
- # Sort faces according to the avg distance from the camera
- by_avg_vert_dist = (lambda f1, f2:
- cmp(sum([(Vector(v.co)-Vector(c)).length for v in f1])/len(f1),
- sum([(Vector(v.co)-Vector(c)).length for v in f2])/len(f2)))
-
-
- # FIXME: using NMesh to sort faces. We should avoid that!
- nmesh = NMesh.GetRaw(mesh.name)
- nmesh.faces.sort(by_max_vert_dist)
- nmesh.faces.reverse()
-
- # Get visible faces
- #vf = nmesh.faces
- #while len(vf)>1:
- # p1 = vf[0]
- # insideList =
-
-
- mesh.faces.delete(1, range(0, len(mesh.faces)))
-
- for i,f in enumerate(nmesh.faces):
- fv = [v.index for v in f.v]
- mesh.faces.extend(fv)
- mesh.faces[i].mat = f.mat
- mesh.faces[i].sel = f.sel
-
-
def _doBackFaceCulling(self, mesh):
"""Simple Backface Culling routine.
if self._isFaceVisible(f):
f.sel = 1
- def _doColorAndLighting(self, mesh):
+ def _doPerVertexLighting(self, mesh):
"""Apply an Illumination ans shading model to the object.
The model used is the Phong one, it may be inefficient,
light = light_obj.data
camPos = self._getObjPosition(self.cameraObj)
-
+
# We do per-face color calculation (FLAT Shading); we can easily turn
# to a per-vertex calculation if we want to implement some shading
# technique. For an example see:
Ip = light.getEnergy()
if config.polygons['SHADING'] == 'FLAT':
- Idiff = Ip * kd * (N*L)
+ Idiff = Ip * kd * max(0, (N*L))
elif config.polygons['SHADING'] == 'TOON':
Idiff = Ip * kd * MeshUtils.toonShading(N*L)
# Specular component
ks = mat.getSpec() * Vector(mat.getSpecCol())
ns = mat.getHardness()
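+ # clamp the specular term to zero when the reflection vector points away from the viewer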
- Ispec = Ip * ks * pow((V*R), ns)
+ Ispec = Ip * ks * pow(max(0, (V*R)), ns)
# Emissive component
ki = Vector([mat.getEmit()]*3)
c.b = tmp_col[2]
c.a = tmp_col[3]
+ def _doProjection(self, mesh, projector):
+ """Apply Viewing and Projection transformations.
+ """
+
+ for v in mesh.verts:
+ p = projector.doProjection(v.co)
+ v.co[0] = p[0]
+ v.co[1] = p[1]
+ v.co[2] = p[2]
+
+ # We could reset the camera matrix, since we are now
+ # in Normalized Viewing Coordinates, but doing that
+ # would affect World Coordinate processing for the
+ # other objects
+
+ #self.cameraObj.data.type = 1
+ #self.cameraObj.data.scale = 2.0
+ #m = Matrix().identity()
+ #self.cameraObj.setMatrix(m)
+
+ def _doViewFrustumClipping(self, mesh):
+ """Clip faces against the View Frustum.
+ """
+
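+ # NOTE: clipping of single primitives is not implemented yet (see the TODO list above)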
+ def test_extensions(self, f1, f2):
+ for v1, v2 in [ (v1, v2) for v1 in f1 for v2 in f2 ]:
+ pass
+
+ def depth_sort(self, faces):
+ return
+
+
+ def _doMeshDepthSorting(self, mesh):
+ """Sort faces in an object.
+
+ The faces in the object are sorted following the distance of the
+ vertices from the camera position.
+ """
+ if len(mesh.faces) == 0:
+ return
+
+ #c = self._getObjPosition(self.cameraObj)
+
+ # In Normalized View Coordinates the camera position is [0, 0, 1]
+ c = [0, 0, 1]
+
+ # hackish sorting of faces
+
+ # Sort faces according to the max distance from the camera
+ by_max_vert_dist = (lambda f1, f2:
+ cmp(max([(Vector(v.co)-Vector(c)).length for v in f2]),
+ max([(Vector(v.co)-Vector(c)).length for v in f1])))
+
+ # Sort faces according to the min distance from the camera
+ by_min_vert_dist = (lambda f1, f2:
+ cmp(min([(Vector(v.co)-Vector(c)).length for v in f1]),
+ min([(Vector(v.co)-Vector(c)).length for v in f2])))
+
+ # Sort faces according to the avg distance from the camera
+ by_avg_vert_dist = (lambda f1, f2:
+ cmp(sum([(Vector(v.co)-Vector(c)).length for v in f1])/len(f1),
+ sum([(Vector(v.co)-Vector(c)).length for v in f2])/len(f2)))
+
+
+ # FIXME: using NMesh to sort faces. We should avoid that!
+ nmesh = NMesh.GetRaw(mesh.name)
+ nmesh.faces.sort(by_max_vert_dist)
+ #nmesh.faces.reverse()
+
+ # Depth sort tests
+
+ self.depth_sort(nmesh.faces)
+
+
+ mesh.faces.delete(1, range(0, len(mesh.faces)))
+
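+ # Rebuild the Mesh faces in the sorted order, preserving material,
+ # selection state and vertex colors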
+ for i,f in enumerate(nmesh.faces):
+ fv = [v.index for v in f.v]
+ mesh.faces.extend(fv)
+ mesh.faces[i].mat = f.mat
+ mesh.faces[i].sel = f.sel
+ for j,c in enumerate(mesh.faces[i].col):
+ c.r = f.col[j].r
+ c.g = f.col[j].g
+ c.b = f.col[j].b
+ c.a = f.col[j].a
+
def _doEdgesStyle(self, mesh, edgestyleSelect):
"""Process Mesh Edges accroding to a given selection style.
if edgestyleSelect(e, mesh):
e.sel = 1
- def _doProjection(self, mesh, projector):
- """Calculate the Projection for the object.
- """
- # TODO: maybe using the object.transform() can be faster?
-
- for v in mesh.verts:
- p = projector.doProjection(v.co)
- v.co[0] = p[0]
- v.co[1] = p[1]
- v.co[2] = p[2]
-
# ---------------------------------------------------------------------
if evt:
Draw.Redraw(1)
- GUI.conf_debug()
+ #GUI.conf_debug()
def conf_debug():
from pprint import pprint