+ self.file.write("<line x1=\"%g\" y1=\"%g\" x2=\"%g\" y2=\"%g\"\n"
+ % ( p1[0], p1[1], p2[0], p2[1] ) )
+ self.file.write(" style=\"stroke:rgb("+str(stroke_col[0])+","+str(stroke_col[1])+","+str(stroke_col[2])+");")
+ self.file.write(" stroke-width:"+str(stroke_width)+";\n")
+ self.file.write(" stroke-linecap:round;stroke-linejoin:round")
+ self.file.write(hidden_stroke_style)
+ self.file.write("\"/>\n")
+
+ self.file.write("</g>\n")
+
+
+
+# ---------------------------------------------------------------------
+#
+## Rendering Classes
+#
+# ---------------------------------------------------------------------
+
+class Renderer:
+ """Render a scene viewed from a given camera.
+
+	This class is responsible for the rendering process; transformation and
+	projection of the objects in the scene are invoked by the renderer.
+
+ The rendering is done using the active camera for the current scene.
+ """
+
+	def __init__(self):
+		"""Make the rendering process work only on the current scene by default.
+
+		We will work on a copy of the scene, to be sure that the current
+		scene does not get modified in any way.
+		"""
+
+		# Render the current Scene, this should be a READ-ONLY property
+		self._SCENE = Scene.GetCurrent()
+
+		# Use the aspect ratio of the scene rendering context
+		context = self._SCENE.getRenderingContext()
+
+		# Canvas ratio combines the pixel aspect ratio set in the render
+		# context with the image width/height ratio.
+		aspect_ratio = float(context.imageSizeX())/float(context.imageSizeY())
+		self.canvasRatio = (float(context.aspectRatioX())*aspect_ratio,
+				float(context.aspectRatioY())
+				)
+
+		# Render from the currently active camera
+		self.cameraObj = self._SCENE.getCurrentCamera()
+
+		# Get the list of lighting sources
+		obj_lst = self._SCENE.getChildren()
+		self.lights = [ o for o in obj_lst if o.getType() == 'Lamp']
+
+		# No lamp in the scene: create a default one so the lighting
+		# code always has at least one source to work with.
+		# NOTE(review): the fallback lamp Object is not linked to the
+		# scene here — presumably downstream code only reads
+		# self.lights; confirm.
+		if len(self.lights) == 0:
+			l = Lamp.New('Lamp')
+			lobj = Object.New('Lamp')
+			lobj.link(l)
+			self.lights.append(lobj)
+
+
+ ##
+ # Public Methods
+ #
+
+	def doRendering(self, outputWriter, animation=False):
+		"""Render picture or animation and write it out.
+
+		The parameters are:
+			- a Vector writer object that will be used to output the result.
+			- a flag to tell if we want to render an animation or only the
+			  current frame.
+		"""
+
+		context = self._SCENE.getRenderingContext()
+		currentFrame = context.currentFrame()
+
+		# Handle the animation case: a still renders only the current
+		# frame, an animation covers the context's whole frame range.
+		# The writer's open() signature differs between the two cases.
+		if not animation:
+			startFrame = currentFrame
+			endFrame = startFrame
+			outputWriter.open()
+		else:
+			startFrame = context.startFrame()
+			endFrame = context.endFrame()
+			outputWriter.open(startFrame, endFrame)
+
+		# Do the rendering process frame by frame
+		print "Start Rendering!"
+		for f in range(startFrame, endFrame+1):
+			context.currentFrame(f)
+
+			# PRINT_POLYGONS / PRINT_EDGES / SHOW_HIDDEN_EDGES are
+			# presumably module-level config flags defined outside this
+			# chunk — verify at the top of the file.
+			renderedScene = self.doRenderScene(self._SCENE)
+			outputWriter.printCanvas(renderedScene,
+					doPrintPolygons = PRINT_POLYGONS,
+					doPrintEdges    = PRINT_EDGES,
+					showHiddenEdges = SHOW_HIDDEN_EDGES)
+
+			# clear the rendered scene: doRenderScene works on a copy,
+			# unlink it so temporary scenes do not accumulate per frame
+			self._SCENE.makeCurrent()
+			Scene.unlink(renderedScene)
+			del renderedScene
+
+		outputWriter.close()
+		print "Done!"
+		# restore the frame that was selected before rendering started
+		context.currentFrame(currentFrame)
+
+
+	def doRenderScene(self, inputScene):
+		"""Control the rendering process.
+
+		Here we control the entire rendering process invoking the operation
+		needed to transform and project the 3D scene in two dimensions.
+		Returns the temporary work scene containing the transformed,
+		projected geometry; the caller is responsible for unlinking it.
+		"""
+
+		# Use some temporary workspace, a full copy of the scene
+		workScene = inputScene.copy(2)
+
+		# Get a projector for this scene.
+		# NOTE: the projector wants object in world coordinates,
+		# so we should apply modelview transformations _before_
+		# projection transformations
+		proj = Projector(self.cameraObj, self.canvasRatio)
+
+
+		# Convert geometric object types to mesh Objects, replacing each
+		# original object in the work scene with its raw-mesh version.
+		geometricObjTypes = ['Mesh', 'Surf', 'Curve'] # TODO: add the Text type
+		Objects = workScene.getChildren()
+		objList = [ o for o in Objects if o.getType() in geometricObjTypes ]
+		for obj in objList:
+			old_obj = obj
+			obj = self._convertToRawMeshObj(obj)
+			workScene.link(obj)
+			workScene.unlink(old_obj)
+
+
+		# FIXME: does not work!!, Blender segfaults on joins
+		if OPTIMIZE_FOR_SPACE:
+			self._joinMeshObjectsInScene(workScene)
+
+
+		# global processing of the scene
+		# NOTE(review): _doClipping is a stub and takes no scene — see
+		# its definition below.
+		self._doClipping()
+
+		self._doSceneDepthSorting(workScene)
+
+		# Per object activities: each step below mutates the mesh in
+		# place, and the order matters (world transform before sorting,
+		# culling and lighting; projection last).
+		Objects = workScene.getChildren()
+
+		for obj in Objects:
+
+			if obj.getType() not in geometricObjTypes:
+				print "Only geometric Objects supported! - Skipping type:", obj.getType()
+				continue
+
+			print "Rendering: ", obj.getName()
+
+			mesh = obj.data
+
+			self._doModelToWorldCoordinates(mesh, obj.matrix)
+
+			self._doObjectDepthSorting(mesh)
+
+			self._doBackFaceCulling(mesh)
+
+			self._doColorAndLighting(mesh)
+
+			# TODO: 'style' can be a function that determine
+			# if an edge should be showed?
+			self._doEdgesStyle(mesh, style=None)
+
+			self._doProjection(mesh, proj)
+
+			# Update the object data, important! :)
+			mesh.update()
+
+		return workScene
+
+
+ ##
+ # Private Methods
+ #
+
+ # Utility methods
+
+ def _worldPosition(self, obj):
+ """Return the obj position in World coordinates.
+ """
+ return obj.matrix.translationPart()
+
+ def _cameraWorldPosition(self):
+ """Return the camera position in World coordinates.
+
+ This trick is needed when the camera follows a path and then
+ camera.loc does not correspond to the current real position of the
+ camera in the world.
+ """
+ return self._worldPosition(self.cameraObj)
+
+
+ # Faces methods
+
+ def _isFaceVisible(self, face):
+ """Determine if a face of an object is visible from the current camera.
+
+ The view vector is calculated from the camera location and one of the
+ vertices of the face (expressed in World coordinates, after applying
+ modelview transformations).
+
+ After those transformations we determine if a face is visible by
+ computing the angle between the face normal and the view vector, this
+ angle has to be between -90 and 90 degrees for the face to be visible.
+ This corresponds somehow to the dot product between the two, if it
+ results > 0 then the face is visible.
+
+ There is no need to normalize those vectors since we are only interested in
+ the sign of the cross product and not in the product value.
+
+ NOTE: here we assume the face vertices are in WorldCoordinates, so
+ please transform the object _before_ doing the test.
+ """
+
+ normal = Vector(face.no)
+ c = self._cameraWorldPosition()
+
+ # View vector in orthographics projections can be considered simply as the
+ # camera position
+ view_vect = Vector(c)
+ #if self.cameraObj.data.getType() == 1:
+ # view_vect = Vector(c)
+
+ # View vector as in perspective projections
+ # it is the difference between the camera position and one point of
+ # the face, we choose the farthest point.
+ # TODO: make the code more pythonic :)
+ if self.cameraObj.data.getType() == 0:
+ max_len = 0
+ for vect in face:
+ vv = Vector(c) - Vector(vect.co)
+ if vv.length > max_len:
+ max_len = vv.length
+ view_vect = vv
+
+ # if d > 0 the face is visible from the camera
+ d = view_vect * normal
+
+ if d > 0:
+ return True
+ else:
+ return False
+
+
+ # Scene methods
+
+	def _doClipping(self):
+		"""Clip objects against the View Frustum.
+
+		Placeholder: clipping is not implemented yet, this method only
+		reports that fact and returns without touching the scene.
+		"""
+		print "TODO: _doClipping()"
+		return
+
+ def _doSceneDepthSorting(self, scene):
+ """Sort objects in the scene.
+
+ The object sorting is done accordingly to the object centers.
+ """
+
+ c = self._cameraWorldPosition()
+
+ Objects = scene.getChildren()
+
+ #Objects.sort(lambda obj1, obj2:
+ # cmp((Vector(obj1.loc) - Vector(c)).length,
+ # (Vector(obj2.loc) - Vector(c)).length
+ # )
+ # )
+
+ Objects.sort(lambda obj1, obj2:
+ cmp((self._worldPosition(obj1) - Vector(c)).length,
+ (self._worldPosition(obj2) - Vector(c)).length
+ )
+ )
+
+ # update the scene
+ for o in Objects:
+ scene.unlink(o)
+ scene.link(o)
+
+
+	def _joinMeshObjectsInScene(self, scene):
+		"""Merge all the Mesh Objects in a scene into a single Mesh Object.
+
+		The merged object is linked into the scene and the original
+		Mesh Objects are unlinked.
+
+		NOTE(review): the caller carries a FIXME because Object.join()
+		has been observed to segfault Blender, so this space
+		optimization is currently unreliable.
+		"""
+		bigObj = Object.New('Mesh', 'BigOne')
+		oList = [o for o in scene.getChildren() if o.getType()=='Mesh']
+		print "Before join", oList
+		bigObj.join(oList)
+		print "After join"
+		scene.link(bigObj)
+		# Remove the now-merged source objects from the scene
+		for o in oList:
+			scene.unlink(o)
+
+
+ # Per object methods
+
+ def _convertToRawMeshObj(self, object):
+ """Convert geometry based object to a mesh object.
+ """
+ me = Mesh.New('RawMesh_'+object.name)
+ me.getFromObject(object.name)
+
+ newObject = Object.New('Mesh', 'RawMesh_'+object.name)
+ newObject.link(me)
+
+ newObject.setMatrix(object.getMatrix())
+
+ return newObject
+
+ def _doModelToWorldCoordinates(self, mesh, matrix):
+ """Transform object coordinates to world coordinates.
+
+ This step is done simply applying to the object its tranformation
+ matrix and recalculating its normals.
+ """
+ mesh.transform(matrix, True)
+
+	def _doObjectDepthSorting(self, mesh):
+		"""Sort faces in an object.
+
+		The faces in the object are sorted following the distance of the
+		vertices from the camera position: faces whose farthest vertex
+		is nearest to the camera come first.
+		"""
+		c = self._cameraWorldPosition()
+
+		# hackish sorting of faces
+		# NOTE(review): Vector(c) is rebuilt for every vertex of every
+		# comparison — a loop-invariant that could be hoisted.
+		mesh.faces.sort(
+			lambda f1, f2:
+				# Sort faces according to the min distance from the camera
+				#cmp(min([(Vector(v.co)-Vector(c)).length for v in f1]),
+				#    min([(Vector(v.co)-Vector(c)).length for v in f2])))
+
+				# Sort faces according to the max distance from the camera
+				cmp(max([(Vector(v.co)-Vector(c)).length for v in f1]),
+					max([(Vector(v.co)-Vector(c)).length for v in f2])))