    This class is responsible for the rendering process, hence transformation
    and projection of the objects in the scene are invoked by the renderer.

    The user can optionally provide a specific camera for the rendering; see
    the #doRendering# method for more information.
    """
+
+ def __init__(self):
+ """Make the rendering process only for the current scene by default.
+ """
+
+ # Render the current Scene set as a READ-ONLY property
+ self._SCENE = Scene.GetCurrent()
+
+ # Use the aspect ratio of the scene rendering context
+ context = self._SCENE.getRenderingContext()
+ self.canvasRatio = (context.aspectRatioX(), context.aspectRatioY())
+
+ # Render from the currently active camera
+ self.camera = self._SCENE.getCurrentCamera()
+
+
+ ##
+ # Public Methods
+ #
+
+ def doRendering(self, outputWriter, animation=0):
+ """Render picture or animation and write it out.
+
+ The parameters are:
+ - a Vector writer object than will be used to output the result.
+ - a flag to tell if we want to render an animation or the only
+ current frame.
+ """
+
+ context = self._SCENE.getRenderingContext()
+ currentFrame = context.currentFrame()
+
+ # Handle the animation case
+ if animation == 0:
+ startFrame = currentFrame
+ endFrame = startFrame
+ else:
+ startFrame = context.startFrame()
+ endFrame = context.endFrame()
+
+ # Do the rendering process frame by frame
+ print "Start Rendering!"
+ for f in range(startFrame, endFrame+1):
+ context.currentFrame(f)
+ renderedScene = self.doRenderScene(self._SCENE)
+ outputWriter.printCanvas(renderedScene,
+ doPrintPolygons=False, doPrintEdges=True, showHiddenEdges=True)
+
+ # clear the rendered scene
+ self._SCENE.makeCurrent()
+ Scene.unlink(renderedScene)
+ del renderedScene
+
+ print "Done!"
+ context.currentFrame(currentFrame)
+
+
+
+ def doRenderScene(self, inputScene):
+ """Control the rendering process.
+
+ Here we control the entire rendering process invoking the operation
+ needed to transform and project the 3D scene in two dimensions.
+ """
+
+ # Use some temporary workspace, a full copy of the scene
+ workScene = inputScene.copy(2)
+
+ # Get a projector for this scene.
+ # NOTE: the projector wants object in world coordinates,
+ # so we should apply modelview transformations _before_
+ # projection transformations
+ proj = Projector(self.camera, self.canvasRatio)
+
+ # global processing of the scene
+ self._doDepthSorting(workScene)
+
+ # Per object activities
+ Objects = workScene.getChildren()
+
+ for obj in Objects:
+
+ if (obj.getType() != 'Mesh'):
+ print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
+ continue
+ #
+
+ self._doModelViewTransformations(obj)
+
+ self._doBackFaceCulling(obj)
+
+ self._doColorAndLighting(obj)
+
+ # 'style' can be a function that determine
+ # if an edge should be showed?
+ self._doEdgesStyle(obj, style=None)
+
+ self._doProjection(obj, proj)
+
+ return workScene
+
+
    def oldRenderScene(scene):
        """Legacy per-object rendering loop, superseded by doRenderScene.

        NOTE(review): this looks like dead code kept for reference only.
        It is missing the `self` parameter yet uses `self`, and it reads
        `workScene`, `cameraObj`, `newscene` and `self.canvasSize`, none of
        which are defined anywhere in this function -- it cannot run as-is.
        """

        # Per object activities
        # NOTE(review): `workScene` is undefined here; presumably it was
        # meant to be a copy of `scene` -- confirm before reviving this code.
        Objects = workScene.getChildren()

        for obj in Objects:

            # Only mesh objects are supported; skip everything else
            if (obj.getType() != 'Mesh'):
                print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
                continue

            # Get a projector for this object
            # NOTE(review): older per-object Projector signature; the new
            # code builds a single per-scene projector instead.
            proj = Projector(self.camera, obj, self.canvasSize)

            # Let's store the transformed data
            transformed_mesh = NMesh.New("flat"+obj.name)
            transformed_mesh.hasVertexColours(1)

            # process Edges
            self._doProcessEdges(obj)

            # Copy the original vertices, then attach the processed edges
            for v in obj.getData().verts:
                transformed_mesh.verts.append(v)
            transformed_mesh.edges = self._processEdges(obj.getData().edges)
            #print transformed_mesh.edges

            # Store the materials
            materials = obj.getData().getMaterials()

            meshfaces = obj.getData().faces

            for face in meshfaces:

                # if the face is visible flatten it on the "picture plane"
                # NOTE(review): `cameraObj` is undefined in this scope.
                if self._isFaceVisible(face, obj, cameraObj):

                    # Store transformed face
                    newface = NMesh.Face()

                    # Project each vertex and append it both to the mesh
                    # and to the new face
                    for vert in face:

                        p = proj.doProjection(vert.co)

                        tmp_vert = NMesh.Vert(p[0], p[1], p[2])

                        # Add the vert to the mesh
                        transformed_mesh.verts.append(tmp_vert)

                        newface.v.append(tmp_vert)


                    # Per-face color calculation
                    # code taken mostly from the original vrm script
                    # TODO: understand the code and rewrite it clearly
                    ambient = -150

                    # Fake light direction taken from a Lamp object named
                    # "Lamp"; falls back to a fixed direction if unset
                    fakelight = Object.Get("Lamp").loc
                    if fakelight == None:
                        fakelight = [1.0, 1.0, -0.3]

                    # Intensity from the angle between the face normal and
                    # the fake light (clamped at zero)
                    norm = Vector(face.no)
                    vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2])
                    vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2)))
                    intensity = floor(ambient + 200*acos(vektori/vduzine))/200
                    if intensity < 0:
                        intensity = 0

                    # Base color from the face material, gray if none
                    if materials:
                        tmp_col = materials[face.mat].getRGBCol()
                    else:
                        tmp_col = [0.5, 0.5, 0.5]

                    # Darken each channel by the computed intensity
                    tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ]

                    vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2])
                    newface.col = [vcol, vcol, vcol, 255]

                    transformed_mesh.addFace(newface)

            # at the end of the loop on obj

            # Link the flattened mesh into a new object placed like the
            # original one
            # NOTE(review): `newscene` is undefined in this scope.
            transformed_obj = Object.New(obj.getType(), "flat"+obj.name)
            transformed_obj.link(transformed_mesh)
            transformed_obj.loc = obj.loc
            newscene.link(transformed_obj)


        return workScene
+
+
+ ##
+ # Private Methods
+ #
+
+ # Faces methods
+
+ def _isFaceVisible(self, face, obj, camObj):
+ """Determine if a face of an object is visible from a given camera.
+
+ The normals need to be transformed, but note that we should apply only the
+ rotation part of the tranformation matrix, since the normals are
+ normalized and they can be intended as starting from the origin.
+
+ The view vector is calculated from the camera location and one of the
+ vertices of the face (expressed in World coordinates, after applying
+ modelview transformations).
+
+ After those transformations we determine if a face is visible by computing
+ the angle between the face normal and the view vector, this angle
+ corresponds somehow to the dot product between the two. If the product
+ results <= 0 then the angle between the two vectors is less that 90
+ degrees and then the face is visible.
+
+ There is no need to normalize those vectors since we are only interested in
+ the sign of the cross product and not in the product value.
+ """
+
+ # The transformation matrix of the object
+ mObj = Matrix(obj.getMatrix())
+ mObj.transpose()
+
+ # The normal after applying the current object rotation
+ #normal = mObj.rotationPart() * Vector(face.no)
+ normal = Vector(face.no)
+
+ # View vector in orthographics projections can be considered simply s the
+ # camera position
+ #view_vect = Vector(camObj.loc)
+
+ # View vector as in perspective projections
+ # it is the dofference between the camera position and
+ # one point of the face, we choose the first point,
+ # but maybe a better choice may be the farthest point from the camera.
+ point = Vector(face[0].co)
+ #point = mObj * point.resize4D()
+ #point.resize3D()
+ view_vect = Vector(camObj.loc) - point
+
+
+ # if d <= 0 the face is visible from the camera
+ d = view_vect * normal
+
+ if d <= 0:
+ return False
+ else:
+ return True