X-Git-Url: https://git.ao2.it/vrm.git/blobdiff_plain/3761107280d20fdc779f71b8a60358c3639a074f..e393259b57de5b2e22bcdeb498839eee817a4432:/vrm.py?ds=sidebyside diff --git a/vrm.py b/vrm.py index bd61a58..aa73b10 100755 --- a/vrm.py +++ b/vrm.py @@ -2,8 +2,16 @@ """ Name: 'VRM' Blender: 241 -Group: 'Export' -Tooltip: 'Vector Rendering Method Export Script 0.3' +Group: 'Render' +Tooltip: 'Vector Rendering Method script' +""" + +__author__ = "Antonio Ospite" +__url__ = ["http://vrm.projects.blender.org"] +__version__ = "0.3" + +__bpydoc__ = """\ + Render the scene and save the result in vector format. """ # --------------------------------------------------------------------- @@ -25,212 +33,266 @@ Tooltip: 'Vector Rendering Method Export Script 0.3' # # --------------------------------------------------------------------- # -# NOTE: I do not know who is the original author of 'vrm'. -# The present code is almost entirely rewritten from scratch, -# but if I have to give credits to anyone, please let me know, -# so I can update the copyright. +# Additional credits: +# Thanks to Emilio Aguirre for S2flender from which I took inspirations :) +# Thanks to Nikola Radovanovic, the author of the original VRM script, +# the code you read here has been rewritten _almost_ entirely +# from scratch but Nikola gave me the idea, so I thank him publicly. # # --------------------------------------------------------------------- +# +# Things TODO for a next release: +# - Switch to the Mesh structure, should be considerably faster +# (partially done, but with Mesh we cannot sort faces, yet) +# - Use a better depth sorting algorithm +# - Review how selections are made (this script uses selection states of +# primitives to represent visibility infos) +# - Implement clipping of primitives and do handle object intersections. +# (for now only clipping for whole objects is supported). +# - Implement Edge Styles (silhouettes, contours, etc.) (partially done). +# - Implement Edge coloring +# - Use multiple lighting sources in color calculation +# - Implement Shading Styles? (for now we use Flat Shading). +# - Use a data structure other than Mesh to represent the 2D image? +# Think to a way to merge adjacent polygons that have the same color. +# Or a way to use paths for silhouettes and contours. +# - Add Vector Writers other that SVG. +# - Consider SMIL for animation handling instead of ECMA Script? # -# Additional credits: -# Thanks to Emilio Aguirre for S2flender from which I took inspirations :) -# Thanks to Anthony C. D'Agostino for the backface.py script +# --------------------------------------------------------------------- +# +# Changelog: +# +# vrm-0.3.py - 2006-05-19 +# * First release after code restucturing. +# Now the script offers a useful set of functionalities +# and it can render animations, too. 
# # --------------------------------------------------------------------- import Blender -from Blender import Scene, Object, NMesh, Lamp, Camera +from Blender import Scene, Object, Mesh, NMesh, Material, Lamp, Camera from Blender.Mathutils import * from math import * +# Some global settings + +PRINT_POLYGONS = True + +POLYGON_EXPANSION_TRICK = True # Hidden to the user for now + +PRINT_EDGES = False +SHOW_HIDDEN_EDGES = False +EDGE_STYLE = 'silhouette' +EDGES_WIDTH = 0.5 + +RENDER_ANIMATION = False + +OPTIMIZE_FOR_SPACE = True + +OUTPUT_FORMAT = 'SVG' + + + # --------------------------------------------------------------------- # -## Projections classes +## Utility Mesh class # # --------------------------------------------------------------------- +class MeshUtils: -class Projection: - def __init__(self): - print "New projection" - -class PerspectiveProjection(Projection): - def __init___(self): - Projection.__init__(self) - print "Perspective" - - def doProjection(): - print "do a perspective projection!!" - -def Perspective(fovy, aspect, near, far): - top = near * tan(fovy * pi / 360.0) - bottom = -top - left = bottom*aspect - right= top*aspect - x = (2.0 * near) / (right-left) - y = (2.0 * near) / (top-bottom) - a = (right+left) / (right-left) - b = (top+bottom) / (top - bottom) - c = - ((far+near) / (far-near)) - d = - ((2*far*near)/(far-near)) - return Matrix([x,0.0,a,0.0],[0.0,y,b,0.0],[0.0,0.0,c,d],[0.0,0.0,-1.0,0.0]) - -def flatten_new(v, cameraObj, canvasSize, obMesh): - - cam = cameraObj.getInverseMatrix() - cam.transpose() + def getEdgeAdjacentFaces(edge, mesh): + """Get the faces adjacent to a given edge. - # Changing the view mode - cmra = cameraObj.getData() - - #if cmra.type: - # print "Ortho" - #m2 = Ortho(fovy,float(w*ax)/float(h*ay),cmra.clipStart, cmra.clipEnd,17) #cmra.scale) - #else: - # print "Perspective" - - #Create Frustum - #frustum = _Frustum(cam,m2) - - m1 = Matrix() - mP = Matrix() + There can be 0, 1 or more (usually 2) faces adjacent to an edge. + """ + adjface_list = [] + + for f in mesh.faces: + if (edge.v1 in f.v) and (edge.v2 in f.v): + adjface_list.append(f) + + return adjface_list + + def isVisibleEdge(e, mesh): + """Normal edge selection rule. + + An edge is visible if _any_ of its adjacent faces is selected. + Note: if the edge has no adjacent faces we want to show it as well, + useful for "edge only" portion of objects. + """ + + adjacent_faces = MeshUtils.getEdgeAdjacentFaces(e, mesh) + + if len(adjacent_faces) == 0: + return True + + selected_faces = [f for f in adjacent_faces if f.sel] + + if len(selected_faces) != 0: + return True + else: + return False + + def isSilhouetteEdge(e, mesh): + """Silhuette selection rule. + + An edge is a silhuette edge if it is shared by two faces with + different selection status or if it is a boundary edge of a selected + face. 
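The edge selection rule above can be tried outside Blender on plain data. A minimal sketch, assuming faces are simple objects with a vertex list and a selection flag (the Face/Edge stand-ins below are illustrative, not part of the Blender API):

class Face:
    def __init__(self, verts, sel):
        self.v = verts    # vertex identifiers
        self.sel = sel    # set by the back-face culling step

class Edge:
    def __init__(self, v1, v2):
        self.v1, self.v2 = v1, v2

def isVisibleEdge(edge, faces):
    # same rule as MeshUtils.isVisibleEdge: visible if any adjacent face
    # is selected, or if the edge has no adjacent faces at all
    adjacent = [f for f in faces if edge.v1 in f.v and edge.v2 in f.v]
    if len(adjacent) == 0:
        return True
    return len([f for f in adjacent if f.sel]) != 0

faces = [Face([0, 1, 2], 1), Face([1, 2, 3], 0)]
print isVisibleEdge(Edge(1, 2), faces)   # True, shared with a selected face
print isVisibleEdge(Edge(1, 3), faces)   # False, its only face is deselected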
+ """ + + adjacent_faces = MeshUtils.getEdgeAdjacentFaces(e, mesh) + + if ((len(adjacent_faces) == 1 and adjacent_faces[0].sel == 1) or + (len(adjacent_faces) == 2 and + adjacent_faces[0].sel != adjacent_faces[1].sel) + ): + return True + else: + return False - fovy = atan(0.5/(float(canvasSize[0])/float(canvasSize[1]))/(cmra.lens/32)) - fovy = fovy * 360/pi + getEdgeAdjacentFaces = staticmethod(getEdgeAdjacentFaces) + isVisibleEdge = staticmethod(isVisibleEdge) + isSilhouetteEdge = staticmethod(isSilhouetteEdge) - m2 = Perspective(fovy,float(canvasSize[0])/float(canvasSize[1]),cmra.clipStart, cmra.clipEnd) - m1 = obMesh.matrixWorld #mat - m1.transpose() - mP = cam * m1 - mP = m2 * mP + +# --------------------------------------------------------------------- +# +## Projections classes +# +# --------------------------------------------------------------------- + +class Projector: + """Calculate the projection of an object given the camera. - #Transform the vertices to global coordinates - p = mP*Vector([v.co[0],v.co[1],v.co[2],1.0]) - #tf.append(p) - #p = m1*Vector([v.co[0],v.co[1],v.co[2],1.0]) - #t2.append([p[0],p[1],p[2]]) - - mW = canvasSize[0]/2 - mH = canvasSize[1]/2 + A projector is useful to so some per-object transformation to obtain the + projection of an object given the camera. - if p[3]<=0: - p[0] = int(p[0]*mW)+mW - p[1] = int(p[1]*mH)+mH - else: - p[0] = int((p[0]/p[3])*mW)+mW - p[1] = int((p[1]/p[3])*mH)+mH + The main method is #doProjection# see the method description for the + parameter list. + """ + + def __init__(self, cameraObj, canvasRatio): + """Calculate the projection matrix. + + The projection matrix depends, in this case, on the camera settings. + TAKE CARE: This projector expects vertices in World Coordinates! + """ + + camera = cameraObj.getData() + + aspect = float(canvasRatio[0])/float(canvasRatio[1]) + near = camera.clipStart + far = camera.clipEnd + + scale = float(camera.scale) + + fovy = atan(0.5/aspect/(camera.lens/32)) + fovy = fovy * 360.0/pi - # Mirror and translate along y - p[1] *= -1 - p[1] += canvasSize[1] - - return p + # What projection do we want? + if camera.type: + #mP = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale) + mP = self._calcOrthoMatrix(fovy, aspect, near, far, scale) + else: + mP = self._calcPerspectiveMatrix(fovy, aspect, near, far) + + # View transformation + cam = Matrix(cameraObj.getInverseMatrix()) + cam.transpose() + + mP = mP * cam + self.projectionMatrix = mP + ## + # Public methods + # -# distance from camera Z' -def Distance(PX,PY,PZ): - - dist = sqrt(PX*PX+PY*PY+PZ*PZ) - return dist + def doProjection(self, v): + """Project the point on the view plane. 
-def RotatePoint(PX,PY,PZ,AngleX,AngleY,AngleZ): - - NewPoint = [] - # Rotate X - NewY = (PY * cos(AngleX))-(PZ * sin(AngleX)) - NewZ = (PZ * cos(AngleX))+(PY * sin(AngleX)) - # Rotate Y - PZ = NewZ - PY = NewY - NewZ = (PZ * cos(AngleY))-(PX * sin(AngleY)) - NewX = (PX * cos(AngleY))+(PZ * sin(AngleY)) - PX = NewX - PZ = NewZ - # Rotate Z - NewX = (PX * cos(AngleZ))-(PY * sin(AngleZ)) - NewY = (PY * cos(AngleZ))+(PX * sin(AngleZ)) - NewPoint.append(NewX) - NewPoint.append(NewY) - NewPoint.append(NewZ) - return NewPoint - -def flatten(vertx, verty, vertz, cameraObj, canvasSize): - - camera = cameraObj.getData() - Lens = camera.getLens() # The Camera lens - - xres = canvasSize[0] # X res for output - yres = canvasSize[1] # Y res for output - ratio = xres/yres - - fov = atan(ratio * 16.0 / Lens) # Get fov stuff - - dist = xres/2*tan(fov) # Calculate dist from pinhole camera to image plane + Given a vertex calculate the projection using the current projection + matrix. + """ + + # Note that we have to work on the vertex using homogeneous coordinates + p = self.projectionMatrix * Vector(v).resize4D() - screenxy=[0,0,vertz] - x=-vertx - y=verty - z=vertz + if p[3]>0: + p[0] = p[0]/p[3] + p[1] = p[1]/p[3] - #---------------------------- - # calculate x'=dist*x/z & y'=dist*x/z - #---------------------------- - screenxy[0]=int(xres/2.0+4*x*dist/z) - screenxy[1]=int(yres/2.0+4*y*dist/z) - return screenxy + # restore the size + p[3] = 1.0 + p.resize3D() -## Backface culling routine -# + return p + + ## + # Private methods + # + + def _calcPerspectiveMatrix(self, fovy, aspect, near, far): + """Return a perspective projection matrix. + """ + + top = near * tan(fovy * pi / 360.0) + bottom = -top + left = bottom*aspect + right= top*aspect + x = (2.0 * near) / (right-left) + y = (2.0 * near) / (top-bottom) + a = (right+left) / (right-left) + b = (top+bottom) / (top - bottom) + c = - ((far+near) / (far-near)) + d = - ((2*far*near)/(far-near)) + + m = Matrix( + [x, 0.0, a, 0.0], + [0.0, y, b, 0.0], + [0.0, 0.0, c, d], + [0.0, 0.0, -1.0, 0.0]) + + return m + + def _calcOrthoMatrix(self, fovy, aspect , near, far, scale): + """Return an orthogonal projection matrix. + """ + + # The 11 in the formula was found emiprically + top = near * tan(fovy * pi / 360.0) * (scale * 11) + bottom = -top + left = bottom * aspect + right= top * aspect + rl = right-left + tb = top-bottom + fn = near-far + tx = -((right+left)/rl) + ty = -((top+bottom)/tb) + tz = ((far+near)/fn) + + m = Matrix( + [2.0/rl, 0.0, 0.0, tx], + [0.0, 2.0/tb, 0.0, ty], + [0.0, 0.0, 2.0/fn, tz], + [0.0, 0.0, 0.0, 1.0]) + + return m -def isFaceVisible(face, obj, cameraObj): - """ - Determine if the face is visible from the current camera. 
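doProjection() above multiplies each vertex, expressed in homogeneous coordinates, by the combined projection matrix and then performs the perspective divide. A minimal standalone sketch with plain lists instead of Blender's Matrix/Vector types (illustrative only):

def mat_vec(m, v):
    # multiply a 4x4 matrix (list of four rows) by a 4-component vector
    return [sum([m[i][j] * v[j] for j in range(4)]) for i in range(4)]

def project(m, point):
    x, y, z = point
    p = mat_vec(m, [x, y, z, 1.0])   # homogeneous coordinates
    if p[3] > 0:
        p[0] = p[0] / p[3]           # perspective divide
        p[1] = p[1] / p[3]
    return p[:3]                     # x, y land in [-1, 1] inside the frustum

# 'm' would be Projector.projectionMatrix, i.e. the perspective (or
# orthographic) matrix multiplied by the inverse camera matrix.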
- """ - numvert = len(face) - # backface culling - a = [] - a.append(face[0][0]) - a.append(face[0][1]) - a.append(face[0][2]) - a = RotatePoint(a[0], a[1], a[2], obj.RotX, obj.RotY, obj.RotZ) - a[0] += obj.LocX - cameraObj.LocX - a[1] += obj.LocY - cameraObj.LocY - a[2] += obj.LocZ - cameraObj.LocZ - b = [] - b.append(face[1][0]) - b.append(face[1][1]) - b.append(face[1][2]) - b = RotatePoint(b[0], b[1], b[2], obj.RotX, obj.RotY, obj.RotZ) - b[0] += obj.LocX - cameraObj.LocX - b[1] += obj.LocY - cameraObj.LocY - b[2] += obj.LocZ - cameraObj.LocZ - c = [] - c.append(face[numvert-1][0]) - c.append(face[numvert-1][1]) - c.append(face[numvert-1][2]) - c = RotatePoint(c[0], c[1], c[2], obj.RotX, obj.RotY, obj.RotZ) - c[0] += obj.LocX - cameraObj.LocX - c[1] += obj.LocY - cameraObj.LocY - c[2] += obj.LocZ - cameraObj.LocZ - - norm = [0,0,0] - norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2]) - norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2])) - norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1]) - - d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2] - return (d<0) # --------------------------------------------------------------------- # -## Mesh representation class +## 2D Object representation class # # --------------------------------------------------------------------- # TODO: a class to represent the needed properties of a 2D vector image +# For now just using a [N]Mesh structure. # --------------------------------------------------------------------- @@ -250,103 +312,290 @@ class VectorWriter: Every subclasses of VectorWriter must have at last the following public methods: - - printCanvas(mesh) --- where mesh is as specified before. + - open(self) + - close(self) + - printCanvas(self, scene, + doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False): """ - def __init__(self, fileName, canvasSize): - """Open the file named #fileName# and set the canvas size.""" + def __init__(self, fileName): + """Set the output file name and other properties""" + + self.outputFileName = fileName + self.file = None - self.file = open(fileName, "w") - print "Outputting to: ", fileName + context = Scene.GetCurrent().getRenderingContext() + self.canvasSize = ( context.imageSizeX(), context.imageSizeY() ) + + self.startFrame = 1 + self.endFrame = 1 + self.animation = False - self.canvasSize = canvasSize - + ## # Public Methods # - def printCanvas(mesh): - return - - - # Private Methods - # - - def _printHeader(): + def open(self, startFrame=1, endFrame=1): + if startFrame != endFrame: + self.startFrame = startFrame + self.endFrame = endFrame + self.animation = True + + self.file = open(self.outputFileName, "w") + print "Outputting to: ", self.outputFileName + return - def _printFooter(): + def close(self): + self.file.close() return + def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, + showHiddenEdges=False): + """This is the interface for the needed printing routine. + """ + return + ## SVG Writer class SVGVectorWriter(VectorWriter): """A concrete class for writing SVG output. - - The class does not support animations, yet. - Sorry. """ - def __init__(self, file, canvasSize): - """Simply call the parent Contructor.""" - VectorWriter.__init__(self, file, canvasSize) + def __init__(self, fileName): + """Simply call the parent Contructor. 
+ """ + VectorWriter.__init__(self, fileName) + ## # Public Methods # - - def printCanvas(self, mesh): - """Convert the mesh representation to SVG.""" + def open(self, startFrame=1, endFrame=1): + """Do some initialization operations. + """ + VectorWriter.open(self, startFrame, endFrame) self._printHeader() + + def close(self): + """Do some finalization operation. + """ + self._printFooter() + + # remember to call the close method of the parent + VectorWriter.close(self) + - for obj in mesh: - for face in obj: - self._printPolygon(face) + def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, + showHiddenEdges=False): + """Convert the scene representation to SVG. + """ + + Objects = scene.getChildren() + + context = scene.getRenderingContext() + framenumber = context.currentFrame() + + if self.animation: + framestyle = "display:none" + else: + framestyle = "display:block" - self._printFooter() + # Assign an id to this group so we can set properties on it using DOM + self.file.write("\n" % + (framenumber, framestyle) ) + + for obj in Objects: + + if(obj.getType() != 'Mesh'): + continue + + self.file.write("\n" % obj.getName()) + + mesh = obj.getData(mesh=1) + + if doPrintPolygons: + self._printPolygons(mesh) + + if doPrintEdges: + self._printEdges(mesh, showHiddenEdges) + + self.file.write("\n") + + self.file.write("\n") + - + ## # Private Methods # + def _calcCanvasCoord(self, v): + """Convert vertex in scene coordinates to canvas coordinates. + """ + + pt = Vector([0, 0, 0]) + + mW = float(self.canvasSize[0])/2.0 + mH = float(self.canvasSize[1])/2.0 + + # rescale to canvas size + pt[0] = v.co[0]*mW + mW + pt[1] = v.co[1]*mH + mH + pt[2] = v.co[2] + + # For now we want (0,0) in the top-left corner of the canvas. + # Mirror and translate along y + pt[1] *= -1 + pt[1] += self.canvasSize[1] + + return pt + def _printHeader(self): """Print SVG header.""" self.file.write("\n") - self.file.write("\n") + self.file.write("\n\n" % self.canvasSize) + if self.animation: + + self.file.write("""\n\n + \n""" % (self.startFrame, self.endFrame, self.startFrame) ) + def _printFooter(self): """Print the SVG footer.""" self.file.write("\n\n") - self.file.close() - def _printPolygon(self, face): - """Print our primitive, finally. + def _printPolygons(self, mesh): + """Print the selected (visible) polygons. + """ + + if len(mesh.faces) == 0: + return + + self.file.write("\n") - There is no color Handling for now, *FIX!* + for face in mesh.faces: + if not face.sel: + continue + + self.file.write("\n") + + self.file.write("\n") + + def _printEdges(self, mesh, showHiddenEdges=False): + """Print the wireframe using mesh edges. """ - intensity = 128 - stroke_width=1 + stroke_width=EDGES_WIDTH + stroke_col = [0, 0, 0] - self.file.write("\n") - for v in face: - if face.index(v)!= 0: - self.file.write(", ") + for e in mesh.edges: - self.file.write(`v[0]` + ", " + `v[1]`) + hidden_stroke_style = "" + + # We consider an edge visible if _both_ its vertices are selected, + # hence an edge is hidden if _any_ of its vertices is deselected. 
+ if e.sel == 0: + if showHiddenEdges == False: + continue + else: + hidden_stroke_style = ";\n stroke-dasharray:3, 3" + + p1 = self._calcCanvasCoord(e.v1) + p2 = self._calcCanvasCoord(e.v2) + + self.file.write("\n") + + self.file.write("\n") - self.file.write("\"\n") - self.file.write("\tstyle=\"fill:rgb("+str(intensity)+","+str(intensity)+","+str(intensity)+");") - self.file.write(" stroke:rgb(0,0,0);") - self.file.write(" stroke-width:"+str(stroke_width)+"\"/>\n") # --------------------------------------------------------------------- @@ -355,147 +604,791 @@ class SVGVectorWriter(VectorWriter): # # --------------------------------------------------------------------- +# A dictionary to collect all the different edge styles and their edge +# selection criteria +edgeSelectionStyles = { + 'normal': MeshUtils.isVisibleEdge, + 'silhouette': MeshUtils.isSilhouetteEdge + } + +# A dictionary to collect the supported output formats +outputWriters = { + 'SVG': SVGVectorWriter, + } + + class Renderer: """Render a scene viewed from a given camera. - This class is responsible of the rendering process, hence transormation - and projection of the ojects in the scene are invoked by the renderer. + This class is responsible of the rendering process, transformation and + projection of the objects in the scene are invoked by the renderer. - The user can optionally provide a specific camera for the rendering, see - the #doRendering# method for more informations. + The rendering is done using the active camera for the current scene. """ def __init__(self): - """Set the canvas size to a defaulr value. - - The only instance attribute here is the canvas size, which can be - queryed to the renderer by other entities. + """Make the rendering process only for the current scene by default. + + We will work on a copy of the scene, be sure that the current scene do + not get modified in any way. """ - self.canvasSize = (0.0, 0.0) + # Render the current Scene, this should be a READ-ONLY property + self._SCENE = Scene.GetCurrent() + + # Use the aspect ratio of the scene rendering context + context = self._SCENE.getRenderingContext() + + aspect_ratio = float(context.imageSizeX())/float(context.imageSizeY()) + self.canvasRatio = (float(context.aspectRatioX())*aspect_ratio, + float(context.aspectRatioY()) + ) + + # Render from the currently active camera + self.cameraObj = self._SCENE.getCurrentCamera() + + # Get the list of lighting sources + obj_lst = self._SCENE.getChildren() + self.lights = [ o for o in obj_lst if o.getType() == 'Lamp'] + + # When there are no lights we use a default lighting source + # that have the same position of the camera + if len(self.lights) == 0: + l = Lamp.New('Lamp') + lobj = Object.New('Lamp') + lobj.loc = self.cameraObj.loc + lobj.link(l) + self.lights.append(lobj) + + ## # Public Methods # - def getCanvasSize(self): - """Return the current canvas size read from Blender rendering context""" - return self.canvasSize + def doRendering(self, outputWriter, animation=False): + """Render picture or animation and write it out. + + The parameters are: + - a Vector writer object that will be used to output the result. + - a flag to tell if we want to render an animation or only the + current frame. 
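The _calcCanvasCoord() helper of the SVG writer above rescales the projected, normalized coordinates to pixel positions and flips the y axis, since SVG puts (0,0) in the top-left corner. The same mapping on plain numbers (canvas size is illustrative):

canvasSize = (640, 480)

def to_canvas(x, y):
    mW = canvasSize[0] / 2.0
    mH = canvasSize[1] / 2.0
    px = x * mW + mW
    py = y * mH + mH
    # mirror and translate along y so that (0,0) is the top-left corner
    py = canvasSize[1] - py
    return px, py

print to_canvas(0.0, 0.0)     # (320.0, 240.0), centre of the image
print to_canvas(-1.0, 1.0)    # (0.0, 0.0), top-left corner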
+ """ - def doRendering(self, scene, cameraObj=None): + context = self._SCENE.getRenderingContext() + currentFrame = context.currentFrame() + + # Handle the animation case + if not animation: + startFrame = currentFrame + endFrame = startFrame + outputWriter.open() + else: + startFrame = context.startFrame() + endFrame = context.endFrame() + outputWriter.open(startFrame, endFrame) + + # Do the rendering process frame by frame + print "Start Rendering!" + for f in range(startFrame, endFrame+1): + context.currentFrame(f) + + renderedScene = self.doRenderScene(self._SCENE) + outputWriter.printCanvas(renderedScene, + doPrintPolygons = PRINT_POLYGONS, + doPrintEdges = PRINT_EDGES, + showHiddenEdges = SHOW_HIDDEN_EDGES) + + # clear the rendered scene + self._SCENE.makeCurrent() + Scene.unlink(renderedScene) + del renderedScene + + outputWriter.close() + print "Done!" + context.currentFrame(currentFrame) + + + def doRenderScene(self, inputScene): """Control the rendering process. Here we control the entire rendering process invoking the operation - needed to transforma project the 3D scene in two dimensions. + needed to transform and project the 3D scene in two dimensions. + """ + + # Use some temporary workspace, a full copy of the scene + workScene = inputScene.copy(2) + + # Get a projector for this scene. + # NOTE: the projector wants object in world coordinates, + # so we should apply modelview transformations _before_ + # projection transformations + proj = Projector(self.cameraObj, self.canvasRatio) + + # global processing of the scene + + self._doConvertGeometricObjToMesh(workScene) + + self._doSceneClipping(workScene) + + + # XXX: Joining objects does not work in batch mode!! + # Do not touch the following if, please :) + + global OPTIMIZE_FOR_SPACE + if Blender.mode == 'background': + print "\nWARNING! Joining objects not supported in background mode!\n" + OPTIMIZE_FOR_SPACE = False + + if OPTIMIZE_FOR_SPACE: + self._joinMeshObjectsInScene(workScene) + + + self._doSceneDepthSorting(workScene) + + # Per object activities + + Objects = workScene.getChildren() + for obj in Objects: + + if obj.getType() != 'Mesh': + print "Only Mesh supported! - Skipping type:", obj.getType() + continue + + print "Rendering: ", obj.getName() - Parameters: - scene --- the Blender Scene to render - cameraObj --- the camera object to use for the viewing processing + mesh = obj.getData() + + self._doModelToWorldCoordinates(mesh, obj.matrix) + + self._doObjectDepthSorting(mesh) + + # We use both Mesh and NMesh because for depth sorting we change + # face order and Mesh class don't let us to do that. + mesh.update() + mesh = obj.getData(mesh=1) + + self._doBackFaceCulling(mesh) + + self._doColorAndLighting(mesh) + + self._doEdgesStyle(mesh, edgeSelectionStyles[EDGE_STYLE]) + + self._doProjection(mesh, proj) + + # Update the object data, important! :) + mesh.update() + + return workScene + + + ## + # Private Methods + # + + # Utility methods + + def _getObjPosition(self, obj): + """Return the obj position in World coordinates. + """ + return obj.matrix.translationPart() + + def _cameraViewDirection(self): + """Get the View Direction form the camera matrix. """ + return Vector(self.cameraObj.matrix[2]).resize3D() + - if cameraObj == None: - cameraObj = scene.getCurrentCamera() + # Faces methods + + def _isFaceVisible(self, face): + """Determine if a face of an object is visible from the current camera. 
- # TODO: given the camera get the Wold-to-camera transform and the - # projection matrix + The view vector is calculated from the camera location and one of the + vertices of the face (expressed in World coordinates, after applying + modelview transformations). + + After those transformations we determine if a face is visible by + computing the angle between the face normal and the view vector, this + angle has to be between -90 and 90 degrees for the face to be visible. + This corresponds somehow to the dot product between the two, if it + results > 0 then the face is visible. + + There is no need to normalize those vectors since we are only interested in + the sign of the cross product and not in the product value. + + NOTE: here we assume the face vertices are in WorldCoordinates, so + please transform the object _before_ doing the test. + """ + + normal = Vector(face.no) + camPos = self._getObjPosition(self.cameraObj) + view_vect = None + + # View Vector in orthographics projections is the view Direction of + # the camera + if self.cameraObj.data.getType() == 1: + view_vect = self._cameraViewDirection() + + # View vector in perspective projections can be considered as + # the difference between the camera position and one point of + # the face, we choose the farthest point from the camera. + if self.cameraObj.data.getType() == 0: + vv = max( [ ((camPos - Vector(v.co)).length, (camPos - Vector(v.co))) for v in face] ) + view_vect = vv[1] + + # if d > 0 the face is visible from the camera + d = view_vect * normal - context = scene.getRenderingContext() - self.canvasSize = (context.imageSizeX(), context.imageSizeY()) + if d > 0: + return True + else: + return False + + + # Scene methods + + def _doConvertGeometricObjToMesh(self, scene): + """Convert all "geometric" objects to mesh ones. + """ + geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text'] + + Objects = scene.getChildren() + objList = [ o for o in Objects if o.getType() in geometricObjTypes ] + for obj in objList: + old_obj = obj + obj = self._convertToRawMeshObj(obj) + scene.link(obj) + scene.unlink(old_obj) + + + # XXX Workaround for Text and Curve which have some normals + # inverted when they are converted to Mesh, REMOVE that when + # blender will fix that!! + if old_obj.getType() in ['Curve', 'Text']: + me = obj.getData(mesh=1) + for f in me.faces: f.sel = 1; + for v in me.verts: v.sel = 1; + me.remDoubles(0) + me.triangleToQuad() + me.recalcNormals() + me.update() + + + def _doSceneClipping(self, scene): + """Clip objects against the View Frustum. + + For now clip away only objects according to their center position. + """ + + cpos = self._getObjPosition(self.cameraObj) + view_vect = self._cameraViewDirection() + + near = self.cameraObj.data.clipStart + far = self.cameraObj.data.clipEnd + + aspect = float(self.canvasRatio[0])/float(self.canvasRatio[1]) + fovy = atan(0.5/aspect/(self.cameraObj.data.lens/32)) + fovy = fovy * 360.0/pi + + Objects = scene.getChildren() + for o in Objects: + if o.getType() != 'Mesh': continue; + + obj_vect = Vector(cpos) - self._getObjPosition(o) + + d = obj_vect*view_vect + theta = AngleBetweenVecs(obj_vect, view_vect) + + # if the object is outside the view frustum, clip it away + if (d < near) or (d > far) or (theta > fovy): + scene.unlink(o) + + def _doSceneDepthSorting(self, scene): + """Sort objects in the scene. + + The object sorting is done accordingly to the object centers. 
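The visibility test in _isFaceVisible() above reduces to the sign of a dot product between the view vector and the face normal. A tiny standalone check with plain tuples (values are illustrative):

def dot(a, b):
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]

view_vect = (0.0, 0.0, 1.0)   # from the face towards the camera

print dot(view_vect, (0.0, 0.0, 1.0)) > 0    # True: front-facing, keep it
print dot(view_vect, (0.0, 0.0, -1.0)) > 0   # False: back-facing, cull it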
+ """ + + c = self._getObjPosition(self.cameraObj) + + by_center_pos = (lambda o1, o2: + (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and + cmp((self._getObjPosition(o1) - Vector(c)).length, + (self._getObjPosition(o2) - Vector(c)).length) + ) + + # TODO: implement sorting by bounding box, if obj1.bb is inside obj2.bb, + # then ob1 goes farther than obj2, useful when obj2 has holes + by_bbox = None Objects = scene.getChildren() + Objects.sort(by_center_pos) - # A mesh to store the transformed geometrical structure - mesh = [] + # update the scene + for o in Objects: + scene.unlink(o) + scene.link(o) + + def _joinMeshObjectsInScene(self, scene): + """Merge all the Mesh Objects in a scene into a single Mesh Object. + """ + mesh = Mesh.New() + bigObj = Object.New('Mesh', 'BigOne') + bigObj.link(mesh) + + oList = [o for o in scene.getChildren() if o.getType()=='Mesh'] + bigObj.join(oList) + scene.link(bigObj) + for o in oList: + scene.unlink(o) + + scene.update() + + + # Per object methods + + def _convertToRawMeshObj(self, object): + """Convert geometry based object to a mesh object. + """ + me = Mesh.New('RawMesh_'+object.name) + me.getFromObject(object.name) + + newObject = Object.New('Mesh', 'RawMesh_'+object.name) + newObject.link(me) + + # If the object has no materials set a default material + if not me.materials: + me.materials = [Material.New()] + #for f in me.faces: f.mat = 0 + + newObject.setMatrix(object.getMatrix()) + + return newObject + + def _doModelToWorldCoordinates(self, mesh, matrix): + """Transform object coordinates to world coordinates. + + This step is done simply applying to the object its tranformation + matrix and recalculating its normals. + """ + mesh.transform(matrix, True) + + def _doObjectDepthSorting(self, mesh): + """Sort faces in an object. + + The faces in the object are sorted following the distance of the + vertices from the camera position. + """ + c = self._getObjPosition(self.cameraObj) + + # hackish sorting of faces + + # Sort faces according to the max distance from the camera + by_max_vert_dist = (lambda f1, f2: + cmp(max([(Vector(v.co)-Vector(c)).length for v in f1]), + max([(Vector(v.co)-Vector(c)).length for v in f2]))) - for obj in Objects: - - if (obj.getType() != "Mesh"): - print "Type:", obj.getType(), "\tSorry, only mesh Object supported!" - continue + # Sort faces according to the min distance from the camera + by_min_vert_dist = (lambda f1, f2: + cmp(min([(Vector(v.co)-Vector(c)).length for v in f1]), + min([(Vector(v.co)-Vector(c)).length for v in f2]))) + + # Sort faces according to the avg distance from the camera + by_avg_vert_dist = (lambda f1, f2: + cmp(sum([(Vector(v.co)-Vector(c)).length for v in f1])/len(f1), + sum([(Vector(v.co)-Vector(c)).length for v in f2])/len(f2))) - OBJmesh = obj.getData() # Get the mesh data for the object - meshfaces = OBJmesh.faces # The number of faces in the object + mesh.faces.sort(by_max_vert_dist) + mesh.faces.reverse() - transformed_object = [] + def _doBackFaceCulling(self, mesh): + """Simple Backface Culling routine. + + At this level we simply do a visibility test face by face and then + select the vertices belonging to visible faces. 
+ """ + + # Select all vertices, so edges can be displayed even if there are no + # faces + for v in mesh.verts: + v.sel = 1 + + Mesh.Mode(Mesh.SelectModes['FACE']) + # Loop on faces + for f in mesh.faces: + f.sel = 0 + if self._isFaceVisible(f): + f.sel = 1 + + # Is this the correct way to propagate the face selection info to the + # vertices belonging to a face ?? + # TODO: Using the Mesh module this should come for free. Right? + #Mesh.Mode(Mesh.SelectModes['VERTEX']) + #for f in mesh.faces: + # if not f.sel: + # for v in f: v.sel = 0; + + #for f in mesh.faces: + # if f.sel: + # for v in f: v.sel = 1; + + def _doColorAndLighting(self, mesh): + """Apply an Illumination model to the object. + + The Illumination model used is the Phong one, it may be inefficient, + but I'm just learning about rendering and starting from Phong seemed + the most natural way. + """ - for face in meshfaces: + # If the mesh has vertex colors already, use them, + # otherwise turn them on and do some calculations + if mesh.vertexColors: + return + mesh.vertexColors = 1 - # TODO: per face color calculation - # TODO: add/sorting in Z' direction (per face??) + materials = mesh.materials + + # TODO: use multiple lighting sources + light_obj = self.lights[0] + light_pos = self._getObjPosition(light_obj) + light = light_obj.data - # if the face is visible flatten it on the "picture plane" - if isFaceVisible(face, obj, cameraObj): - - # Store transformed face - transformed_face = [] + camPos = self._getObjPosition(self.cameraObj) + + # We do per-face color calculation (FLAT Shading), we can easily turn + # to a per-vertex calculation if we want to implement some shading + # technique. For an example see: + # http://www.miralab.unige.ch/papers/368.pdf + for f in mesh.faces: + if not f.sel: + continue - for vert in face: + mat = None + if materials: + mat = materials[f.mat] - vertxyz = list(vert) - - p1 = flatten_new(vert, cameraObj, self.canvasSize, - obj) - transformed_face.append(p1) - continue + # A new default material + if mat == None: + mat = Material.New('defMat') + + L = Vector(light_pos).normalize() - # rotate camera - vertxyz = RotatePoint(vertxyz[0], vertxyz[1], vertxyz[2], - cameraObj.RotX, cameraObj.RotY, cameraObj.RotZ) - #-cameraObj.RotX, -cameraObj.RotY, -cameraObj.RotZ) + V = (Vector(camPos) - Vector(f.v[0].co)).normalize() + N = Vector(f.no).normalize() - # original setting for translate - vertxyz[0] -= (obj.LocX - cameraObj.LocX) - vertxyz[1] -= (obj.LocY - cameraObj.LocY) - vertxyz[2] -= (obj.LocZ - cameraObj.LocZ) + R = 2 * (N*L) * N - L + # TODO: Attenuation factor (not used for now) + a0 = 1; a1 = 0.0; a2 = 0.0 + d = (Vector(f.v[0].co) - Vector(light_pos)).length + fd = min(1, 1.0/(a0 + a1*d + a2*d*d)) - # rotate object - vertxyz = RotatePoint(vertxyz[0], vertxyz[1], vertxyz[2], obj.RotX, obj.RotY, obj.RotZ) + # Ambient component + Ia = 1.0 + ka = mat.getAmb() * Vector([0.1, 0.1, 0.1]) + Iamb = Ia * ka + + # Diffuse component (add light.col for kd) + kd = mat.getRef() * Vector(mat.getRGBCol()) + Ip = light.getEnergy() + Idiff = Ip * kd * (N*L) + + # Specular component + ks = mat.getSpec() * Vector(mat.getSpecCol()) + ns = mat.getHardness() + Ispec = Ip * ks * pow((V * R), ns) + # Emissive component + ki = Vector([mat.getEmit()]*3) + I = ki + Iamb + Idiff + Ispec - p1 = flatten(vertxyz[0], vertxyz[1], vertxyz[2], - cameraObj, self.canvasSize) + # Set Alpha component + I = list(I) + I.append(mat.getAlpha()) - transformed_face.append(p1) - - # just some fake lighting... 
+ # Clamp I values between 0 and 1 + I = [ min(c, 1) for c in I] + I = [ max(0, c) for c in I] + tmp_col = [ int(c * 255.0) for c in I] - transformed_object.append(transformed_face) + for c in f.col: + c.r = tmp_col[0] + c.g = tmp_col[1] + c.b = tmp_col[2] + c.a = tmp_col[3] - # at the end of the loop on obj - mesh.append(transformed_object) - return mesh + def _doEdgesStyle(self, mesh, edgestyleSelect): + """Process Mesh Edges accroding to a given selection style. + Examples of algorithms: - # Private Methods - # + Contours: + given an edge if its adjacent faces have the same normal (that is + they are complanar), than deselect it. - def _removehiddenFaces(obj): - return + Silhouettes: + given an edge if one its adjacent faces is frontfacing and the + other is backfacing, than select it, else deselect. + """ + + Mesh.Mode(Mesh.SelectModes['EDGE']) + + for e in mesh.edges: + + if edgestyleSelect(e, mesh): + e.sel = 1 + else: + e.sel = 0 + + def _doProjection(self, mesh, projector): + """Calculate the Projection for the object. + """ + # TODO: maybe using the object.transform() can be faster? + + for v in mesh.verts: + p = projector.doProjection(v.co) + v.co[0] = p[0] + v.co[1] = p[1] + v.co[2] = p[2] - def _testClipping(face): - return # --------------------------------------------------------------------- # -## Main Program +## GUI Class and Main Program # # --------------------------------------------------------------------- -scene = Scene.GetCurrent() -renderer = Renderer() +from Blender import BGL, Draw +from Blender.BGL import * -projectedMesh = renderer.doRendering(scene) -canvasSize = renderer.getCanvasSize() +class GUI: + + def _init(): + + # Output Format menu + default_value = outputWriters.keys().index(OUTPUT_FORMAT)+1 + GUI.outFormatMenu = Draw.Create(default_value) + GUI.evtOutFormatMenu = 0 + + # Animation toggle button + GUI.animToggle = Draw.Create(RENDER_ANIMATION) + GUI.evtAnimToggle = 1 + + # Join Objects toggle button + GUI.joinObjsToggle = Draw.Create(OPTIMIZE_FOR_SPACE) + GUI.evtJoinObjsToggle = 2 + + # Render filled polygons + GUI.polygonsToggle = Draw.Create(PRINT_POLYGONS) + GUI.evtPolygonsToggle = 3 + # We hide the POLYGON_EXPANSION_TRICK, for now + + # Render polygon edges + GUI.showEdgesToggle = Draw.Create(PRINT_EDGES) + GUI.evtShowEdgesToggle = 4 + + # Render hidden edges + GUI.showHiddenEdgesToggle = Draw.Create(SHOW_HIDDEN_EDGES) + GUI.evtShowHiddenEdgesToggle = 5 + + # Edge Style menu + default_value = edgeSelectionStyles.keys().index(EDGE_STYLE)+1 + GUI.edgeStyleMenu = Draw.Create(default_value) + GUI.evtEdgeStyleMenu = 6 + + # Edge Width slider + GUI.edgeWidthSlider = Draw.Create(EDGES_WIDTH) + GUI.evtEdgeWidthSlider = 7 + + # Render Button + GUI.evtRenderButton = 8 + + # Exit Button + GUI.evtExitButton = 9 + + def draw(): + + # initialize static members + GUI._init() + + glClear(GL_COLOR_BUFFER_BIT) + glColor3f(0.0, 0.0, 0.0) + glRasterPos2i(10, 350) + Draw.Text("VRM: Vector Rendering Method script.") + glRasterPos2i(10, 335) + Draw.Text("Press Q or ESC to quit.") + + # Build the output format menu + glRasterPos2i(10, 310) + Draw.Text("Select the output Format:") + outMenuStruct = "Output Format %t" + for t in outputWriters.keys(): + outMenuStruct = outMenuStruct + "|%s" % t + GUI.outFormatMenu = Draw.Menu(outMenuStruct, GUI.evtOutFormatMenu, + 10, 285, 160, 18, GUI.outFormatMenu.val, "Choose the Output Format") + + # Animation toggle + GUI.animToggle = Draw.Toggle("Animation", GUI.evtAnimToggle, + 10, 260, 160, 18, GUI.animToggle.val, + "Toggle 
rendering of animations") + + # Join Objects toggle + GUI.joinObjsToggle = Draw.Toggle("Join objects", GUI.evtJoinObjsToggle, + 10, 235, 160, 18, GUI.joinObjsToggle.val, + "Join objects in the rendered file") + + # Render Button + Draw.Button("Render", GUI.evtRenderButton, 10, 210-25, 75, 25+18, + "Start Rendering") + Draw.Button("Exit", GUI.evtExitButton, 95, 210-25, 75, 25+18, "Exit!") + + # Rendering Styles + glRasterPos2i(200, 310) + Draw.Text("Rendering Style:") + + # Render Polygons + GUI.polygonsToggle = Draw.Toggle("Filled Polygons", GUI.evtPolygonsToggle, + 200, 285, 160, 18, GUI.polygonsToggle.val, + "Render filled polygons") + + # Render Edges + GUI.showEdgesToggle = Draw.Toggle("Show Edges", GUI.evtShowEdgesToggle, + 200, 260, 160, 18, GUI.showEdgesToggle.val, + "Render polygon edges") + + if GUI.showEdgesToggle.val == 1: + + # Edge Style + edgeStyleMenuStruct = "Edge Style %t" + for t in edgeSelectionStyles.keys(): + edgeStyleMenuStruct = edgeStyleMenuStruct + "|%s" % t + GUI.edgeStyleMenu = Draw.Menu(edgeStyleMenuStruct, GUI.evtEdgeStyleMenu, + 200, 235, 160, 18, GUI.edgeStyleMenu.val, + "Choose the edge style") + + # Edge size + GUI.edgeWidthSlider = Draw.Slider("Width: ", GUI.evtEdgeWidthSlider, + 200, 210, 160, 18, GUI.edgeWidthSlider.val, + 0.0, 10.0, 0, "Change Edge Width") + + # Show Hidden Edges + GUI.showHiddenEdgesToggle = Draw.Toggle("Show Hidden Edges", + GUI.evtShowHiddenEdgesToggle, + 200, 185, 160, 18, GUI.showHiddenEdgesToggle.val, + "Render hidden edges as dashed lines") + + glRasterPos2i(10, 160) + Draw.Text("Antonio Ospite (c) 2006") + + def event(evt, val): + + if evt == Draw.ESCKEY or evt == Draw.QKEY: + Draw.Exit() + else: + return + + Draw.Redraw(1) + + def button_event(evt): + global PRINT_POLYGONS + global POLYGON_EXPANSION_TRICK + global PRINT_EDGES + global SHOW_HIDDEN_EDGES + global EDGE_STYLE + global EDGES_WIDTH + global RENDER_ANIMATION + global OPTIMIZE_FOR_SPACE + global OUTPUT_FORMAT + + if evt == GUI.evtExitButton: + Draw.Exit() + elif evt == GUI.evtOutFormatMenu: + i = GUI.outFormatMenu.val - 1 + OUTPUT_FORMAT = outputWriters.keys()[i] + elif evt == GUI.evtAnimToggle: + RENDER_ANIMATION = bool(GUI.animToggle.val) + elif evt == GUI.evtJoinObjsToggle: + OPTIMIZE_FOR_SPACE = bool(GUI.joinObjsToggle.val) + elif evt == GUI.evtPolygonsToggle: + PRINT_POLYGONS = bool(GUI.polygonsToggle.val) + elif evt == GUI.evtShowEdgesToggle: + PRINT_EDGES = bool(GUI.showEdgesToggle.val) + elif evt == GUI.evtShowHiddenEdgesToggle: + SHOW_HIDDEN_EDGES = bool(GUI.showHiddenEdgesToggle.val) + elif evt == GUI.evtEdgeStyleMenu: + i = GUI.edgeStyleMenu.val - 1 + EDGE_STYLE = edgeSelectionStyles.keys()[i] + elif evt == GUI.evtEdgeWidthSlider: + EDGES_WIDTH = float(GUI.edgeWidthSlider.val) + elif evt == GUI.evtRenderButton: + label = "Save %s" % OUTPUT_FORMAT + # Show the File Selector + global outputfile + Blender.Window.FileSelector(vectorize, label, outputfile) + + else: + print "Event: %d not handled!" 
% evt + + if evt: + Draw.Redraw(1) + #GUI.conf_debug() + + def conf_debug(): + print + print "PRINT_POLYGONS:", PRINT_POLYGONS + print "POLYGON_EXPANSION_TRICK:", POLYGON_EXPANSION_TRICK + print "PRINT_EDGES:", PRINT_EDGES + print "SHOW_HIDDEN_EDGES:", SHOW_HIDDEN_EDGES + print "EDGE_STYLE:", EDGE_STYLE + print "EDGES_WIDTH:", EDGES_WIDTH + print "RENDER_ANIMATION:", RENDER_ANIMATION + print "OPTIMIZE_FOR_SPACE:", OPTIMIZE_FOR_SPACE + print "OUTPUT_FORMAT:", OUTPUT_FORMAT + + _init = staticmethod(_init) + draw = staticmethod(draw) + event = staticmethod(event) + button_event = staticmethod(button_event) + conf_debug = staticmethod(conf_debug) + +# A wrapper function for the vectorizing process +def vectorize(filename): + """The vectorizing process is as follows: + + - Instanciate the writer and the renderer + - Render! + """ + + if filename == "": + print "\nERROR: invalid file name!" + return + + from Blender import Window + editmode = Window.EditMode() + if editmode: Window.EditMode(0) + + actualWriter = outputWriters[OUTPUT_FORMAT] + writer = actualWriter(filename) + + renderer = Renderer() + renderer.doRendering(writer, RENDER_ANIMATION) -# hackish sorting of faces according to the max z value of a vertex -for o in projectedMesh: - o.sort(lambda f1, f2: - cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2))) - o.reverse() + if editmode: Window.EditMode(1) -writer = SVGVectorWriter("proba.svg", canvasSize) -writer.printCanvas(projectedMesh) + +# Here the main +if __name__ == "__main__": + + outputfile = "" + basename = Blender.sys.basename(Blender.Get('filename')) + if basename != "": + outputfile = Blender.sys.splitext(basename)[0] + "." + str(OUTPUT_FORMAT).lower() + + if Blender.mode == 'background': + vectorize(outputfile) + else: + Draw.Register(GUI.draw, GUI.event, GUI.button_event)
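The __main__ block above derives the default output name from the current .blend file and either renders it directly (background mode, typically entered by starting Blender with something like "blender -b file.blend -P vrm.py") or registers the GUI and lets the user pick a file. The name derivation on an illustrative file name:

import Blender

# with e.g. /home/user/scene.blend loaded:
basename = Blender.sys.basename(Blender.Get('filename'))        # 'scene.blend'
outputfile = Blender.sys.splitext(basename)[0] + "." + "svg"    # 'scene.svg'

# vectorize(outputfile) then instantiates outputWriters['SVG'] on that
# name and hands the writer to Renderer.doRendering().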