X-Git-Url: https://git.ao2.it/vrm.git/blobdiff_plain/ce9591fe89bfd93523fedeb453342561d7054dab..08e34a873729c718a510ec642d36eebaef6f4ee7:/vrm.py

diff --git a/vrm.py b/vrm.py
index bab02a8..d85b085 100755
--- a/vrm.py
+++ b/vrm.py
@@ -34,7 +34,7 @@ Tooltip: 'Vector Rendering Method Export Script 0.3'
 #
 # Additional credits:
 #   Thanks to Emilio Aguirre for S2flender from which I took inspirations :)
-#   Thanks to Anthony C. D'Agostino for the backface.py script
+#   Thanks to Anthony C. D'Agostino for the original backface.py script
 #
 # ---------------------------------------------------------------------
@@ -84,14 +84,12 @@ class Projector:
         else:
             m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far)

-        m1 = Matrix()
-        mP = Matrix()
-
         # View transformation
-        cam = cameraObj.getInverseMatrix()
+        cam = Matrix(cameraObj.getInverseMatrix())
         cam.transpose()

-        m1 = obMesh.getMatrix()
+        m1 = Matrix(obMesh.getMatrix())
         m1.transpose()

         mP = cam * m1
@@ -117,11 +115,11 @@ class Projector:
         mH = self.size[1]/2

         if p[3]<=0:
-            p[0] = int(p[0]*mW)+mW
-            p[1] = int(p[1]*mH)+mH
+            p[0] = round(p[0]*mW)+mW
+            p[1] = round(p[1]*mH)+mH
         else:
-            p[0] = int((p[0]/p[3])*mW)+mW
-            p[1] = int((p[1]/p[3])*mH)+mH
+            p[0] = round((p[0]/p[3])*mW)+mW
+            p[1] = round((p[1]/p[3])*mH)+mH

         # For now we want (0,0) in the top-left corner of the canvas
         # Mirror and translate along y
@@ -181,7 +179,7 @@ class Projector:

 # ---------------------------------------------------------------------
 #
-## Mesh representation class
+## Object representation class
 #
 # ---------------------------------------------------------------------
@@ -259,12 +257,15 @@ class SVGVectorWriter(VectorWriter):

         self._printHeader()

-        for obj in scene:
+        Objects = scene.getChildren()
+        for obj in Objects:
             self.file.write("<g>\n")

-            for face in obj.faces:
+            for face in obj.getData().faces:
                 self._printPolygon(face)

+            self._printWireframe(obj.getData())
+
             self.file.write("</g>\n")

         self._printFooter()
@@ -277,7 +278,9 @@ class SVGVectorWriter(VectorWriter):
     def _printHeader(self):
         """Print SVG header."""

         self.file.write("<?xml version=\"1.0\"?>\n")
-        self.file.write("<svg>\n")
+        self.file.write("<svg width=\"%d\" height=\"%d\">\n\n" % self.canvasSize)
@@ -288,32 +291,60 @@ class SVGVectorWriter(VectorWriter):
         self.file.write("</svg>\n\n")
         self.file.close()

-    def _printPolygon(self, face):
-        """Print our primitive, finally.
-
-        There is no color Handling for now, *FIX!*
+    def _printWireframe(self, mesh):
+        """Print the wireframe using mesh edges... is this the correct way?
         """

-        stroke_width=1
+        print mesh.edges
+        print
+        print mesh.verts

-        self.file.write("\n")
-        i = 0
-        for v in face:
-            if i != 0:
-                self.file.write(", ")
+        for e in mesh.edges:
+            self.file.write("\n")

-            i+=1
+        self.file.write("\n")

-            self.file.write("%g, %g" % (v[0], v[1]))
-        color = [ int(c*255) for c in face.col]
+    def _printPolygon(self, face):
+        """Print our primitive, finally.
+ """ + + wireframe = False + + stroke_width=0.5 + + self.file.write("\n") + self.file.write(" stroke-linecap:round;stroke-linejoin:round") + self.file.write("\"/>\n") # --------------------------------------------------------------------- @@ -390,7 +421,7 @@ class Renderer: Objects = scene.getChildren() # A structure to store the transformed scene - newscene = [] + newscene = Scene.New("flat"+scene.name) for obj in Objects: @@ -402,8 +433,16 @@ class Renderer: proj = Projector(cameraObj, obj, self.canvasSize) # Let's store the transformed data - transformed_mesh = NMesh.New(obj.name) + transformed_mesh = NMesh.New("flat"+obj.name) + transformed_mesh.hasVertexColours(1) + # process Edges + for v in obj.getData().verts: + transformed_mesh.verts.append(v) + transformed_mesh.edges = self._processEdges(obj.getData().edges) + print transformed_mesh.edges + + # Store the materials materials = obj.getData().getMaterials() @@ -412,26 +451,33 @@ class Renderer: for face in meshfaces: # if the face is visible flatten it on the "picture plane" - if self._isFaceVisible(face, obj, cameraObj): + if self._isFaceVisible_old(face, obj, cameraObj): # Store transformed face - transformed_face = [] + newface = NMesh.Face() for vert in face: p = proj.doProjection(vert.co) - transformed_vert = NMesh.Vert(p[0], p[1], p[2]) - transformed_face.append(transformed_vert) + tmp_vert = NMesh.Vert(p[0], p[1], p[2]) - newface = NMesh.Face(transformed_face) + # Add the vert to the mesh + transformed_mesh.verts.append(tmp_vert) + + newface.v.append(tmp_vert) + # Per-face color calculation # code taken mostly from the original vrm script # TODO: understand the code and rewrite it clearly - ambient = -250 - fakelight = [10, 10, 15] - norm = face.normal + ambient = -150 + + fakelight = Object.Get("Lamp").loc + if fakelight == None: + fakelight = [1.0, 1.0, -0.3] + + norm = Vector(face.no) vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2]) vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2))) intensity = floor(ambient + 200*acos(vektori/vduzine))/200 @@ -439,21 +485,24 @@ class Renderer: intensity = 0 if materials: - newface.col = materials[face.mat].getRGBCol() + tmp_col = materials[face.mat].getRGBCol() else: - newface.col = [0.5, 0.5, 0.5] + tmp_col = [0.5, 0.5, 0.5] - newface.col = [ (c>0) and (c-intensity) for c in newface.col] + tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ] + + vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2]) + newface.col = [vcol, vcol, vcol, 255] transformed_mesh.addFace(newface) # at the end of the loop on obj - #transformed_object = NMesh.PutRaw(transformed_mesh) - newscene.append(transformed_mesh) + transformed_obj = Object.New(obj.getType(), "flat"+obj.name) + transformed_obj.link(transformed_mesh) + transformed_obj.loc = obj.loc + newscene.link(transformed_obj) - # reverse the order (TODO: See how is the object order in NMesh) - #newscene.reverse() return newscene @@ -462,7 +511,7 @@ class Renderer: # Private Methods # - def _isFaceVisible(self, face, obj, cameraObj): + def _isFaceVisible_old(self, face, obj, cameraObj): """Determine if the face is visible from the current camera. The following code is taken basicly from the original vrm script. 
@@ -508,20 +557,67 @@ class Renderer:
         c[1] += obj.LocY - camera.LocY
         c[2] += obj.LocZ - camera.LocZ

-        norm = Vector([0,0,0])
+        norm = [0, 0, 0]
         norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2])
         norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2]))
         norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])

         d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2]
-        # d = DotVecs(norm, Vector(a))
+        #d = DotVecs(Vector(norm), Vector(a))
+
+        return (d<0)
+
+    def _isFaceVisible(self, face, obj, cameraObj):
+        """Determine if the face is visible from the current camera.
+
+        The following code is taken basicly from the original vrm script.
+        """
+
+        camera = cameraObj
+
+        numvert = len(face)
+
+        # backface culling
+
+        # translate and rotate according to the object matrix
+        # and then translate according to the camera position
+        m = obj.getMatrix()
+        m.transpose()
+
+        a = m*Vector(face[0]) - Vector(cameraObj.loc)
+        b = m*Vector(face[1]) - Vector(cameraObj.loc)
+        c = m*Vector(face[numvert-1]) - Vector(cameraObj.loc)
+
+        norm = m*Vector(face.no)
+
+        d = DotVecs(norm, a)

         return (d<0)

-    def _doClipping(face):
+
+    def _doClipping():
         return

+    # Per object methods
+
+    def _doVisibleSurfaceDetermination(object):
+        return
+
+    def _doColorizing(object):
+        return
+
+    def _doStylizingEdges(self, object, style):
+        """Process Mesh Edges. (For now copy the edge data, in next version it
+        can be a place where recognize silouhettes and/or contours).
+
+        input: an edge list
+        return: a processed edge list
+        """
+        return
+
+

 # ---------------------------------------------------------------------
 #
 ## Main Program
 #
 # ---------------------------------------------------------------------
@@ -529,10 +625,24 @@ class Renderer:

-# hackish sorting of faces according to the max z value of a vertex
-def zSorting(scene):
-    for o in scene:
-        o.faces.sort(lambda f1, f2:
+# FIXME: really hackish code, just to test if the other parts work
+def depthSorting(scene):
+
+    cameraObj = Scene.GetCurrent().getCurrentCamera()
+    Objects = scene.getChildren()
+
+    Objects.sort(lambda obj1, obj2:
+        cmp(Vector(Vector(cameraObj.loc) - Vector(obj1.loc)).length,
+            Vector(Vector(cameraObj.loc) - Vector(obj2.loc)).length
+            )
+        )
+
+    # hackish sorting of faces according to the max z value of a vertex
+    for o in Objects:
+
+        mesh = o.data
+        mesh.faces.sort(
+            lambda f1, f2:
             # Sort faces according to the min z coordinate in a face
             #cmp(min([v[2] for v in f1]), min([v[2] for v in f2])))
@@ -541,23 +651,48 @@ def vectorize(filename):
             # Sort faces according to the avg z coordinate in a face
             #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))

-        o.faces.reverse()
+        mesh.faces.reverse()
+        mesh.update()
+
+    # update the scene
+    for o in scene.getChildren():
+        scene.unlink(o)
+    for o in Objects:
+        scene.link(o)

-
 def vectorize(filename):
-    scene = Scene.GetCurrent()
+    """The vectorizing process is as follows:
+
+    - Open the writer
+    - Render the scene
+    - Close the writer
+
+    If you want to render an animation the second pass should be
+    repeated for any frame, and the frame number should be passed to the
+    renderer.
+ """ + + print "Filename: %s" % filename + + scene = Scene.GetCurrent() renderer = Renderer() - + flatScene = renderer.doRendering(scene) canvasSize = renderer.getCanvasSize() - zSorting(flatScene) + depthSorting(flatScene) writer = SVGVectorWriter(filename, canvasSize) writer.printCanvas(flatScene) - -try: - Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg") -except: - vectorize("proba.svg") + + Blender.Scene.unlink(flatScene) + del flatScene + +# Here the main +if __name__ == "__main__": + # with this trick we can run the script in batch mode + try: + Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg") + except: + vectorize("proba.svg")