From ce9591fe89bfd93523fedeb453342561d7054dab Mon Sep 17 00:00:00 2001
From: Antonio Ospite
Date: Thu, 30 Mar 2006 19:42:13 +0200
Subject: [PATCH] Refactor projections and back-face culling

* We now support different projections according to the Blender camera
* The back-face culling routine now belongs to the Renderer class
* Add some hackish z-sorting for faces

Signed-off-by: Antonio Ospite
---
 vrm.py | 508 ++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 285 insertions(+), 223 deletions(-)

diff --git a/vrm.py b/vrm.py
index bd61a58..bab02a8 100755
--- a/vrm.py
+++ b/vrm.py
@@ -50,178 +50,133 @@ from math import *
 #
 # ---------------------------------------------------------------------
 
-class Projection:
-    def __init__(self):
-        print "New projection"
-
-class PerspectiveProjection(Projection):
-    def __init___(self):
-        Projection.__init__(self)
-        print "Perspective"
-
-    def doProjection():
-        print "do a perspective projection!!"
-
-def Perspective(fovy, aspect, near, far):
-    top = near * tan(fovy * pi / 360.0)
-    bottom = -top
-    left = bottom*aspect
-    right= top*aspect
-    x = (2.0 * near) / (right-left)
-    y = (2.0 * near) / (top-bottom)
-    a = (right+left) / (right-left)
-    b = (top+bottom) / (top - bottom)
-    c = - ((far+near) / (far-near))
-    d = - ((2*far*near)/(far-near))
-    return Matrix([x,0.0,a,0.0],[0.0,y,b,0.0],[0.0,0.0,c,d],[0.0,0.0,-1.0,0.0])
-
-def flatten_new(v, cameraObj, canvasSize, obMesh):
-
-    cam = cameraObj.getInverseMatrix()
-    cam.transpose()
-
-    # Changing the view mode
-    cmra = cameraObj.getData()
-
-    #if cmra.type:
-    #    print "Ortho"
-    #m2 = Ortho(fovy,float(w*ax)/float(h*ay),cmra.clipStart, cmra.clipEnd,17) #cmra.scale)
-    #else:
-    #    print "Perspective"
+class Projector:
+    """Calculate the projection of an object given the camera.
 
-    #Create Frustum
-    #frustum = _Frustum(cam,m2)
+    A projector is useful to do some per-object transformation to obtain the
+    projection of an object given the camera.
 
-    m1 = Matrix()
-    mP = Matrix()
-
-    fovy = atan(0.5/(float(canvasSize[0])/float(canvasSize[1]))/(cmra.lens/32))
-    fovy = fovy * 360/pi
+    The main method is #doProjection#; see the method description for the
+    parameter list.
+    """
 
-    m2 = Perspective(fovy,float(canvasSize[0])/float(canvasSize[1]),cmra.clipStart, cmra.clipEnd)
+    def __init__(self, cameraObj, obMesh, canvasSize):
+        """Calculate the projection matrix.
 
-    m1 = obMesh.matrixWorld #mat
-    m1.transpose()
-    mP = cam * m1
-    mP = m2 * mP
-
-    #Transform the vertices to global coordinates
-    p = mP*Vector([v.co[0],v.co[1],v.co[2],1.0])
-    #tf.append(p)
-    #p = m1*Vector([v.co[0],v.co[1],v.co[2],1.0])
-    #t2.append([p[0],p[1],p[2]])
-
-    mW = canvasSize[0]/2
-    mH = canvasSize[1]/2
-
-    if p[3]<=0:
-        p[0] = int(p[0]*mW)+mW
-        p[1] = int(p[1]*mH)+mH
-    else:
-        p[0] = int((p[0]/p[3])*mW)+mW
-        p[1] = int((p[1]/p[3])*mH)+mH
-
-    # Mirror and translate along y
-    p[1] *= -1
-    p[1] += canvasSize[1]
-
-    return p
+        The projection matrix depends, in this case, on the camera settings,
+        and also on the object transformation matrix.
+        """
+        self.size = canvasSize
+
+        camera = cameraObj.getData()
 
-# distance from camera Z'
-
-    dist = sqrt(PX*PX+PY*PY+PZ*PZ)
-    return dist
+        aspect = float(canvasSize[0])/float(canvasSize[1])
+        near = camera.clipStart
+        far = camera.clipEnd
 
-def RotatePoint(PX,PY,PZ,AngleX,AngleY,AngleZ):
-
-    NewPoint = []
-    # Rotate X
-    NewY = (PY * cos(AngleX))-(PZ * sin(AngleX))
-    NewZ = (PZ * cos(AngleX))+(PY * sin(AngleX))
-    # Rotate Y
-    PZ = NewZ
-    PY = NewY
-    NewZ = (PZ * cos(AngleY))-(PX * sin(AngleY))
-    NewX = (PX * cos(AngleY))+(PZ * sin(AngleY))
-    PX = NewX
-    PZ = NewZ
-    # Rotate Z
-    NewX = (PX * cos(AngleZ))-(PY * sin(AngleZ))
-    NewY = (PY * cos(AngleZ))+(PX * sin(AngleZ))
-    NewPoint.append(NewX)
-    NewPoint.append(NewY)
-    NewPoint.append(NewZ)
-    return NewPoint
+        fovy = atan(0.5/aspect/(camera.lens/32))
+        fovy = fovy * 360/pi
+
+        # What projection do we want?
+        if camera.type:
+            m2 = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale)
+        else:
+            m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+
+        m1 = Matrix()
+        mP = Matrix()
 
-def flatten(vertx, verty, vertz, cameraObj, canvasSize):
+        # View transformation
+        cam = cameraObj.getInverseMatrix()
+        cam.transpose()
 
-    camera = cameraObj.getData()
-    Lens = camera.getLens() # The Camera lens
+        m1 = obMesh.getMatrix()
+        m1.transpose()
+
+        mP = cam * m1
+        mP = m2 * mP
 
-    xres = canvasSize[0] # X res for output
-    yres = canvasSize[1] # Y res for output
-    ratio = xres/yres
+        self.projectionMatrix = mP
 
-    fov = atan(ratio * 16.0 / Lens) # Get fov stuff
-
-    dist = xres/2*tan(fov) # Calculate dist from pinhole camera to image plane
+    ##
+    # Public methods
+    #
 
-    screenxy=[0,0,vertz]
-    x=-vertx
-    y=verty
-    z=vertz
+    def doProjection(self, v):
+        """Project the point on the view plane.
 
-    #----------------------------
-    # calculate x'=dist*x/z & y'=dist*x/z
-    #----------------------------
-    screenxy[0]=int(xres/2.0+4*x*dist/z)
-    screenxy[1]=int(yres/2.0+4*y*dist/z)
-    return screenxy
+        Given a vertex calculate the projection using the current projection
+        matrix.
+        """
+
+        # Note that we need the vertex expressed using homogeneous coordinates
+        p = self.projectionMatrix * Vector([v[0], v[1], v[2], 1.0])
+
+        mW = self.size[0]/2
+        mH = self.size[1]/2
+
+        if p[3]<=0:
+            p[0] = int(p[0]*mW)+mW
+            p[1] = int(p[1]*mH)+mH
+        else:
+            p[0] = int((p[0]/p[3])*mW)+mW
+            p[1] = int((p[1]/p[3])*mH)+mH
+
+        # For now we want (0,0) in the top-left corner of the canvas
+        # Mirror and translate along y
+        p[1] *= -1
+        p[1] += self.size[1]
+
+        return p
 
-## Backface culling routine
-#
+    ##
+    # Private methods
+    #
+
+    def _calcPerspectiveMatrix(self, fovy, aspect, near, far):
+        """Return a perspective projection matrix."""
+
+        top = near * tan(fovy * pi / 360.0)
+        bottom = -top
+        left = bottom*aspect
+        right= top*aspect
+        x = (2.0 * near) / (right-left)
+        y = (2.0 * near) / (top-bottom)
+        a = (right+left) / (right-left)
+        b = (top+bottom) / (top - bottom)
+        c = - ((far+near) / (far-near))
+        d = - ((2*far*near)/(far-near))
+
+        m = Matrix(
+                [x, 0.0, a, 0.0],
+                [0.0, y, b, 0.0],
+                [0.0, 0.0, c, d],
+                [0.0, 0.0, -1.0, 0.0])
 
-def isFaceVisible(face, obj, cameraObj):
-    """
-    Determine if the face is visible from the current camera.
-    """
-    numvert = len(face)
-    # backface culling
-    a = []
-    a.append(face[0][0])
-    a.append(face[0][1])
-    a.append(face[0][2])
-    a = RotatePoint(a[0], a[1], a[2], obj.RotX, obj.RotY, obj.RotZ)
-    a[0] += obj.LocX - cameraObj.LocX
-    a[1] += obj.LocY - cameraObj.LocY
-    a[2] += obj.LocZ - cameraObj.LocZ
-    b = []
-    b.append(face[1][0])
-    b.append(face[1][1])
-    b.append(face[1][2])
-    b = RotatePoint(b[0], b[1], b[2], obj.RotX, obj.RotY, obj.RotZ)
-    b[0] += obj.LocX - cameraObj.LocX
-    b[1] += obj.LocY - cameraObj.LocY
-    b[2] += obj.LocZ - cameraObj.LocZ
-    c = []
-    c.append(face[numvert-1][0])
-    c.append(face[numvert-1][1])
-    c.append(face[numvert-1][2])
-    c = RotatePoint(c[0], c[1], c[2], obj.RotX, obj.RotY, obj.RotZ)
-    c[0] += obj.LocX - cameraObj.LocX
-    c[1] += obj.LocY - cameraObj.LocY
-    c[2] += obj.LocZ - cameraObj.LocZ
-
-    norm = [0,0,0]
-    norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2])
-    norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2]))
-    norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])
-
-    d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2]
-    return (d<0)
+        return m
+
+    def _calcOrthoMatrix(self, fovy, aspect, near, far, scale):
+        """Return an orthogonal projection matrix."""
+
+        top = near * tan(fovy * pi / 360.0) * (scale * 10)
+        bottom = -top
+        left = bottom * aspect
+        right= top * aspect
+        rl = right-left
+        tb = top-bottom
+        fn = near-far
+        tx = -((right+left)/rl)
+        ty = -((top+bottom)/tb)
+        tz = ((far+near)/fn)
+
+        m = Matrix(
+                [2.0/rl, 0.0, 0.0, tx],
+                [0.0, 2.0/tb, 0.0, ty],
+                [0.0, 0.0, 2.0/fn, tz],
+                [0.0, 0.0, 0.0, 1.0])
+
+        return m
 
 
 # ---------------------------------------------------------------------
@@ -231,6 +186,7 @@ def isFaceVisible(face, obj, cameraObj):
 # ---------------------------------------------------------------------
 
 # TODO: a class to represent the needed properties of a 2D vector image
+# Just use a NMesh structure?
 # ---------------------------------------------------------------------
 
@@ -262,13 +218,14 @@ class VectorWriter:
 
         self.canvasSize = canvasSize
 
+    ##
     # Public Methods
     #
 
     def printCanvas(mesh):
         return
-    
+    ##
     # Private Methods
     #
 
@@ -293,21 +250,26 @@ class SVGVectorWriter(VectorWriter):
 
         VectorWriter.__init__(self, file, canvasSize)
 
+    ##
     # Public Methods
     #
 
-    def printCanvas(self, mesh):
-        """Convert the mesh representation to SVG."""
+    def printCanvas(self, scene):
+        """Convert the scene representation to SVG."""
 
         self._printHeader()
 
-        for obj in mesh:
-            for face in obj:
+        for obj in scene:
+            self.file.write("\n")
+
+            for face in obj.faces:
                 self._printPolygon(face)
+
+            self.file.write("\n")
 
         self._printFooter()
-    
+    ##
     # Private Methods
     #
 
@@ -332,21 +294,26 @@ class SVGVectorWriter(VectorWriter):
 
         There is no color Handling for now, *FIX!*
         """
 
-        intensity = 128
         stroke_width=1
 
         self.file.write("\n")
+        self.file.write(" stroke-width:"+str(stroke_width)+";\n")
+        self.file.write(" stroke-linecap:round;stroke-linejoin:round\"/>\n")
 
 
 # ---------------------------------------------------------------------
@@ -355,6 +322,27 @@ class SVGVectorWriter(VectorWriter):
 #
 # ---------------------------------------------------------------------
 
+def RotatePoint(PX,PY,PZ,AngleX,AngleY,AngleZ):
+
+    NewPoint = []
+    # Rotate X
+    NewY = (PY * cos(AngleX))-(PZ * sin(AngleX))
+    NewZ = (PZ * cos(AngleX))+(PY * sin(AngleX))
+    # Rotate Y
+    PZ = NewZ
+    PY = NewY
+    NewZ = (PZ * cos(AngleY))-(PX * sin(AngleY))
+    NewX = (PX * cos(AngleY))+(PZ * sin(AngleY))
+    PX = NewX
+    PZ = NewZ
+    # Rotate Z
+    NewX = (PX * cos(AngleZ))-(PY * sin(AngleZ))
+    NewY = (PY * cos(AngleZ))+(PX * sin(AngleZ))
+    NewPoint.append(NewX)
+    NewPoint.append(NewY)
+    NewPoint.append(NewZ)
+    return NewPoint
+
 class Renderer:
     """Render a scene viewed from a given camera.
 
@@ -374,6 +362,7 @@ class Renderer:
 
         self.canvasSize = (0.0, 0.0)
 
+    ##
     # Public Methods
     #
 
@@ -395,16 +384,13 @@ class Renderer:
 
         if cameraObj == None:
             cameraObj = scene.getCurrentCamera()
 
-        # TODO: given the camera get the Wold-to-camera transform and the
-        # projection matrix
-
         context = scene.getRenderingContext()
         self.canvasSize = (context.imageSizeX(), context.imageSizeY())
 
         Objects = scene.getChildren()
 
-        # A mesh to store the transformed geometrical structure
-        mesh = []
+        # A structure to store the transformed scene
+        newscene = []
 
         for obj in Objects:
 
@@ -412,69 +398,127 @@ class Renderer:
                 print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
                 continue
 
-            OBJmesh = obj.getData()  # Get the mesh data for the object
-            meshfaces = OBJmesh.faces  # The number of faces in the object
+            # Get a projector for this object
+            proj = Projector(cameraObj, obj, self.canvasSize)
 
-            transformed_object = []
+            # Let's store the transformed data
+            transformed_mesh = NMesh.New(obj.name)
 
-            for face in meshfaces:
+            # Store the materials
+            materials = obj.getData().getMaterials()
+
+            meshfaces = obj.getData().faces
 
-                # TODO: per face color calculation
-                # TODO: add/sorting in Z' direction (per face??)
+            for face in meshfaces:
 
                 # if the face is visible flatten it on the "picture plane"
-                if isFaceVisible(face, obj, cameraObj):
+                if self._isFaceVisible(face, obj, cameraObj):
 
                     # Store transformed face
                     transformed_face = []
 
                     for vert in face:
 
-                        vertxyz = list(vert)
-
-                        p1 = flatten_new(vert, cameraObj, self.canvasSize,
-                                obj)
-                        transformed_face.append(p1)
-                        continue
-
-                        # rotate camera
-                        vertxyz = RotatePoint(vertxyz[0], vertxyz[1], vertxyz[2],
-                                cameraObj.RotX, cameraObj.RotY, cameraObj.RotZ)
-                                #-cameraObj.RotX, -cameraObj.RotY, -cameraObj.RotZ)
-
+                        p = proj.doProjection(vert.co)
 
-                        # original setting for translate
-                        vertxyz[0] -= (obj.LocX - cameraObj.LocX)
-                        vertxyz[1] -= (obj.LocY - cameraObj.LocY)
-                        vertxyz[2] -= (obj.LocZ - cameraObj.LocZ)
+                        transformed_vert = NMesh.Vert(p[0], p[1], p[2])
+                        transformed_face.append(transformed_vert)
+                    newface = NMesh.Face(transformed_face)
+
+                    # Per-face color calculation
+                    # code taken mostly from the original vrm script
+                    # TODO: understand the code and rewrite it clearly
+                    ambient = -250
+                    fakelight = [10, 10, 15]
+                    norm = face.normal
+                    vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2])
+                    vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2)))
+                    intensity = floor(ambient + 200*acos(vektori/vduzine))/200
+                    if intensity < 0:
+                        intensity = 0
+
+                    if materials:
+                        newface.col = materials[face.mat].getRGBCol()
+                    else:
+                        newface.col = [0.5, 0.5, 0.5]
+
+                    newface.col = [ (c>0) and (c-intensity) for c in newface.col]
+
+                    transformed_mesh.addFace(newface)
 
-                        # rotate object
-                        vertxyz = RotatePoint(vertxyz[0], vertxyz[1], vertxyz[2], obj.RotX, obj.RotY, obj.RotZ)
-
+            # at the end of the loop on obj
+
+            #transformed_object = NMesh.PutRaw(transformed_mesh)
+            newscene.append(transformed_mesh)
+        # reverse the order (TODO: See how is the object order in NMesh)
+        #newscene.reverse()
+
+        return newscene
 
-                        p1 = flatten(vertxyz[0], vertxyz[1], vertxyz[2],
-                                cameraObj, self.canvasSize)
-                        transformed_face.append(p1)
-
-                # just some fake lighting...
+    ##
+    # Private Methods
+    #
 
-                transformed_object.append(transformed_face)
+    def _isFaceVisible(self, face, obj, cameraObj):
+        """Determine if the face is visible from the current camera.
 
-            # at the end of the loop on obj
-            mesh.append(transformed_object)
-        return mesh
+        The following code is taken basically from the original vrm script.
+        """
+        camera = cameraObj
 
-    # Private Methods
-    #
+        numvert = len(face)
 
-    def _removehiddenFaces(obj):
-        return
+        # backface culling
 
-    def _testClipping(face):
+        # translate and rotate according to the object matrix
+        # and then translate according to the camera position
+        #m = obj.getMatrix()
+        #m.transpose()
+
+        #a = m*Vector(face[0]) - Vector(cameraObj.loc)
+        #b = m*Vector(face[1]) - Vector(cameraObj.loc)
+        #c = m*Vector(face[numvert-1]) - Vector(cameraObj.loc)
+
+        a = []
+        a.append(face[0][0])
+        a.append(face[0][1])
+        a.append(face[0][2])
+        a = RotatePoint(a[0], a[1], a[2], obj.RotX, obj.RotY, obj.RotZ)
+        a[0] += obj.LocX - camera.LocX
+        a[1] += obj.LocY - camera.LocY
+        a[2] += obj.LocZ - camera.LocZ
+        b = []
+        b.append(face[1][0])
+        b.append(face[1][1])
+        b.append(face[1][2])
+        b = RotatePoint(b[0], b[1], b[2], obj.RotX, obj.RotY, obj.RotZ)
+        b[0] += obj.LocX - camera.LocX
+        b[1] += obj.LocY - camera.LocY
+        b[2] += obj.LocZ - camera.LocZ
+        c = []
+        c.append(face[numvert-1][0])
+        c.append(face[numvert-1][1])
+        c.append(face[numvert-1][2])
+        c = RotatePoint(c[0], c[1], c[2], obj.RotX, obj.RotY, obj.RotZ)
+        c[0] += obj.LocX - camera.LocX
+        c[1] += obj.LocY - camera.LocY
+        c[2] += obj.LocZ - camera.LocZ
+
+        norm = Vector([0,0,0])
+        norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2])
+        norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2]))
+        norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])
+
+        d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2]
+        # d = DotVecs(norm, Vector(a))
+
+        return (d<0)
+
+    def _doClipping(face):
         return
 
 
@@ -485,17 +529,35 @@
 
 # ---------------------------------------------------------------------
 
-scene = Scene.GetCurrent()
-renderer = Renderer()
+# hackish sorting of faces according to the max z value of a vertex
+def zSorting(scene):
+    for o in scene:
+        o.faces.sort(lambda f1, f2:
+                # Sort faces according to the min z coordinate in a face
+                #cmp(min([v[2] for v in f1]), min([v[2] for v in f2])))
+
+                # Sort faces according to the max z coordinate in a face
+                cmp(max([v[2] for v in f1]), max([v[2] for v in f2])))
+
+                # Sort faces according to the avg z coordinate in a face
+                #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))
+        o.faces.reverse()
+
 
-projectedMesh = renderer.doRendering(scene)
-canvasSize = renderer.getCanvasSize()
+def vectorize(filename):
+    scene = Scene.GetCurrent()
+    renderer = Renderer()
 
-# hackish sorting of faces according to the max z value of a vertex
-for o in projectedMesh:
-    o.sort(lambda f1, f2:
-        cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))
-    o.reverse()
+    flatScene = renderer.doRendering(scene)
+    canvasSize = renderer.getCanvasSize()
+
+    zSorting(flatScene)
+
+    writer = SVGVectorWriter(filename, canvasSize)
+    writer.printCanvas(flatScene)
+
+try:
+    Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg")
+except:
+    vectorize("proba.svg")
 
-writer = SVGVectorWriter("proba.svg", canvasSize)
-writer.printCanvas(projectedMesh)
-- 
2.1.4
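Note (not part of the patch above): for readers who want to try the projection math outside Blender, here is a minimal standalone sketch of the same perspective matrix and canvas mapping that the patch's Projector._calcPerspectiveMatrix() and doProjection() implement. The camera values (lens, clip distances, canvas size) and the sample point are made-up examples, and plain Python 2 lists stand in for Blender's Matrix and Vector types.

# Standalone sketch: same formulas as Projector, no Blender required.
from math import atan, tan, pi

def perspective_matrix(fovy, aspect, near, far):
    # Same frustum formulas as Projector._calcPerspectiveMatrix()
    top = near * tan(fovy * pi / 360.0)
    bottom = -top
    left = bottom * aspect
    right = top * aspect
    x = (2.0 * near) / (right - left)
    y = (2.0 * near) / (top - bottom)
    a = (right + left) / (right - left)
    b = (top + bottom) / (top - bottom)
    c = -((far + near) / (far - near))
    d = -((2 * far * near) / (far - near))
    return [[x,   0.0,  a,   0.0],
            [0.0, y,    b,   0.0],
            [0.0, 0.0,  c,   d  ],
            [0.0, 0.0, -1.0, 0.0]]

def project(m, v, size):
    # Same steps as Projector.doProjection(): multiply the homogeneous
    # point by the matrix, do the perspective divide, then map to canvas
    # coordinates with (0, 0) in the top-left corner.
    p = [sum([m[i][j] * v[j] for j in range(4)]) for i in range(4)]
    mW, mH = size[0] / 2.0, size[1] / 2.0
    if p[3] <= 0:
        p[0] = int(p[0] * mW) + mW
        p[1] = int(p[1] * mH) + mH
    else:
        p[0] = int((p[0] / p[3]) * mW) + mW
        p[1] = int((p[1] / p[3]) * mH) + mH
    p[1] = size[1] - p[1]   # mirror and translate along y
    return p

if __name__ == "__main__":
    size = (640, 480)
    aspect = float(size[0]) / float(size[1])
    lens = 35.0                                   # example focal length in mm
    fovy = atan(0.5 / aspect / (lens / 32)) * 360 / pi
    m = perspective_matrix(fovy, aspect, 0.1, 100.0)
    # a point half a unit right and up, five units in front of the camera
    print project(m, [0.5, 0.5, -5.0, 1.0], size)

The p[3] <= 0 branch is the same guard the patch uses for points behind the camera; everything else is a direct transcription of the formulas in the diff.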