#
# Additional credits:
# Thanks to Emilio Aguirre for S2flender from which I took inspirations :)
-# Thanks to Anthony C. D'Agostino for the backface.py script
+# Thanks to Anthony C. D'Agostino for the original backface.py script
#
# ---------------------------------------------------------------------
import Blender
-from Blender import Scene, Object, NMesh, Lamp, Camera
+from Blender import Scene, Object, Mesh, NMesh, Lamp, Camera
from Blender.Mathutils import *
from math import *
#
# ---------------------------------------------------------------------
-class Projection:
- def __init__(self):
- print "New projection"
-
-class PerspectiveProjection(Projection):
- def __init___(self):
- Projection.__init__(self)
- print "Perspective"
-
- def doProjection():
- print "do a perspective projection!!"
-
-def Perspective(fovy, aspect, near, far):
- top = near * tan(fovy * pi / 360.0)
- bottom = -top
- left = bottom*aspect
- right= top*aspect
- x = (2.0 * near) / (right-left)
- y = (2.0 * near) / (top-bottom)
- a = (right+left) / (right-left)
- b = (top+bottom) / (top - bottom)
- c = - ((far+near) / (far-near))
- d = - ((2*far*near)/(far-near))
- return Matrix([x,0.0,a,0.0],[0.0,y,b,0.0],[0.0,0.0,c,d],[0.0,0.0,-1.0,0.0])
-
-def flatten_new(v, cameraObj, canvasSize, obMesh):
-
- cam = cameraObj.getInverseMatrix()
- cam.transpose()
-
- # Changing the view mode
- cmra = cameraObj.getData()
-
- #if cmra.type:
- # print "Ortho"
- #m2 = Ortho(fovy,float(w*ax)/float(h*ay),cmra.clipStart, cmra.clipEnd,17) #cmra.scale)
- #else:
- # print "Perspective"
-
- #Create Frustum
- #frustum = _Frustum(cam,m2)
+class Projector:
+ """Calculate the projection of an object given the camera.
- m1 = Matrix()
- mP = Matrix()
+ A projector is useful to do some per-object transformation to obtain the
+ projection of an object given the camera.
- fovy = atan(0.5/(float(canvasSize[0])/float(canvasSize[1]))/(cmra.lens/32))
- fovy = fovy * 360/pi
+ The main method is #doProjection#; see the method description for the
+ parameter list.
+ """
- m2 = Perspective(fovy,float(canvasSize[0])/float(canvasSize[1]),cmra.clipStart, cmra.clipEnd)
+ def __init__(self, cameraObj, canvasRatio):
+ """Calculate the projection matrix.
- m1 = obMesh.matrixWorld #mat
- m1.transpose()
- mP = cam * m1
- mP = m2 * mP
-
- #Transform the vertices to global coordinates
- p = mP*Vector([v.co[0],v.co[1],v.co[2],1.0])
- #tf.append(p)
- #p = m1*Vector([v.co[0],v.co[1],v.co[2],1.0])
- #t2.append([p[0],p[1],p[2]])
-
- mW = canvasSize[0]/2
- mH = canvasSize[1]/2
-
- if p[3]<=0:
- p[0] = int(p[0]*mW)+mW
- p[1] = int(p[1]*mH)+mH
- else:
- p[0] = int((p[0]/p[3])*mW)+mW
- p[1] = int((p[1]/p[3])*mH)+mH
-
- # Mirror and translate along y
- p[1] *= -1
- p[1] += canvasSize[1]
-
- return p
+ The projection matrix depends, in this case, on the camera settings,
+ and also on the object transformation matrix.
+ """
+ camera = cameraObj.getData()
+ aspect = float(canvasRatio[0])/float(canvasRatio[1])
+ near = camera.clipStart
+ far = camera.clipEnd
-# distance from camera Z'
-def Distance(PX,PY,PZ):
-
- dist = sqrt(PX*PX+PY*PY+PZ*PZ)
- return dist
+ fovy = atan(0.5/aspect/(camera.lens/32))
+ fovy = fovy * 360/pi
+
+ # What projection do we want?
+ if camera.type:
+ m2 = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale)
+ else:
+ m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+
-def RotatePoint(PX,PY,PZ,AngleX,AngleY,AngleZ):
-
- NewPoint = []
- # Rotate X
- NewY = (PY * cos(AngleX))-(PZ * sin(AngleX))
- NewZ = (PZ * cos(AngleX))+(PY * sin(AngleX))
- # Rotate Y
- PZ = NewZ
- PY = NewY
- NewZ = (PZ * cos(AngleY))-(PX * sin(AngleY))
- NewX = (PX * cos(AngleY))+(PZ * sin(AngleY))
- PX = NewX
- PZ = NewZ
- # Rotate Z
- NewX = (PX * cos(AngleZ))-(PY * sin(AngleZ))
- NewY = (PY * cos(AngleZ))+(PX * sin(AngleZ))
- NewPoint.append(NewX)
- NewPoint.append(NewY)
- NewPoint.append(NewZ)
- return NewPoint
-
-def flatten(vertx, verty, vertz, cameraObj, canvasSize):
-
- camera = cameraObj.getData()
- Lens = camera.getLens() # The Camera lens
-
- xres = canvasSize[0] # X res for output
- yres = canvasSize[1] # Y res for output
- ratio = xres/yres
-
- fov = atan(ratio * 16.0 / Lens) # Get fov stuff
-
- dist = xres/2*tan(fov) # Calculate dist from pinhole camera to image plane
+ # View transformation
+ cam = Matrix(cameraObj.getInverseMatrix())
+ cam.transpose()
+
+ # FIXME: remove the commented part, we used to pass object in local
+ # coordinates, but this is not very clean, we should apply modelview
+ # transformations _before_ (at some other level).
+ #m1 = Matrix(obMesh.getMatrix())
+ #m1.transpose()
+
+ #mP = cam * m1
+ mP = cam
+ mP = m2 * mP
- screenxy=[0,0,vertz]
- x=-vertx
- y=verty
- z=vertz
+ self.projectionMatrix = mP
- #----------------------------
- # calculate x'=dist*x/z & y'=dist*x/z
- #----------------------------
- screenxy[0]=int(xres/2.0+4*x*dist/z)
- screenxy[1]=int(yres/2.0+4*y*dist/z)
- return screenxy
+ ##
+ # Public methods
+ #
-## Backface culling routine
-#
+ def doProjection(self, v):
+ """Project the point on the view plane.
-def isFaceVisible(face, obj, cameraObj):
- """
- Determine if the face is visible from the current camera.
- """
- numvert = len(face)
- # backface culling
- a = []
- a.append(face[0][0])
- a.append(face[0][1])
- a.append(face[0][2])
- a = RotatePoint(a[0], a[1], a[2], obj.RotX, obj.RotY, obj.RotZ)
- a[0] += obj.LocX - cameraObj.LocX
- a[1] += obj.LocY - cameraObj.LocY
- a[2] += obj.LocZ - cameraObj.LocZ
- b = []
- b.append(face[1][0])
- b.append(face[1][1])
- b.append(face[1][2])
- b = RotatePoint(b[0], b[1], b[2], obj.RotX, obj.RotY, obj.RotZ)
- b[0] += obj.LocX - cameraObj.LocX
- b[1] += obj.LocY - cameraObj.LocY
- b[2] += obj.LocZ - cameraObj.LocZ
- c = []
- c.append(face[numvert-1][0])
- c.append(face[numvert-1][1])
- c.append(face[numvert-1][2])
- c = RotatePoint(c[0], c[1], c[2], obj.RotX, obj.RotY, obj.RotZ)
- c[0] += obj.LocX - cameraObj.LocX
- c[1] += obj.LocY - cameraObj.LocY
- c[2] += obj.LocZ - cameraObj.LocZ
-
- norm = [0,0,0]
- norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2])
- norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2]))
- norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])
-
- d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2]
- return (d<0)
+ Given a vertex calculate the projection using the current projection
+ matrix.
+ """
+
+ # Note that we need the vertex expressed using homogeneous coordinates
+ p = self.projectionMatrix * Vector(v).resize4D()
+
+ if p[3]>0:
+ p[0] = p[0]/p[3]
+ p[1] = p[1]/p[3]
+
+ return p
+
+ ##
+ # Private methods
+ #
+
+ def _calcPerspectiveMatrix(self, fovy, aspect, near, far):
+ """Return a perspective projection matrix."""
+
+ top = near * tan(fovy * pi / 360.0)
+ bottom = -top
+ left = bottom*aspect
+ right= top*aspect
+ x = (2.0 * near) / (right-left)
+ y = (2.0 * near) / (top-bottom)
+ a = (right+left) / (right-left)
+ b = (top+bottom) / (top - bottom)
+ c = - ((far+near) / (far-near))
+ d = - ((2*far*near)/(far-near))
+
+ m = Matrix(
+ [x, 0.0, a, 0.0],
+ [0.0, y, b, 0.0],
+ [0.0, 0.0, c, d],
+ [0.0, 0.0, -1.0, 0.0])
+
+ return m
+
+ def _calcOrthoMatrix(self, fovy, aspect , near, far, scale):
+ """Return an orthogonal projection matrix."""
+
+ top = near * tan(fovy * pi / 360.0) * (scale * 10)
+ bottom = -top
+ left = bottom * aspect
+ right= top * aspect
+ rl = right-left
+ tb = top-bottom
+ fn = near-far
+ tx = -((right+left)/rl)
+ ty = -((top+bottom)/tb)
+ tz = ((far+near)/fn)
+
+ m = Matrix(
+ [2.0/rl, 0.0, 0.0, tx],
+ [0.0, 2.0/tb, 0.0, ty],
+ [0.0, 0.0, 2.0/fn, tz],
+ [0.0, 0.0, 0.0, 1.0])
+
+ return m
# ---------------------------------------------------------------------
#
-## Mesh representation class
+## Object representation class
#
# ---------------------------------------------------------------------
# TODO: a class to represent the needed properties of a 2D vector image
+# Just use a NMesh structure?
# ---------------------------------------------------------------------
- printCanvas(mesh) --- where mesh is as specified before.
"""
- def __init__(self, fileName, canvasSize):
+ def __init__(self, fileName):
"""Open the file named #fileName# and set the canvas size."""
self.file = open(fileName, "w")
print "Outputting to: ", fileName
- self.canvasSize = canvasSize
+
+ context = Scene.GetCurrent().getRenderingContext()
+ self.canvasSize = ( context.imageSizeX(), context.imageSizeY() )
+ ##
# Public Methods
#
def printCanvas(mesh):
return
-
+ ##
# Private Methods
#
Sorry.
"""
- def __init__(self, file, canvasSize):
+ def __init__(self, file):
"""Simply call the parent Contructor."""
- VectorWriter.__init__(self, file, canvasSize)
+ VectorWriter.__init__(self, file)
+ ##
# Public Methods
#
-
- def printCanvas(self, mesh):
- """Convert the mesh representation to SVG."""
+ def open(self):
self._printHeader()
-
- for obj in mesh:
- for face in obj:
- self._printPolygon(face)
-
+
+ def close(self):
self._printFooter()
+
+
+ def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False):
+ """Convert the scene representation to SVG."""
+
+ Objects = scene.getChildren()
+ for obj in Objects:
+
+ if(obj.getType() != 'Mesh'):
+ continue
+ #
+
+ self.file.write("<g>\n")
+
+
+ if doPrintPolygons:
+ for face in obj.getData().faces:
+ self._printPolygon(face)
+
+ if doPrintEdges:
+ self._printEdges(obj.getData(), showHiddenEdges)
+
+ self.file.write("</g>\n")
+
+ ##
# Private Methods
#
"""Print SVG header."""
self.file.write("<?xml version=\"1.0\"?>\n")
- self.file.write("<svg version=\"1.2\"\n")
+ self.file.write("<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n")
+ self.file.write("\t\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n")
+ self.file.write("<svg version=\"1.1\"\n")
self.file.write("\txmlns=\"http://www.w3.org/2000/svg\"\n")
self.file.write("\twidth=\"%d\" height=\"%d\" streamable=\"true\">\n\n" %
self.canvasSize)
self.file.write("\n</svg>\n")
self.file.close()
+ def _printEdges(self, mesh, showHiddenEdges=False):
+ """Print the wireframe using mesh edges... is this the correct way?
+ """
+
+ stroke_width=0.5
+ stroke_col = [0, 0, 0]
+
+ self.file.write("<g>\n")
+
+ for e in mesh.edges:
+
+ hidden_stroke_style = ""
+
+ # An edge is selected if both vertices are selected
+ if e.v1.sel == 0 or e.v2.sel == 0:
+ if showHiddenEdges == False:
+ continue
+ else:
+ hidden_stroke_style = ";\n stroke-dasharray:3, 3"
+
+ p1 = self._calcCanvasCoord(e.v1)
+ p2 = self._calcCanvasCoord(e.v2)
+
+ self.file.write("<line x1=\"%g\" y1=\"%g\" x2=\"%g\" y2=\"%g\"\n"
+ % ( p1[0], p1[1], p2[0], p2[1] ) )
+ self.file.write(" style=\"stroke:rgb("+str(stroke_col[0])+","+str(stroke_col[1])+","+str(stroke_col[2])+");")
+ self.file.write(" stroke-width:"+str(stroke_width)+";\n")
+ self.file.write(" stroke-linecap:round;stroke-linejoin:round")
+ self.file.write(hidden_stroke_style)
+ self.file.write("\"/>\n")
+
+ self.file.write("</g>\n")
+
+
+
def _printPolygon(self, face):
"""Print our primitive, finally.
-
- There is no color Handling for now, *FIX!*
"""
- intensity = 128
- stroke_width=1
+ wireframe = False
+
+ stroke_width=0.5
self.file.write("<polygon points=\"")
for v in face:
- if face.index(v)!= 0:
- self.file.write(", ")
-
- self.file.write(`v[0]` + ", " + `v[1]`)
-
+ p = self._calcCanvasCoord(v)
+ self.file.write("%g,%g " % (p[0], p[1]))
+
+ self.file.seek(-1,1) # get rid of the last space
self.file.write("\"\n")
- self.file.write("\tstyle=\"fill:rgb("+str(intensity)+","+str(intensity)+","+str(intensity)+");")
- self.file.write(" stroke:rgb(0,0,0);")
- self.file.write(" stroke-width:"+str(stroke_width)+"\"/>\n")
+
+ #take as face color the first vertex color
+ if face.col:
+ fcol = face.col[0]
+ color = [fcol.r, fcol.g, fcol.b]
+ else:
+ color = [ 255, 255, 255]
+
+ stroke_col = [0, 0, 0]
+ if not wireframe:
+ stroke_col = color
+
+ self.file.write("\tstyle=\"fill:rgb("+str(color[0])+","+str(color[1])+","+str(color[2])+");")
+ self.file.write(" stroke:rgb("+str(stroke_col[0])+","+str(stroke_col[1])+","+str(stroke_col[2])+");")
+ self.file.write(" stroke-width:"+str(stroke_width)+";\n")
+ self.file.write(" stroke-linecap:round;stroke-linejoin:round")
+ self.file.write("\"/>\n")
+
+ def _calcCanvasCoord(self, v):
+
+ pt = Vector([0, 0, 0])
+
+ mW = self.canvasSize[0]/2
+ mH = self.canvasSize[1]/2
+
+ # rescale to canvas size
+ pt[0] = round(v[0]*mW)+mW
+ pt[1] = round(v[1]*mH)+mH
+
+ # For now we want (0,0) in the top-left corner of the canvas
+ # Mirror and translate along y
+ pt[1] *= -1
+ pt[1] += self.canvasSize[1]
+
+ return pt
# ---------------------------------------------------------------------
class Renderer:
"""Render a scene viewed from a given camera.
- This class is responsible of the rendering process, hence transormation
+ This class is responsible of the rendering process, hence transformation
and projection of the ojects in the scene are invoked by the renderer.
The user can optionally provide a specific camera for the rendering, see
"""
def __init__(self):
- """Set the canvas size to a defaulr value.
-
- The only instance attribute here is the canvas size, which can be
- queryed to the renderer by other entities.
+ """Make the rendering process only for the current scene by default.
"""
- self.canvasSize = (0.0, 0.0)
+ # Render the current Scene set as a READ-ONLY property
+ self._SCENE = Scene.GetCurrent()
+
+ # Use the aspect ratio of the scene rendering context
+ context = self._SCENE.getRenderingContext()
+ self.canvasRatio = (context.aspectRatioX(), context.aspectRatioY())
+
+ # Render from the currently active camera
+ self.camera = self._SCENE.getCurrentCamera()
+
+ ##
# Public Methods
#
- def getCanvasSize(self):
- """Return the current canvas size read from Blender rendering context"""
- return self.canvasSize
+ def doRendering(self, outputWriter, animation=0):
+ """Render picture or animation and write it out.
- def doRendering(self, scene, cameraObj=None):
+ The parameters are:
+ - a Vector writer object that will be used to output the result.
+ - a flag to tell if we want to render an animation or only the
+ current frame.
+ """
+
+ context = self._SCENE.getRenderingContext()
+ currentFrame = context.currentFrame()
+
+ # Handle the animation case
+ if animation == 0:
+ startFrame = currentFrame
+ endFrame = startFrame
+ else:
+ startFrame = context.startFrame()
+ endFrame = context.endFrame()
+
+ # Do the rendering process frame by frame
+ print "Start Rendering!"
+ for f in range(startFrame, endFrame+1):
+ context.currentFrame(f)
+ renderedScene = self.doRenderScene(self._SCENE)
+ outputWriter.printCanvas(renderedScene,
+ doPrintPolygons=False, doPrintEdges=True, showHiddenEdges=True)
+
+ # clear the rendered scene
+ self._SCENE.makeCurrent()
+ Scene.unlink(renderedScene)
+ del renderedScene
+
+ print "Done!"
+ context.currentFrame(currentFrame)
+
+
+
+ def doRenderScene(self, inputScene):
"""Control the rendering process.
Here we control the entire rendering process invoking the operation
- needed to transforma project the 3D scene in two dimensions.
-
- Parameters:
- scene --- the Blender Scene to render
- cameraObj --- the camera object to use for the viewing processing
+ needed to transform and project the 3D scene in two dimensions.
"""
-
- if cameraObj == None:
- cameraObj = scene.getCurrentCamera()
- # TODO: given the camera get the Wold-to-camera transform and the
- # projection matrix
+ # Use some temporary workspace, a full copy of the scene
+ workScene = inputScene.copy(2)
+
+ # Get a projector for this scene.
+ # NOTE: the projector wants object in world coordinates,
+ # so we should apply modelview transformations _before_
+ # projection transformations
+ proj = Projector(self.camera, self.canvasRatio)
+
+ # global processing of the scene
+ self._doDepthSorting(workScene)
- context = scene.getRenderingContext()
- self.canvasSize = (context.imageSizeX(), context.imageSizeY())
+ # Per object activities
+ Objects = workScene.getChildren()
- Objects = scene.getChildren()
+ for obj in Objects:
+
+ if (obj.getType() != 'Mesh'):
+ print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
+ continue
+ #
+
+ self._doModelViewTransformations(obj)
+
+ self._doBackFaceCulling(obj)
+
+ self._doColorAndLighting(obj)
+
+ # 'style' can be a function that determine
+ # if an edge should be showed?
+ self._doEdgesStyle(obj, style=None)
+
+ self._doProjection(obj, proj)
+
+ return workScene
+
+
+ def oldRenderScene(scene):
- # A mesh to store the transformed geometrical structure
- mesh = []
+ # Per object activities
+ Objects = workScene.getChildren()
for obj in Objects:
- if (obj.getType() != "Mesh"):
+ if (obj.getType() != 'Mesh'):
print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
continue
+
+ # Get a projector for this object
+ proj = Projector(self.camera, obj, self.canvasSize)
- OBJmesh = obj.getData() # Get the mesh data for the object
- meshfaces = OBJmesh.faces # The number of faces in the object
+ # Let's store the transformed data
+ transformed_mesh = NMesh.New("flat"+obj.name)
+ transformed_mesh.hasVertexColours(1)
- transformed_object = []
+ # process Edges
+ self._doProcessEdges(obj)
+
+ for v in obj.getData().verts:
+ transformed_mesh.verts.append(v)
+ transformed_mesh.edges = self._processEdges(obj.getData().edges)
+ #print transformed_mesh.edges
- for face in meshfaces:
+
+ # Store the materials
+ materials = obj.getData().getMaterials()
- # TODO: per face color calculation
- # TODO: add/sorting in Z' direction (per face??)
+ meshfaces = obj.getData().faces
+
+ for face in meshfaces:
# if the face is visible flatten it on the "picture plane"
- if isFaceVisible(face, obj, cameraObj):
+ if self._isFaceVisible(face, obj, cameraObj):
# Store transformed face
- transformed_face = []
+ newface = NMesh.Face()
for vert in face:
- vertxyz = list(vert)
+ p = proj.doProjection(vert.co)
+
+ tmp_vert = NMesh.Vert(p[0], p[1], p[2])
+
+ # Add the vert to the mesh
+ transformed_mesh.verts.append(tmp_vert)
+
+ newface.v.append(tmp_vert)
+
+
+ # Per-face color calculation
+ # code taken mostly from the original vrm script
+ # TODO: understand the code and rewrite it clearly
+ ambient = -150
+
+ fakelight = Object.Get("Lamp").loc
+ if fakelight == None:
+ fakelight = [1.0, 1.0, -0.3]
+
+ norm = Vector(face.no)
+ vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2])
+ vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2)))
+ intensity = floor(ambient + 200*acos(vektori/vduzine))/200
+ if intensity < 0:
+ intensity = 0
+
+ if materials:
+ tmp_col = materials[face.mat].getRGBCol()
+ else:
+ tmp_col = [0.5, 0.5, 0.5]
- p1 = flatten_new(vert, cameraObj, self.canvasSize,
- obj)
- transformed_face.append(p1)
- continue
+ tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ]
+
+ vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2])
+ newface.col = [vcol, vcol, vcol, 255]
+
+ transformed_mesh.addFace(newface)
+
+ # at the end of the loop on obj
+
+ transformed_obj = Object.New(obj.getType(), "flat"+obj.name)
+ transformed_obj.link(transformed_mesh)
+ transformed_obj.loc = obj.loc
+ newscene.link(transformed_obj)
+
+
+ return workScene
+
+
+ ##
+ # Private Methods
+ #
+
+ # Faces methods
+
+ def _isFaceVisible(self, face, obj, camObj):
+ """Determine if a face of an object is visible from a given camera.
+
+ The normals need to be transformed, but note that we should apply only the
+ rotation part of the transformation matrix, since the normals are
+ normalized and they can be intended as starting from the origin.
+
+ The view vector is calculated from the camera location and one of the
+ vertices of the face (expressed in World coordinates, after applying
+ modelview transformations).
+
+ After those transformations we determine if a face is visible by computing
+ the angle between the face normal and the view vector, this angle
+ corresponds somehow to the dot product between the two. If the product
+ is greater than zero then the angle between the two vectors is less than
+ 90 degrees and the face is visible.
+
+ There is no need to normalize those vectors since we are only interested in
+ the sign of the dot product and not in the product value.
+ """
+
+ # The transformation matrix of the object
+ mObj = Matrix(obj.getMatrix())
+ mObj.transpose()
+
+ # The normal after applying the current object rotation
+ #normal = mObj.rotationPart() * Vector(face.no)
+ normal = Vector(face.no)
+
+ # View vector in orthographic projections can be considered simply as the
+ # camera position
+ #view_vect = Vector(camObj.loc)
+
+ # View vector as in perspective projections
+ # it is the difference between the camera position and
+ # one point of the face, we choose the first point,
+ # but maybe a better choice may be the farthest point from the camera.
+ point = Vector(face[0].co)
+ #point = mObj * point.resize4D()
+ #point.resize3D()
+ view_vect = Vector(camObj.loc) - point
+
+
+ # if d <= 0 the face is not visible from the camera
+ d = view_vect * normal
+
+ if d <= 0:
+ return False
+ else:
+ return True
- # rotate camera
- vertxyz = RotatePoint(vertxyz[0], vertxyz[1], vertxyz[2],
- cameraObj.RotX, cameraObj.RotY, cameraObj.RotZ)
- #-cameraObj.RotX, -cameraObj.RotY, -cameraObj.RotZ)
+ # Scene methods
- # original setting for translate
- vertxyz[0] -= (obj.LocX - cameraObj.LocX)
- vertxyz[1] -= (obj.LocY - cameraObj.LocY)
- vertxyz[2] -= (obj.LocZ - cameraObj.LocZ)
+ def _doClipping():
+ return
+ def _doDepthSorting(self, scene):
- # rotate object
- vertxyz = RotatePoint(vertxyz[0], vertxyz[1], vertxyz[2], obj.RotX, obj.RotY, obj.RotZ)
+ cameraObj = self.camera
+ Objects = scene.getChildren()
+ Objects.sort(lambda obj1, obj2:
+ cmp(Vector(Vector(cameraObj.loc) - Vector(obj1.loc)).length,
+ Vector(Vector(cameraObj.loc) - Vector(obj2.loc)).length
+ )
+ )
+
+ # hackish sorting of faces according to the max z value of a vertex
+ for o in Objects:
+ if (o.getType() != 'Mesh'):
+ continue
+ #
- p1 = flatten(vertxyz[0], vertxyz[1], vertxyz[2],
- cameraObj, self.canvasSize)
+ mesh = o.data
+ mesh.faces.sort(
+ lambda f1, f2:
+ # Sort faces according to the min z coordinate in a face
+ #cmp(min([v[2] for v in f1]), min([v[2] for v in f2])))
- transformed_face.append(p1)
+ # Sort faces according to the max z coordinate in a face
+ cmp(max([v[2] for v in f1]), max([v[2] for v in f2])))
- # just some fake lighting...
+ # Sort faces according to the avg z coordinate in a face
+ #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))
+ mesh.faces.reverse()
+ mesh.update()
+
+ # update the scene
+ # FIXME: check if it is correct
+ scene.update()
+ #for o in scene.getChildren():
+ # scene.unlink(o)
+ #for o in Objects:
+ # scene.link(o)
+
+ # Per object methods
+
+ def _doModelViewTransformations(self, object):
+ if(object.getType() != 'Mesh'):
+ return
+
+ matMV = object.matrix
+ mesh = object.data
+ mesh.transform(matMV, True)
+ mesh.update()
- transformed_object.append(transformed_face)
- # at the end of the loop on obj
- mesh.append(transformed_object)
- return mesh
+ def _doBackFaceCulling(self, object):
+ if(object.getType() != 'Mesh'):
+ return
+
+ print "doing Backface Culling"
+ mesh = object.data
+
+ # Select all vertices, so edges without faces can be displayed
+ for v in mesh.verts:
+ v.sel = 1
+
+ Mesh.Mode(Mesh.SelectModes['FACE'])
+ # Loop on faces
+ for f in mesh.faces:
+ f.sel = 0
+ if self._isFaceVisible(f, object, self.camera):
+ f.sel = 1
+ for f in mesh.faces:
+ if not f.sel:
+ for v in f:
+ v.sel = 0
- # Private Methods
- #
+ for f in mesh.faces:
+ if f.sel:
+ for v in f:
+ v.sel = 1
+
+ mesh.update()
+
+
- def _removehiddenFaces(obj):
+ #Mesh.Mode(Mesh.SelectModes['VERTEX'])
+
+ def _doColorAndLighting(self, object):
return
- def _testClipping(face):
+ def _doEdgesStyle(self, object, style):
+ """Process Mesh Edges. (For now copy the edge data, in next version it
+ can be a place where we recognize silhouettes and/or contours).
+
+ input: an edge list
+ return: a processed edge list
+ """
return
+ def _doProjection(self, object, projector):
+
+ if(object.getType() != 'Mesh'):
+ return
+
+ mesh = object.data
+ for v in mesh.verts:
+ p = projector.doProjection(v.co)
+ v[0] = p[0]
+ v[1] = p[1]
+ v[2] = p[2]
+ mesh.update()
+
+
# ---------------------------------------------------------------------
#
# ---------------------------------------------------------------------
-scene = Scene.GetCurrent()
-renderer = Renderer()
+# FIXME: really hackish code, just to test if the other parts work
+
+def vectorize(filename):
+ """The vectorizing process is as follows:
+
+ - Open the writer
+ - Render the scene
+ - Close the writer
+
+ If you want to render an animation the second pass should be
+ repeated for any frame, and the frame number should be passed to the
+ renderer.
+ """
+ writer = SVGVectorWriter(filename)
+
+ writer.open()
+
+ renderer = Renderer()
+ renderer.doRendering(writer)
+
+ writer.close()
+
+
+# Here the main
+if __name__ == "__main__":
+ # with this trick we can run the script in batch mode
+ try:
+ Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg")
+ Blender.Redraw()
+ except:
+ from Blender import Window
+ editmode = Window.EditMode()
+ if editmode: Window.EditMode(0)
+
+ vectorize("proba.svg")
+ if editmode: Window.EditMode(1)
-projectedMesh = renderer.doRendering(scene)
-canvasSize = renderer.getCanvasSize()
-# hackish sorting of faces according to the max z value of a vertex
-for o in projectedMesh:
- o.sort(lambda f1, f2:
- cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))
- o.reverse()
-writer = SVGVectorWriter("proba.svg", canvasSize)
-writer.printCanvas(projectedMesh)