Name: 'VRM'
Blender: 241
Group: 'Export'
-Tooltip: 'Vector Rendering Method Export Script 0.3'
+Tooltip: 'Vector Rendering Method Export Script'
+"""
+
+__author__ = "Antonio Ospite"
+__url__ = ["blender"]
+__version__ = "0.3"
+
+__bpydoc__ = """\
+ Render the scene and save the result in vector format.
"""
# ---------------------------------------------------------------------
#
# ---------------------------------------------------------------------
#
-# NOTE: I do not know who is the original author of 'vrm'.
-# The present code is almost entirely rewritten from scratch,
-# but if I have to give credits to anyone, please let me know,
-# so I can update the copyright.
+# Additional credits:
+# Thanks to Emilio Aguirre for S2flender from which I took inspiration :)
+# Thanks to Nikola Radovanovic, the author of the original VRM script,
+# the code you read here has been rewritten _almost_ entirely
+# from scratch but Nikola gave me the idea, so I thank him publicly.
#
# ---------------------------------------------------------------------
+#
+# Things TODO for a next release:
+# - Switch to the Mesh structure, should be considerably faster
+# (partially done, but cannot sort faces, yet)
+# - Use a better depth sorting algorithm
+# - Review how selections are made (this script uses selection states of
+# primitives to represent visibility info)
+# - Implement clipping of primitives and handle object intersections
+# (for now only clipping of whole objects is supported).
+# - Implement Edge Styles (silhouettes, contours, etc.)
+# - Implement Edge coloring
+# - Use multiple lighting sources in color calculation
+# - Implement Shading Styles?
+# - Use another representation for the 2D projection?
+# Think of a way to merge adjacent polygons that have the same color.
+# - Add other Vector Writers.
#
-# Additional credits:
-# Thanks to Emilio Aguirre for S2flender from which I took inspirations :)
-# Thanks to Anthony C. D'Agostino for the original backface.py script
+# ---------------------------------------------------------------------
+#
+# Changelog:
+#
+# vrm-0.3.py - 2006-05-19
+# * First release after code restructuring.
+# Now the script offers a useful set of functionalities
+# and it can render animations, too.
#
# ---------------------------------------------------------------------
import Blender
-from Blender import Scene, Object, Mesh, NMesh, Lamp, Camera
+from Blender import Scene, Object, Mesh, NMesh, Material, Lamp, Camera
from Blender.Mathutils import *
from math import *
+# Some global settings
+PRINT_POLYGONS = True
+PRINT_EDGES = False
+SHOW_HIDDEN_EDGES = False
+
+EDGES_WIDTH = 0.5
+
+POLYGON_EXPANSION_TRICK = True
+
+RENDER_ANIMATION = False
+
+# Does not work in batch mode!!
+#OPTIMIZE_FOR_SPACE = True
+
+
# ---------------------------------------------------------------------
#
## Projections classes
def __init__(self, cameraObj, canvasRatio):
"""Calculate the projection matrix.
- The projection matrix depends, in this case, on the camera settings,
- and also on object transformation matrix.
+ The projection matrix depends, in this case, on the camera settings.
+ TAKE CARE: This projector expects vertices in World Coordinates!
"""
camera = cameraObj.getData()
near = camera.clipStart
far = camera.clipEnd
+ scale = float(camera.scale)
+
fovy = atan(0.5/aspect/(camera.lens/32))
- fovy = fovy * 360/pi
+ fovy = fovy * 360.0/pi
# What projection do we want?
if camera.type:
- m2 = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale)
+ #mP = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale)
+ mP = self._calcOrthoMatrix(fovy, aspect, near, far, scale)
else:
- m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+ mP = self._calcPerspectiveMatrix(fovy, aspect, near, far)
-
# View transformation
cam = Matrix(cameraObj.getInverseMatrix())
cam.transpose()
- # FIXME: remove the commented part, we used to pass object in local
- # coordinates, but this is not very clean, we should apply modelview
- # tranformations _before_ (at some other level).
- #m1 = Matrix(obMesh.getMatrix())
- #m1.transpose()
-
- #mP = cam * m1
- mP = cam
- mP = m2 * mP
+ mP = mP * cam
self.projectionMatrix = mP
matrix.
"""
- # Note that we need the vertex expressed using homogeneous coordinates
+ # Note that we have to work on the vertex using homogeneous coordinates
p = self.projectionMatrix * Vector(v).resize4D()
if p[3]>0:
p[0] = p[0]/p[3]
p[1] = p[1]/p[3]
+ # restore the size
+ p[3] = 1.0
+ p.resize3D()
+
return p
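+
+ # Usage sketch (illustrative values, assuming the canvas ratio computed
+ # by the Renderer below): for points inside the view frustum the
+ # projected x and y fall in [-1, 1] and are later remapped to canvas
+ # coordinates by the vector writer.
+ #
+ # proj = Projector(Scene.GetCurrent().getCurrentCamera(), (4.0/3.0, 1.0))
+ # p = proj.doProjection(v.co)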
##
#
def _calcPerspectiveMatrix(self, fovy, aspect, near, far):
- """Return a perspective projection matrix."""
+ """Return a perspective projection matrix.
+ """
top = near * tan(fovy * pi / 360.0)
bottom = -top
return m
def _calcOrthoMatrix(self, fovy, aspect , near, far, scale):
- """Return an orthogonal projection matrix."""
+ """Return an orthogonal projection matrix.
+ """
- top = near * tan(fovy * pi / 360.0) * (scale * 10)
+ # The 11 in the formula was found empirically
+ top = near * tan(fovy * pi / 360.0) * (scale * 11)
bottom = -top
left = bottom * aspect
right= top * aspect
# ---------------------------------------------------------------------
#
-## Object representation class
+## 2DObject representation class
#
# ---------------------------------------------------------------------
# TODO: a class to represent the needed properties of a 2D vector image
-# Just use a NMesh structure?
+# For now just using a [N]Mesh structure.
# ---------------------------------------------------------------------
    Every subclass of VectorWriter must have at least the following public
methods:
- - printCanvas(mesh) --- where mesh is as specified before.
+ - open(self)
+ - close(self)
+ - printCanvas(self, scene,
+ doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False):
"""
def __init__(self, fileName):
- """Open the file named #fileName# and set the canvas size."""
-
- self.file = open(fileName, "w")
- print "Outputting to: ", fileName
-
+ """Set the output file name and other properties"""
+ self.outputFileName = fileName
+ self.file = None
+
context = Scene.GetCurrent().getRenderingContext()
self.canvasSize = ( context.imageSizeX(), context.imageSizeY() )
-
+
+ self.startFrame = 1
+ self.endFrame = 1
+ self.animation = False
+
##
# Public Methods
#
- def printCanvas(mesh):
- return
-
- ##
- # Private Methods
- #
-
- def _printHeader():
+ def open(self, startFrame=1, endFrame=1):
+ if startFrame != endFrame:
+ self.startFrame = startFrame
+ self.endFrame = endFrame
+ self.animation = True
+
+ self.file = open(self.outputFileName, "w")
+ print "Outputting to: ", self.outputFileName
+
return
- def _printFooter():
+ def close(self):
+ self.file.close()
return
+ def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False,
+ showHiddenEdges=False):
+ """This is the interface for the needed printing routine.
+ """
+ return
+
## SVG Writer
class SVGVectorWriter(VectorWriter):
"""A concrete class for writing SVG output.
-
- The class does not support animations, yet.
- Sorry.
"""
- def __init__(self, file):
- """Simply call the parent Contructor."""
- VectorWriter.__init__(self, file)
+ def __init__(self, fileName):
+ """Simply call the parent Constructor.
+ """
+ VectorWriter.__init__(self, fileName)
##
# Public Methods
#
- def open(self):
+ def open(self, startFrame=1, endFrame=1):
+ """Do some initialization operations.
+ """
+ VectorWriter.open(self, startFrame, endFrame)
self._printHeader()
    def close(self):
+ """Do some finalization operations.
+ """
self._printFooter()
+ # remember to call the close method of the parent
+ VectorWriter.close(self)
+
-
- def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False):
- """Convert the scene representation to SVG."""
+ def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False,
+ showHiddenEdges=False):
+ """Convert the scene representation to SVG.
+ """
Objects = scene.getChildren()
+
+ context = scene.getRenderingContext()
+ framenumber = context.currentFrame()
+
+ if self.animation:
+ framestyle = "display:none"
+ else:
+ framestyle = "display:block"
+
+ # Assign an id to this group so we can set properties on it using DOM
+ self.file.write("<g id=\"frame%d\" style=\"%s\">\n" %
+ (framenumber, framestyle) )
+
for obj in Objects:
if(obj.getType() != 'Mesh'):
continue
- #
- self.file.write("<g>\n")
+ self.file.write("<g id=\"%s\">\n" % obj.getName())
+
+ mesh = obj.getData(mesh=1)
-
if doPrintPolygons:
- for face in obj.getData().faces:
- self._printPolygon(face)
+ self._printPolygons(mesh)
if doPrintEdges:
- self._printEdges(obj.getData(), showHiddenEdges)
+ self._printEdges(mesh, showHiddenEdges)
self.file.write("</g>\n")
-
+
+ self.file.write("</g>\n")
+
##
# Private Methods
#
+ def _calcCanvasCoord(self, v):
+ """Convert vertex in scene coordinates to canvas coordinates.
+ """
+
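+ # For example, with a 640x480 canvas a projected x of -1.0 maps to 0 and
+ # +1.0 maps to 640, while y is mirrored so that +1.0 ends up at the top
+ # edge (0) of the SVG canvas.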
+ pt = Vector([0, 0, 0])
+
+ mW = float(self.canvasSize[0])/2.0
+ mH = float(self.canvasSize[1])/2.0
+
+ # rescale to canvas size
+ pt[0] = v.co[0]*mW + mW
+ pt[1] = v.co[1]*mH + mH
+ pt[2] = v.co[2]
+
+ # For now we want (0,0) in the top-left corner of the canvas.
+ # Mirror and translate along y
+ pt[1] *= -1
+ pt[1] += self.canvasSize[1]
+
+ return pt
+
def _printHeader(self):
"""Print SVG header."""
self.file.write("\twidth=\"%d\" height=\"%d\" streamable=\"true\">\n\n" %
self.canvasSize)
+ if self.animation:
+
+ self.file.write("""\n<script><![CDATA[
+ globalStartFrame=%d;
+ globalEndFrame=%d;
+
+ /* FIXME: Use 1000 as interval, lower values give problems */
+ timerID = setInterval("NextFrame()", 1000);
+ globalFrameCounter=%d;
+
+ function NextFrame()
+ {
+ currentElement = document.getElementById('frame'+globalFrameCounter)
+ previousElement = document.getElementById('frame'+(globalFrameCounter-1))
+
+ if (!currentElement)
+ {
+ return;
+ }
+
+ if (globalFrameCounter > globalEndFrame)
+ {
+ clearInterval(timerID)
+ }
+ else
+ {
+ if(previousElement)
+ {
+ previousElement.style.display="none";
+ }
+ currentElement.style.display="block";
+ globalFrameCounter++;
+ }
+ }
+ \n]]></script>\n
+ \n""" % (self.startFrame, self.endFrame, self.startFrame) )
+
def _printFooter(self):
"""Print the SVG footer."""
self.file.write("\n</svg>\n")
- self.file.close()
+
+ def _printPolygons(self, mesh):
+ """Print the selected (visible) polygons.
+ """
+
+ if len(mesh.faces) == 0:
+ return
+
+ self.file.write("<g>\n")
+
+ for face in mesh.faces:
+ if not face.sel:
+ continue
+
+ self.file.write("<polygon points=\"")
+
+ for v in face:
+ p = self._calcCanvasCoord(v)
+ self.file.write("%g,%g " % (p[0], p[1]))
+
+ # get rid of the last blank space, just cosmetics here.
+ self.file.seek(-1, 1)
+ self.file.write("\"\n")
+
+ # take as face color the first vertex color
+ # TODO: the average of vertex colors?
+ if face.col:
+ fcol = face.col[0]
+ color = [fcol.r, fcol.g, fcol.b, fcol.a]
+ else:
+ color = [255, 255, 255, 255]
+
+ # use the stroke property to alleviate the "adjacent edges" problem:
+ # we simulate polygon expansion using borders,
+ # see http://www.antigrain.com/svg/index.html for more info
+ stroke_col = color
+ stroke_width = 0.5
+
+ # Convert the color to the #RRGGBB form
+ str_col = "#%02X%02X%02X" % (color[0], color[1], color[2])
+
+ self.file.write("\tstyle=\"fill:" + str_col + ";")
+ if POLYGON_EXPANSION_TRICK:
+ self.file.write(" stroke:" + str_col + ";")
+ self.file.write(" stroke-width:" + str(stroke_width) + ";\n")
+ self.file.write(" stroke-linecap:round;stroke-linejoin:round")
+ self.file.write("\"/>\n")
+
+ self.file.write("</g>\n")
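+
+ # For reference, each selected face above produces roughly this kind of
+ # element (coordinates and color are illustrative only):
+ #
+ # <polygon points="320,240 380,220 360,300"
+ # style="fill:#C08040; stroke:#C08040; stroke-width:0.5;
+ # stroke-linecap:round;stroke-linejoin:round"/>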
def _printEdges(self, mesh, showHiddenEdges=False):
- """Print the wireframe using mesh edges... is this the correct way?
+ """Print the wireframe using mesh edges.
"""
- stroke_width=0.5
+ stroke_width=EDGES_WIDTH
stroke_col = [0, 0, 0]
self.file.write("<g>\n")
hidden_stroke_style = ""
- # And edge is selected if both vertives are selected
+ # Consider an edge selected if both vertices are selected
if e.v1.sel == 0 or e.v2.sel == 0:
if showHiddenEdges == False:
continue
self.file.write("\"/>\n")
self.file.write("</g>\n")
-
-
- def _printPolygon(self, face):
- """Print our primitive, finally.
- """
-
- wireframe = False
-
- stroke_width=0.5
-
- self.file.write("<polygon points=\"")
-
- for v in face:
- p = self._calcCanvasCoord(v)
- self.file.write("%g,%g " % (p[0], p[1]))
-
- self.file.seek(-1,1) # get rid of the last space
- self.file.write("\"\n")
-
- #take as face color the first vertex color
- if face.col:
- fcol = face.col[0]
- color = [fcol.r, fcol.g, fcol.b]
- else:
- color = [ 255, 255, 255]
-
- stroke_col = [0, 0, 0]
- if not wireframe:
- stroke_col = color
-
- self.file.write("\tstyle=\"fill:rgb("+str(color[0])+","+str(color[1])+","+str(color[2])+");")
- self.file.write(" stroke:rgb("+str(stroke_col[0])+","+str(stroke_col[1])+","+str(stroke_col[2])+");")
- self.file.write(" stroke-width:"+str(stroke_width)+";\n")
- self.file.write(" stroke-linecap:round;stroke-linejoin:round")
- self.file.write("\"/>\n")
-
- def _calcCanvasCoord(self, v):
-
- pt = Vector([0, 0, 0])
-
- mW = self.canvasSize[0]/2
- mH = self.canvasSize[1]/2
-
- # rescale to canvas size
- pt[0] = round(v[0]*mW)+mW
- pt[1] = round(v[1]*mH)+mH
-
- # For now we want (0,0) in the top-left corner of the canvas
- # Mirror and translate along y
- pt[1] *= -1
- pt[1] += self.canvasSize[1]
-
- return pt
# ---------------------------------------------------------------------
class Renderer:
"""Render a scene viewed from a given camera.
- This class is responsible of the rendering process, hence transformation
- and projection of the ojects in the scene are invoked by the renderer.
+ This class is responsible for the rendering process; transformation and
+ projection of the objects in the scene are invoked by the renderer.
- The user can optionally provide a specific camera for the rendering, see
- the #doRendering# method for more informations.
+ The rendering is done using the active camera for the current scene.
"""
def __init__(self):
"""Make the rendering process only for the current scene by default.
+
+ We will work on a copy of the scene, so that the current scene does
+ not get modified in any way.
"""
- # Render the current Scene set as a READ-ONLY property
+ # Render the current Scene; this should be a READ-ONLY property
self._SCENE = Scene.GetCurrent()
# Use the aspect ratio of the scene rendering context
context = self._SCENE.getRenderingContext()
- self.canvasRatio = (context.aspectRatioX(), context.aspectRatioY())
+
+ aspect_ratio = float(context.imageSizeX())/float(context.imageSizeY())
+ self.canvasRatio = (float(context.aspectRatioX())*aspect_ratio,
+ float(context.aspectRatioY())
+ )
# Render from the currently active camera
- self.camera = self._SCENE.getCurrentCamera()
+ self.cameraObj = self._SCENE.getCurrentCamera()
+
+ # Get the list of lighting sources
+ obj_lst = self._SCENE.getChildren()
+ self.lights = [ o for o in obj_lst if o.getType() == 'Lamp']
+
+ if len(self.lights) == 0:
+ l = Lamp.New('Lamp')
+ lobj = Object.New('Lamp')
+ lobj.link(l)
+ self.lights.append(lobj)
##
# Public Methods
#
- def doRendering(self, outputWriter, animation=0):
+ def doRendering(self, outputWriter, animation=False):
"""Render picture or animation and write it out.
The parameters are:
- - a Vector writer object than will be used to output the result.
- - a flag to tell if we want to render an animation or the only
+ - a Vector writer object that will be used to output the result.
+ - a flag to tell if we want to render an animation or only the
current frame.
"""
currentFrame = context.currentFrame()
# Handle the animation case
- if animation == 0:
+ if not animation:
startFrame = currentFrame
endFrame = startFrame
+ outputWriter.open()
else:
startFrame = context.startFrame()
endFrame = context.endFrame()
+ outputWriter.open(startFrame, endFrame)
# Do the rendering process frame by frame
print "Start Rendering!"
for f in range(startFrame, endFrame+1):
context.currentFrame(f)
+
renderedScene = self.doRenderScene(self._SCENE)
outputWriter.printCanvas(renderedScene,
- doPrintPolygons=False, doPrintEdges=True, showHiddenEdges=True)
+ doPrintPolygons = PRINT_POLYGONS,
+ doPrintEdges = PRINT_EDGES,
+ showHiddenEdges = SHOW_HIDDEN_EDGES)
# clear the rendered scene
self._SCENE.makeCurrent()
Scene.unlink(renderedScene)
del renderedScene
+ outputWriter.close()
print "Done!"
context.currentFrame(currentFrame)
-
def doRenderScene(self, inputScene):
"""Control the rendering process.
# NOTE: the projector wants object in world coordinates,
# so we should apply modelview transformations _before_
# projection transformations
- proj = Projector(self.camera, self.canvasRatio)
-
- # global processing of the scene
- self._doDepthSorting(workScene)
-
- # Per object activities
- Objects = workScene.getChildren()
-
- for obj in Objects:
-
- if (obj.getType() != 'Mesh'):
- print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
- continue
- #
-
- self._doModelViewTransformations(obj)
+ proj = Projector(self.cameraObj, self.canvasRatio)
- self._doBackFaceCulling(obj)
-
- self._doColorAndLighting(obj)
+ # global processing of the scene
- # 'style' can be a function that determine
- # if an edge should be showed?
- self._doEdgesStyle(obj, style=None)
-
- self._doProjection(obj, proj)
+ self._doConvertGeometricObjToMesh(workScene)
- return workScene
+ self._doSceneClipping(workScene)
+ # FIXME: does not work in batch mode!
+ #if OPTIMIZE_FOR_SPACE:
+ # self._joinMeshObjectsInScene(workScene)
- def oldRenderScene(scene):
+ self._doSceneDepthSorting(workScene)
# Per object activities
+
Objects = workScene.getChildren()
-
for obj in Objects:
- if (obj.getType() != 'Mesh'):
- print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
+ if obj.getType() != 'Mesh':
+ print "Only Mesh supported! - Skipping type:", obj.getType()
continue
-
- # Get a projector for this object
- proj = Projector(self.camera, obj, self.canvasSize)
- # Let's store the transformed data
- transformed_mesh = NMesh.New("flat"+obj.name)
- transformed_mesh.hasVertexColours(1)
+ print "Rendering: ", obj.getName()
- # process Edges
- self._doProcessEdges(obj)
-
- for v in obj.getData().verts:
- transformed_mesh.verts.append(v)
- transformed_mesh.edges = self._processEdges(obj.getData().edges)
- #print transformed_mesh.edges
+ mesh = obj.data
+ self._doModelToWorldCoordinates(mesh, obj.matrix)
+
+ self._doObjectDepthSorting(mesh)
- # Store the materials
- materials = obj.getData().getMaterials()
-
- meshfaces = obj.getData().faces
-
- for face in meshfaces:
-
- # if the face is visible flatten it on the "picture plane"
- if self._isFaceVisible(face, obj, cameraObj):
-
- # Store transformed face
- newface = NMesh.Face()
-
- for vert in face:
-
- p = proj.doProjection(vert.co)
-
- tmp_vert = NMesh.Vert(p[0], p[1], p[2])
-
- # Add the vert to the mesh
- transformed_mesh.verts.append(tmp_vert)
-
- newface.v.append(tmp_vert)
-
-
- # Per-face color calculation
- # code taken mostly from the original vrm script
- # TODO: understand the code and rewrite it clearly
- ambient = -150
-
- fakelight = Object.Get("Lamp").loc
- if fakelight == None:
- fakelight = [1.0, 1.0, -0.3]
-
- norm = Vector(face.no)
- vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2])
- vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2)))
- intensity = floor(ambient + 200*acos(vektori/vduzine))/200
- if intensity < 0:
- intensity = 0
-
- if materials:
- tmp_col = materials[face.mat].getRGBCol()
- else:
- tmp_col = [0.5, 0.5, 0.5]
-
- tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ]
-
- vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2])
- newface.col = [vcol, vcol, vcol, 255]
-
- transformed_mesh.addFace(newface)
-
- # at the end of the loop on obj
+ self._doBackFaceCulling(mesh)
- transformed_obj = Object.New(obj.getType(), "flat"+obj.name)
- transformed_obj.link(transformed_mesh)
- transformed_obj.loc = obj.loc
- newscene.link(transformed_obj)
+ self._doColorAndLighting(mesh)
+
+ # TODO: 'style' can be a function that determine
+ # if an edge should be showed?
+ self._doEdgesStyle(mesh, style=None)
+
+ self._doProjection(mesh, proj)
+
+ # Update the object data, important! :)
+ mesh.update()
-
return workScene
# Private Methods
#
+ # Utility methods
+
+ def _getObjPosition(self, obj):
+ """Return the obj position in World coordinates.
+ """
+ return obj.matrix.translationPart()
+
+ def _cameraViewDirection(self):
+ """Get the View Direction from the camera matrix.
+ """
+ return Vector(self.cameraObj.matrix[2]).resize3D()
+
+
# Faces methods
- def _isFaceVisible(self, face, obj, camObj):
- """Determine if a face of an object is visible from a given camera.
+ def _isFaceVisible(self, face):
+ """Determine if a face of an object is visible from the current camera.
- The normals need to be transformed, but note that we should apply only the
- rotation part of the tranformation matrix, since the normals are
- normalized and they can be intended as starting from the origin.
-
The view vector is calculated from the camera location and one of the
vertices of the face (expressed in World coordinates, after applying
modelview transformations).
- After those transformations we determine if a face is visible by computing
- the angle between the face normal and the view vector, this angle
- corresponds somehow to the dot product between the two. If the product
- results <= 0 then the angle between the two vectors is less that 90
- degrees and then the face is visible.
+ After those transformations we determine if a face is visible by
+ computing the angle between the face normal and the view vector; this
+ angle has to be between -90 and 90 degrees for the face to be visible.
+ This corresponds to the sign of the dot product between the two: if it
+ is > 0 then the face is visible.
        There is no need to normalize those vectors since we are only interested in
        the sign of the dot product and not in its actual value.
- """
- # The transformation matrix of the object
- mObj = Matrix(obj.getMatrix())
- mObj.transpose()
+ NOTE: here we assume the face vertices are in WorldCoordinates, so
+ please transform the object _before_ doing the test.
+ """
- # The normal after applying the current object rotation
- #normal = mObj.rotationPart() * Vector(face.no)
normal = Vector(face.no)
-
- # View vector in orthographics projections can be considered simply s the
- # camera position
- #view_vect = Vector(camObj.loc)
-
- # View vector as in perspective projections
- # it is the dofference between the camera position and
- # one point of the face, we choose the first point,
- # but maybe a better choice may be the farthest point from the camera.
- point = Vector(face[0].co)
- #point = mObj * point.resize4D()
- #point.resize3D()
- view_vect = Vector(camObj.loc) - point
-
-
- # if d <= 0 the face is visible from the camera
+ camPos = self._getObjPosition(self.cameraObj)
+ view_vect = None
+
+ # View Vector in orthographics projections is the view Direction of
+ # the camera
+ if self.cameraObj.data.getType() == 1:
+ view_vect = self._cameraViewDirection()
+
+ # View vector in perspective projections can be considered as
+ # the difference between the camera position and one point of
+ # the face, we choose the farthest point from the camera.
+ if self.cameraObj.data.getType() == 0:
+ vv = max( [ ((camPos - Vector(v.co)).length, (camPos - Vector(v.co))) for v in face] )
+ view_vect = vv[1]
+
+ # if d > 0 the face is visible from the camera
d = view_vect * normal
- if d <= 0:
- return False
- else:
+ if d > 0:
return True
+ else:
+ return False
# Scene methods
- def _doClipping():
- return
-
- def _doDepthSorting(self, scene):
+ def _doConvertGeometricObjToMesh(self, scene):
+ """Convert all "geometric" objects to mesh ones.
+ """
+ geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text']
- cameraObj = self.camera
Objects = scene.getChildren()
+ objList = [ o for o in Objects if o.getType() in geometricObjTypes ]
+ for obj in objList:
+ old_obj = obj
+ obj = self._convertToRawMeshObj(obj)
+ scene.link(obj)
+ scene.unlink(old_obj)
+
+ # Mesh Cleanup
+ me = obj.getData(mesh=1)
+ for f in me.faces: f.sel = 1;
+ for v in me.verts: v.sel = 1;
+ me.remDoubles(0)
+ me.triangleToQuad()
+ me.recalcNormals()
+ me.update()
+
+ def _doSceneClipping(self, scene):
+ """Clip objects against the View Frustum.
+
+ For now only whole objects are clipped away, according to their center position.
+ """
- Objects.sort(lambda obj1, obj2:
- cmp(Vector(Vector(cameraObj.loc) - Vector(obj1.loc)).length,
- Vector(Vector(cameraObj.loc) - Vector(obj2.loc)).length
- )
- )
-
- # hackish sorting of faces according to the max z value of a vertex
+ cpos = self._getObjPosition(self.cameraObj)
+ view_vect = self._cameraViewDirection()
+
+ near = self.cameraObj.data.clipStart
+ far = self.cameraObj.data.clipEnd
+
+ aspect = float(self.canvasRatio[0])/float(self.canvasRatio[1])
+ fovy = atan(0.5/aspect/(self.cameraObj.data.lens/32))
+ fovy = fovy * 360.0/pi
+
+ Objects = scene.getChildren()
for o in Objects:
+ if o.getType() != 'Mesh': continue;
- if (o.getType() != 'Mesh'):
- continue
- #
-
- mesh = o.data
- mesh.faces.sort(
- lambda f1, f2:
- # Sort faces according to the min z coordinate in a face
- #cmp(min([v[2] for v in f1]), min([v[2] for v in f2])))
-
- # Sort faces according to the max z coordinate in a face
- cmp(max([v[2] for v in f1]), max([v[2] for v in f2])))
-
- # Sort faces according to the avg z coordinate in a face
- #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))
- mesh.faces.reverse()
- mesh.update()
+ obj_vect = Vector(cpos) - self._getObjPosition(o)
+
+ d = obj_vect*view_vect
+ theta = AngleBetweenVecs(obj_vect, view_vect)
+ # if the object is outside the view frustum, clip it away
+ if (d < near) or (d > far) or (theta > fovy):
+ scene.unlink(o)
+
+ def _doSceneDepthSorting(self, scene):
+ """Sort objects in the scene.
+
+ The object sorting is done according to the object centers.
+ """
+
+ c = self._getObjPosition(self.cameraObj)
+
+ by_center_pos = (lambda o1, o2:
+ (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and
+ cmp((self._getObjPosition(o1) - Vector(c)).length,
+ (self._getObjPosition(o2) - Vector(c)).length)
+ )
+
+ # TODO: implement sorting by bounding box: if obj1.bb is inside obj2.bb,
+ # then obj1 goes farther than obj2; useful when obj2 has holes
+ by_bbox = None
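+
+ # A possible sketch for the bounding-box criterion (hypothetical helper,
+ # not used yet); obj.getBoundBox() returns the 8 world-space corners:
+ #
+ # def bbox_contains(outer, inner):
+ #     for axis in range(3):
+ #         if min([p[axis] for p in inner]) < min([p[axis] for p in outer]):
+ #             return False
+ #         if max([p[axis] for p in inner]) > max([p[axis] for p in outer]):
+ #             return False
+ #     return True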
+
+ Objects = scene.getChildren()
+ Objects.sort(by_center_pos)
+
# update the scene
- # FIXME: check if it is correct
+ for o in Objects:
+ scene.unlink(o)
+ scene.link(o)
+
+ def _joinMeshObjectsInScene(self, scene):
+ """Merge all the Mesh Objects in a scene into a single Mesh Object.
+ """
+ mesh = Mesh.New()
+ bigObj = Object.New('Mesh', 'BigOne')
+ bigObj.link(mesh)
+
+ oList = [o for o in scene.getChildren() if o.getType()=='Mesh']
+ bigObj.join(oList)
+ scene.link(bigObj)
+ for o in oList:
+ scene.unlink(o)
+
scene.update()
- #for o in scene.getChildren():
- # scene.unlink(o)
- #for o in Objects:
- # scene.link(o)
+
# Per object methods
- def _doModelViewTransformations(self, object):
- if(object.getType() != 'Mesh'):
- return
+ def _convertToRawMeshObj(self, object):
+ """Convert geometry based object to a mesh object.
+ """
+ me = Mesh.New('RawMesh_'+object.name)
+ me.getFromObject(object.name)
+
+ newObject = Object.New('Mesh', 'RawMesh_'+object.name)
+ newObject.link(me)
+
+ # If the object has no materials set a default material
+ if not me.materials:
+ me.materials = [Material.New()]
+ #for f in me.faces: f.mat = 0
+
+ newObject.setMatrix(object.getMatrix())
+
+ return newObject
+
+ def _doModelToWorldCoordinates(self, mesh, matrix):
+ """Transform object coordinates to world coordinates.
+
+ This step is done by simply applying the transformation matrix to the
+ object and recalculating its normals.
+ """
+ mesh.transform(matrix, True)
+
+ def _doObjectDepthSorting(self, mesh):
+ """Sort faces in an object.
+
+ The faces in the object are sorted following the distance of the
+ vertices from the camera position.
+ """
+ c = self._getObjPosition(self.cameraObj)
+
+ # hackish sorting of faces
+
+ # Sort faces according to the max distance from the camera
+ by_max_vert_dist = (lambda f1, f2:
+ cmp(max([(Vector(v.co)-Vector(c)).length for v in f1]),
+ max([(Vector(v.co)-Vector(c)).length for v in f2])))
+
+ # Sort faces according to the min distance from the camera
+ by_min_vert_dist = (lambda f1, f2:
+ cmp(min([(Vector(v.co)-Vector(c)).length for v in f1]),
+ min([(Vector(v.co)-Vector(c)).length for v in f2])))
- matMV = object.matrix
- mesh = object.data
- mesh.transform(matMV, True)
- mesh.update()
+ # Sort faces according to the avg distance from the camera
+ by_avg_vert_dist = (lambda f1, f2:
+ cmp(sum([(Vector(v.co)-Vector(c)).length for v in f1])/len(f1),
+ sum([(Vector(v.co)-Vector(c)).length for v in f2])/len(f2)))
+ mesh.faces.sort(by_max_vert_dist)
+ mesh.faces.reverse()
- def _doBackFaceCulling(self, object):
- if(object.getType() != 'Mesh'):
- return
+ def _doBackFaceCulling(self, mesh):
+ """Simple Backface Culling routine.
- print "doing Backface Culling"
- mesh = object.data
+ At this level we simply do a visibility test face by face and then
+ select the vertices belonging to visible faces.
+ """
- # Select all vertices, so edges without faces can be displayed
+ # Select all vertices, so edges can be displayed even if there are no
+ # faces
for v in mesh.verts:
v.sel = 1
# Loop on faces
for f in mesh.faces:
f.sel = 0
- if self._isFaceVisible(f, object, self.camera):
+ if self._isFaceVisible(f):
f.sel = 1
+ # Is this the correct way to propagate the face selection info to the
+ # vertices belonging to a face ??
+ # TODO: Using the Mesh module this should come for free. Right?
+ Mesh.Mode(Mesh.SelectModes['VERTEX'])
for f in mesh.faces:
if not f.sel:
- for v in f:
- v.sel = 0
+ for v in f: v.sel = 0;
for f in mesh.faces:
if f.sel:
- for v in f:
- v.sel = 1
+ for v in f: v.sel = 1;
+
+ def _doColorAndLighting(self, mesh):
+ """Apply an Illumination model to the object.
+
+ The Illumination model used is the Phong one; it may be inefficient,
+ but I'm just learning about rendering and starting from Phong seemed
+ the most natural way.
+ """
- mesh.update()
+ # If the mesh has vertex colors already, use them,
+ # otherwise turn them on and do some calculations
+ if mesh.hasVertexColours():
+ return
+ mesh.hasVertexColours(True)
+ materials = mesh.materials
+ # TODO: use multiple lighting sources
+ light_obj = self.lights[0]
+ light_pos = self._getObjPosition(light_obj)
+ light = light_obj.data
- #Mesh.Mode(Mesh.SelectModes['VERTEX'])
+ camPos = self._getObjPosition(self.cameraObj)
+
+ # We do per-face color calculation (FLAT Shading); we could easily turn
+ # to a per-vertex calculation if we want to implement some shading
+ # technique. For an example see:
+ # http://www.miralab.unige.ch/papers/368.pdf
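+ #
+ # The per-face intensity computed in this loop follows the classic Phong
+ # formula (the attenuation factor fd is computed but not applied yet):
+ #
+ # I = ki + Ia*ka + Ip*( kd*(N.L) + ks*(V.R)^ns )
+ #
+ # where N is the face normal, L the light vector, V the view vector
+ # and R the reflection of L about N.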
+ for f in mesh.faces:
+ if not f.sel:
+ continue
- def _doColorAndLighting(self, object):
- return
+ mat = None
+ if materials:
+ mat = materials[f.mat]
- def _doEdgesStyle(self, object, style):
- """Process Mesh Edges. (For now copy the edge data, in next version it
- can be a place where recognize silouhettes and/or contours).
+ # A new default material
+ if mat == None:
+ mat = Material.New('defMat')
+
+ L = Vector(light_pos).normalize()
- input: an edge list
- return: a processed edge list
+ V = (Vector(camPos) - Vector(f.v[0].co)).normalize()
+
+ N = Vector(f.no).normalize()
+
+ R = 2 * (N*L) * N - L
+
+ # TODO: Attenuation factor (not used for now)
+ a0 = 1; a1 = 0.0; a2 = 0.0
+ d = (Vector(f.v[0].co) - Vector(light_pos)).length
+ fd = min(1, 1.0/(a0 + a1*d + a2*d*d))
+
+ # Ambient component
+ Ia = 1.0
+ ka = mat.getAmb() * Vector([0.1, 0.1, 0.1])
+ Iamb = Ia * ka
+
+ # Diffuse component (add light.col for kd)
+ kd = mat.getRef() * Vector(mat.getRGBCol())
+ Ip = light.getEnergy()
+ Idiff = Ip * kd * (N*L)
+
+ # Specular component
+ ks = mat.getSpec() * Vector(mat.getSpecCol())
+ ns = mat.getHardness()
+ Ispec = Ip * ks * pow((V * R), ns)
+
+ # Emissive component
+ ki = Vector([mat.getEmit()]*3)
+
+ I = ki + Iamb + Idiff + Ispec
+
+ # Clamp I values between 0 and 1
+ I = [ min(c, 1) for c in I]
+ I = [ max(0, c) for c in I]
+ tmp_col = [ int(c * 255.0) for c in I]
+
+ vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2], 255)
+ f.col = []
+ for v in f.v:
+ f.col.append(vcol)
+
+ def _doEdgesStyle(self, mesh, style):
+ """Process Mesh Edges.
+
+ Examples of algorithms:
+
+ Contours:
+ given an edge, if its adjacent faces have the same normal (that is,
+ they are coplanar) then deselect it.
+
+ Silhouettes:
+ given an edge, if one of its adjacent faces is front-facing and the
+ other is back-facing, then select it, else deselect it.
"""
+ #print "\tTODO: _doEdgeStyle()"
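+ # A possible silhouette sketch (hypothetical, not wired in yet): map each
+ # edge to its adjacent faces by vertex index pairs, then flag an edge
+ # when exactly one of the two faces is front-facing (front-facing faces
+ # are the selected ones, see _doBackFaceCulling):
+ #
+ # edge_faces = {}
+ # for f in mesh.faces:
+ #     for i in range(len(f.v)):
+ #         key = tuple(sorted([f.v[i].index, f.v[(i+1) % len(f.v)].index]))
+ #         edge_faces.setdefault(key, []).append(f)
+ #
+ # for e in mesh.edges:
+ #     adj = edge_faces.get(tuple(sorted([e.v1.index, e.v2.index])), [])
+ #     is_silhouette = (len(adj) == 2 and adj[0].sel != adj[1].sel)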
return
- def _doProjection(self, object, projector):
+ def _doProjection(self, mesh, projector):
+ """Calculate the Projection for the object.
+ """
+ # TODO: maybe using the object.transform() can be faster?
- if(object.getType() != 'Mesh'):
- return
-
- mesh = object.data
for v in mesh.verts:
p = projector.doProjection(v.co)
- v[0] = p[0]
- v[1] = p[1]
- v[2] = p[2]
- mesh.update()
+ v.co[0] = p[0]
+ v.co[1] = p[1]
+ v.co[2] = p[2]
#
# ---------------------------------------------------------------------
-
-# FIXME: really hackish code, just to test if the other parts work
-
def vectorize(filename):
"""The vectorizing process is as follows:
- - Open the writer
- - Render the scene
- - Close the writer
-
- If you want to render an animation the second pass should be
- repeated for any frame, and the frame number should be passed to the
- renderer.
+ - Instantiate the writer and the renderer
+ - Render!
"""
+ from Blender import Window
+ editmode = Window.EditMode()
+ if editmode: Window.EditMode(0)
+
writer = SVGVectorWriter(filename)
- writer.open()
-
renderer = Renderer()
- renderer.doRendering(writer)
+ renderer.doRendering(writer, RENDER_ANIMATION)
- writer.close()
+ if editmode: Window.EditMode(1)
+
+def vectorize_gui(filename):
+ """Draw the gui.
+
+ I would like to keep that simple, really.
+ """
+ Blender.Window.FileSelector (vectorize, 'Save SVG', filename)
+ Blender.Redraw()
# Here the main
if __name__ == "__main__":
+
+ basename = Blender.sys.basename(Blender.Get('filename'))
+ outputfile = Blender.sys.splitext(basename)[0]+".svg"
+
# with this trick we can run the script in batch mode
try:
- Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg")
- Blender.Redraw()
+ vectorize_gui(outputfile)
except:
- from Blender import Window
- editmode = Window.EditMode()
- if editmode: Window.EditMode(0)
-
- vectorize("proba.svg")
- if editmode: Window.EditMode(1)
-
-
-
+ vectorize(outputfile)
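+
+ # Batch rendering sketch (file names are illustrative): when Blender runs
+ # in background mode the FileSelector cannot be opened, so the except
+ # branch above renders directly, e.g.:
+ #
+ # blender -b myscene.blend -P vrm.py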