#!BPY
"""
Name: 'VRM'
-Blender: 241
-Group: 'Export'
-Tooltip: 'Vector Rendering Method Export Script 0.3'
+Blender: 242
+Group: 'Render'
+Tooltip: 'Vector Rendering Method script'
+"""
+
+__author__ = "Antonio Ospite"
+__url__ = ["http://projects.blender.org/projects/vrm"]
+__version__ = "0.3.beta"
+
+__bpydoc__ = """\
+ Render the scene and save the result in vector format.
"""
# ---------------------------------------------------------------------
#
# ---------------------------------------------------------------------
#
-# NOTE: I do not know who is the original author of 'vrm'.
-# The present code is almost entirely rewritten from scratch,
-# but if I have to give credits to anyone, please let me know,
-# so I can update the copyright.
+# Additional credits:
+# Thanks to Emilio Aguirre for S2flender from which I took inspiration :)
+# Thanks to Nikola Radovanovic, the author of the original VRM script,
+# the code you read here has been rewritten _almost_ entirely
+# from scratch but Nikola gave me the idea, so I thank him publicly.
#
# ---------------------------------------------------------------------
+#
+# Things TODO for a next release:
+# - FIX the issue with negative scales in object transformations!
+# - Use a better depth sorting algorithm
+# - Implement clipping of primitives and handle object intersections.
+# (for now only clipping away whole objects is supported).
+# - Review how selections are made (this script uses selection states of
+# primitives to represent visibility info)
+# - Use a data structure other than Mesh to represent the 2D image?
+# Think of a way to merge (adjacent) polygons that have the same color.
+# Or a way to use paths for silhouettes and contours.
+# - Consider SMIL for animation handling instead of ECMA Script? (Firefox does
+# not support SMIL animations)
+# - Switch to the Mesh structure, should be considerably faster
+# (partially done, but with Mesh we cannot sort faces, yet)
+# - Implement Edge Styles (silhouettes, contours, etc.) (partially done).
+# - Implement Shading Styles? (partially done, to be made more flexible).
+# - Add Vector Writers other than SVG.
+# - Check memory use!!
+# - Support Indexed palettes!! (Useful for ILDA FILES, for example,
+# see http://www.linux-laser.org/download/autotrace/ilda-output.patch)
#
-# Additional credits:
-# Thanks to Emilio Aguirre for S2flender from which I took inspirations :)
-# Thanks to Anthony C. D'Agostino for the original backface.py script
+# ---------------------------------------------------------------------
+#
+# Changelog:
+#
+# vrm-0.3.py - ...
+# * First release after code restructuring.
+# Now the script offers a useful set of functionalities
+# and it can render animations, too.
+# * Optimization in Renderer.doEdgeStyle(), building a topology cache
+# to speed up the lookup of the adjacent faces of an edge.
+# Thanks ideasman42.
+# * The SVG output is now SVG 1.0 valid.
+# Checked with: http://jiggles.w3.org/svgvalidator/ValidatorURI.html
+# * Progress indicator during HSR.
#
# ---------------------------------------------------------------------
import Blender
-from Blender import Scene, Object, Mesh, NMesh, Lamp, Camera
+from Blender import Scene, Object, Mesh, NMesh, Material, Lamp, Camera, Window
from Blender.Mathutils import *
from math import *
+import sys, time
+
+
+# Some global settings
+
+class config:
+ polygons = dict()
+ polygons['SHOW'] = True
+ polygons['SHADING'] = 'FLAT'
+ polygons['HSR'] = 'PAINTER' # 'PAINTER' or 'NEWELL'
+ #polygons['HSR'] = 'NEWELL'
+ # Hidden from the user for now
+ polygons['EXPANSION_TRICK'] = True
+
+ polygons['TOON_LEVELS'] = 2
+
+ edges = dict()
+ edges['SHOW'] = False
+ edges['SHOW_HIDDEN'] = False
+ edges['STYLE'] = 'MESH'
+ edges['WIDTH'] = 2
+ edges['COLOR'] = [0, 0, 0]
+
+ output = dict()
+ output['FORMAT'] = 'SVG'
+ output['ANIMATION'] = False
+ output['JOIN_OBJECTS'] = True
+
+
+
+# Debug utility function
+print_debug = False
+def debug(msg):
+ if print_debug:
+ sys.stderr.write(msg)
+
+
+# ---------------------------------------------------------------------
+#
+## Mesh Utility class
+#
+# ---------------------------------------------------------------------
+class MeshUtils:
+
+ def buildEdgeFaceUsersCache(me):
+ '''
+ Takes a mesh and returns a list aligned with the mesh's edges.
+ Each item is a list of the faces that use the edge;
+ this is the equivalent of having ed.face_users as a property.
+
+ Taken from .blender/scripts/bpymodules/BPyMesh.py,
+ thanks to ideasman_42.
+ '''
+
+ def sorted_edge_indicies(ed):
+ i1= ed.v1.index
+ i2= ed.v2.index
+ if i1>i2:
+ i1,i2= i2,i1
+ return i1, i2
+
+
+ face_edges_dict= dict([(sorted_edge_indicies(ed), (ed.index, [])) for ed in me.edges])
+ for f in me.faces:
+ fvi= [v.index for v in f.v]# face vert idx's
+ for i in xrange(len(f)):
+ i1= fvi[i]
+ i2= fvi[i-1]
+
+ if i1>i2:
+ i1,i2= i2,i1
+
+ face_edges_dict[i1,i2][1].append(f)
+
+ face_edges= [None] * len(me.edges)
+ for ed_index, ed_faces in face_edges_dict.itervalues():
+ face_edges[ed_index]= ed_faces
+
+ return face_edges
+
+ def isMeshEdge(adjacent_faces):
+ """Mesh edge rule.
+
+ A mesh edge is visible if _at_least_one_ of its adjacent faces is selected.
+ Note: if the edge has no adjacent faces we want to show it as well,
+ useful for "edge only" portions of objects.
+ """
+
+ if len(adjacent_faces) == 0:
+ return True
+
+ selected_faces = [f for f in adjacent_faces if f.sel]
+
+ if len(selected_faces) != 0:
+ return True
+ else:
+ return False
+
+ def isSilhouetteEdge(adjacent_faces):
+ """Silhouette selection rule.
+
+ An edge is a silhouette edge if it is shared by two faces with
+ different selection status or if it is a boundary edge of a selected
+ face.
+ """
+
+ if ((len(adjacent_faces) == 1 and adjacent_faces[0].sel == 1) or
+ (len(adjacent_faces) == 2 and
+ adjacent_faces[0].sel != adjacent_faces[1].sel)
+ ):
+ return True
+ else:
+ return False
+
+ buildEdgeFaceUsersCache = staticmethod(buildEdgeFaceUsersCache)
+ isMeshEdge = staticmethod(isMeshEdge)
+ isSilhouetteEdge = staticmethod(isSilhouetteEdge)
+
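+# Usage sketch for the helpers above (hedged; this only mirrors what
+# Renderer._doEdgesStyle() does later in this script, 'me' is assumed to be
+# a Blender Mesh whose faces are already selected by visibility):
+#
+#   edge_cache = MeshUtils.buildEdgeFaceUsersCache(me)
+#   for i, adjacent_faces in enumerate(edge_cache):
+#       me.edges[i].sel = int(MeshUtils.isSilhouetteEdge(adjacent_faces))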
+
+# ---------------------------------------------------------------------
+#
+## Shading Utility class
+#
+# ---------------------------------------------------------------------
+class ShadingUtils:
+
+ shademap = None
+
+ def toonShadingMapSetup():
+ levels = config.polygons['TOON_LEVELS']
+
+ texels = 2*levels - 1
+ tmp_shademap = [0.0] + [(i)/float(texels-1) for i in xrange(1, texels-1) ] + [1.0]
+
+ return tmp_shademap
+
+ def toonShading(u):
+
+ shademap = ShadingUtils.shademap
+
+ if not shademap:
+ shademap = ShadingUtils.toonShadingMapSetup()
+
+ v = 1.0
+ for i in xrange(0, len(shademap)-1):
+ pivot = (shademap[i]+shademap[i+1])/2.0
+ j = int(u>pivot)
+
+ v = shademap[i+j]
+
+ if v < shademap[i+1]:
+ return v
+
+ return v
+
+ toonShadingMapSetup = staticmethod(toonShadingMapSetup)
+ toonShading = staticmethod(toonShading)
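+ # Worked example (follows from the code above with TOON_LEVELS == 2):
+ # texels = 3, so the shade map is [0.0, 0.5, 1.0] with pivots at 0.25 and
+ # 0.75; toonShading() quantizes any u in [0, 1] to the nearest of those
+ # levels, e.g. toonShading(0.4) == 0.5 and toonShading(0.8) == 1.0.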
# ---------------------------------------------------------------------
def __init__(self, cameraObj, canvasRatio):
"""Calculate the projection matrix.
- The projection matrix depends, in this case, on the camera settings,
- and also on object transformation matrix.
+ The projection matrix depends, in this case, on the camera settings.
+ TAKE CARE: This projector expects vertices in World Coordinates!
"""
camera = cameraObj.getData()
near = camera.clipStart
far = camera.clipEnd
+ scale = float(camera.scale)
+
fovy = atan(0.5/aspect/(camera.lens/32))
- fovy = fovy * 360/pi
+ fovy = fovy * 360.0/pi
# What projection do we want?
- if camera.type:
- m2 = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale)
- else:
- m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+ if camera.type == 0:
+ mP = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+ elif camera.type == 1:
+ mP = self._calcOrthoMatrix(fovy, aspect, near, far, scale)
-
# View transformation
cam = Matrix(cameraObj.getInverseMatrix())
cam.transpose()
- # FIXME: remove the commented part, we used to pass object in local
- # coordinates, but this is not very clean, we should apply modelview
- # tranformations _before_ (at some other level).
- #m1 = Matrix(obMesh.getMatrix())
- #m1.transpose()
-
- #mP = cam * m1
- mP = cam
- mP = m2 * mP
+ mP = mP * cam
self.projectionMatrix = mP
matrix.
"""
- # Note that we need the vertex expressed using homogeneous coordinates
+ # Note that we have to work on the vertex using homogeneous coordinates
+ # From blender 2.42+ we don't need to resize the vector to be 4d
+ # when applying a 4x4 matrix, but we do that anyway since we need the
+ # 4th coordinate later
p = self.projectionMatrix * Vector(v).resize4D()
-
- if p[3]>0:
+
+ # Perspective division
+ if p[3] != 0:
p[0] = p[0]/p[3]
p[1] = p[1]/p[3]
+ p[2] = p[2]/p[3]
+
+ # restore the size
+ p[3] = 1.0
+ p.resize3D()
return p
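+ # Illustrative note (the numbers are made up): with a perspective matrix,
+ # a point that comes out of the multiplication as p = [2, 1, 4, 4] becomes
+ # [0.5, 0.25, 1.0] after the division by p[3], i.e. normalized device
+ # coordinates in the [-1, 1] range; _calcCanvasCoord() in the vector
+ # writers below then maps these to canvas pixels.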
+
##
# Private methods
#
def _calcPerspectiveMatrix(self, fovy, aspect, near, far):
- """Return a perspective projection matrix."""
+ """Return a perspective projection matrix.
+ """
top = near * tan(fovy * pi / 360.0)
bottom = -top
return m
def _calcOrthoMatrix(self, fovy, aspect , near, far, scale):
- """Return an orthogonal projection matrix."""
+ """Return an orthogonal projection matrix.
+ """
- top = near * tan(fovy * pi / 360.0) * (scale * 10)
+ # The 11 in the formula was found empirically
+ top = near * tan(fovy * pi / 360.0) * (scale * 11)
bottom = -top
left = bottom * aspect
right= top * aspect
# ---------------------------------------------------------------------
#
-## Object representation class
+## Progress Indicator
+#
+# ---------------------------------------------------------------------
+
+class Progress:
+ """A model for a progress indicator.
+
+ Do the progress calculation and the
+ view-independent work of a progress indicator.
+ """
+ def __init__(self, steps=0):
+ self.name = ""
+ self.steps = steps
+ self.completed = 0
+ self.progress = 0
+
+ def setSteps(self, steps):
+ """Set the number of steps of the activity which we want to track.
+ """
+ self.steps = steps
+
+ def getSteps(self):
+ return self.steps
+
+ def setName(self, name):
+ """Set the name of the activity which we want to track.
+ """
+ self.name = name
+
+ def getName(self):
+ return self.name
+
+ def getProgress(self):
+ return self.progress
+
+ def reset(self):
+ self.completed = 0
+ self.progress = 0
+
+ def update(self):
+ """Update the model, call this method when one step is completed.
+ """
+ if self.progress == 100:
+ return False
+
+ self.completed += 1
+ self.progress = ( float(self.completed) / float(self.steps) ) * 100
+ self.progress = int(self.progress)
+
+ return True
+
+
+class ProgressIndicator:
+ """An abstraction of a View for the Progress Model
+ """
+ def __init__(self):
+
+ # Use a refresh rate so we do not show the progress at
+ # every update, but only every 'self.refresh_rate' updates.
+ self.refresh_rate = 10
+ self.shows_counter = 0
+
+ self.progressModel = None
+
+ def setActivity(self, name, steps):
+ """Initialize the Model.
+
+ In a future version (with subactivities-progress support) this method
+ would only set the current activity.
+ """
+ self.progressModel = Progress()
+ self.progressModel.setName(name)
+ self.progressModel.setSteps(steps)
+
+ def getActivity(self):
+ return self.progressModel
+
+ def update(self):
+ """Update the model and show the actual progress.
+ """
+ assert(self.progressModel)
+
+ if self.progressModel.update():
+ self.show(self.progressModel.getProgress(),
+ self.progressModel.getName())
+
+ # We always return True here so we can also call the update() method
+ # from lambda funcs (putting the call in logical AND with other ops)
+ return True
+
+ def show(self, progress, name=""):
+ self.shows_counter = (self.shows_counter + 1) % self.refresh_rate
+ if self.shows_counter != 0:
+ return
+
+ if progress == 100:
+ self.shows_counter = -1
+
+
+class ConsoleProgressIndicator(ProgressIndicator):
+ """Show a progress bar on stderr, a la wget.
+ """
+ def __init__(self):
+ ProgressIndicator.__init__(self)
+
+ self.swirl_chars = ["-", "\\", "|", "/"]
+ self.swirl_count = -1
+
+ def show(self, progress, name):
+ ProgressIndicator.show(self, progress, name)
+
+ bar_length = 70
+ bar_progress = int( (progress/100.0) * bar_length )
+ bar = ("=" * bar_progress).ljust(bar_length)
+
+ self.swirl_count = (self.swirl_count+1)%len(self.swirl_chars)
+ swirl_char = self.swirl_chars[self.swirl_count]
+
+ progress_bar = "%s |%s| %c %3d%%" % (name, bar, swirl_char, progress)
+
+ sys.stderr.write(progress_bar+"\r")
+ if progress == 100:
+ sys.stderr.write("\n")
+
+
+class GraphicalProgressIndicator(ProgressIndicator):
+ """Interface to the Blender.Window.DrawProgressBar() method.
+ """
+ def __init__(self):
+ ProgressIndicator.__init__(self)
+
+ #self.swirl_chars = ["-", "\\", "|", "/"]
+ # We have to use letters with the same width, for now!
+ # Blender progress bar considers the font widths when
+ # calculating the progress bar width.
+ self.swirl_chars = ["\\", "/"]
+ self.swirl_count = -1
+
+ def show(self, progress, name):
+ ProgressIndicator.show(self, progress)
+
+ self.swirl_count = (self.swirl_count+1)%len(self.swirl_chars)
+ swirl_char = self.swirl_chars[self.swirl_count]
+
+ progress_text = "%s - %c %3d%%" % (name, swirl_char, progress)
+
+ # Finally draw the Progress Bar
+ Window.WaitCursor(1) # Maybe we can move that call in the constructor?
+ Window.DrawProgressBar(progress/100.0, progress_text)
+
+ if progress == 100:
+ Window.DrawProgressBar(1, progress_text)
+ Window.WaitCursor(0)
+
+
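+# Usage sketch (hedged; this is how the HSR routines below drive the global
+# 'progress' object, not additional functionality):
+#
+#   progress = ConsoleProgressIndicator()
+#   progress.setActivity("HSR: Painter", total_steps)
+#   for step in range(total_steps):
+#       ...                 # do one unit of work
+#       progress.update()   # redraws the bar every few calls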
+
+# ---------------------------------------------------------------------
+#
+## 2D Object representation class
#
# ---------------------------------------------------------------------
# TODO: a class to represent the needed properties of a 2D vector image
-# Just use a NMesh structure?
+# For now just using a [N]Mesh structure.
# ---------------------------------------------------------------------
Every subclass of VectorWriter must have at least the following public
methods:
- - printCanvas(mesh) --- where mesh is as specified before.
+ - open(self)
+ - close(self)
+ - printCanvas(self, scene,
+ doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False):
"""
def __init__(self, fileName):
- """Open the file named #fileName# and set the canvas size."""
-
- self.file = open(fileName, "w")
- print "Outputting to: ", fileName
-
+ """Set the output file name and other properties"""
+ self.outputFileName = fileName
+ self.file = None
+
context = Scene.GetCurrent().getRenderingContext()
self.canvasSize = ( context.imageSizeX(), context.imageSizeY() )
-
+
+ self.startFrame = 1
+ self.endFrame = 1
+ self.animation = False
+
##
# Public Methods
#
- def printCanvas(mesh):
- return
-
- ##
- # Private Methods
- #
-
- def _printHeader():
+ def open(self, startFrame=1, endFrame=1):
+ if startFrame != endFrame:
+ self.startFrame = startFrame
+ self.endFrame = endFrame
+ self.animation = True
+
+ self.file = open(self.outputFileName, "w")
+ print "Outputting to: ", self.outputFileName
+
return
- def _printFooter():
+ def close(self):
+ self.file.close()
return
+ def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False,
+ showHiddenEdges=False):
+ """This is the interface for the needed printing routine.
+ """
+ return
+
## SVG Writer
class SVGVectorWriter(VectorWriter):
"""A concrete class for writing SVG output.
-
- The class does not support animations, yet.
- Sorry.
"""
- def __init__(self, file):
- """Simply call the parent Contructor."""
- VectorWriter.__init__(self, file)
+ def __init__(self, fileName):
+ """Simply call the parent Constructor.
+ """
+ VectorWriter.__init__(self, fileName)
##
# Public Methods
#
- def open(self):
+ def open(self, startFrame=1, endFrame=1):
+ """Do some initialization operations.
+ """
+ VectorWriter.open(self, startFrame, endFrame)
self._printHeader()
def close(self):
+ """Do some finalization operations.
+ """
self._printFooter()
+ # remember to call the close method of the parent
+ VectorWriter.close(self)
+
-
- def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False):
- """Convert the scene representation to SVG."""
+ def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False,
+ showHiddenEdges=False):
+ """Convert the scene representation to SVG.
+ """
Objects = scene.getChildren()
+
+ context = scene.getRenderingContext()
+ framenumber = context.currentFrame()
+
+ if self.animation:
+ framestyle = "display:none"
+ else:
+ framestyle = "display:block"
+
+ # Assign an id to this group so we can set properties on it using DOM
+ self.file.write("<g id=\"frame%d\" style=\"%s\">\n" %
+ (framenumber, framestyle) )
+
+
for obj in Objects:
if(obj.getType() != 'Mesh'):
continue
- #
- self.file.write("<g>\n")
+ self.file.write("<g id=\"%s\">\n" % obj.getName())
+
+ mesh = obj.getData(mesh=1)
-
if doPrintPolygons:
- for face in obj.getData().faces:
- self._printPolygon(face)
+ self._printPolygons(mesh)
if doPrintEdges:
- self._printEdges(obj.getData(), showHiddenEdges)
+ self._printEdges(mesh, showHiddenEdges)
self.file.write("</g>\n")
-
+
+ self.file.write("</g>\n")
+
##
# Private Methods
#
+ def _calcCanvasCoord(self, v):
+ """Convert vertex in scene coordinates to canvas coordinates.
+ """
+
+ pt = Vector([0, 0, 0])
+
+ mW = float(self.canvasSize[0])/2.0
+ mH = float(self.canvasSize[1])/2.0
+
+ # rescale to canvas size
+ pt[0] = v.co[0]*mW + mW
+ pt[1] = v.co[1]*mH + mH
+ pt[2] = v.co[2]
+
+ # For now we want (0,0) in the top-left corner of the canvas.
+ # Mirror and translate along y
+ pt[1] *= -1
+ pt[1] += self.canvasSize[1]
+
+ return pt
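+ # Worked example (assuming a 640x480 render): a vertex at normalized
+ # coordinates (0, 0) maps to the canvas center (320, 240), while (-1, 1)
+ # maps to the top-left corner (0, 0) after the y mirroring above.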
+
def _printHeader(self):
"""Print SVG header."""
self.file.write("<?xml version=\"1.0\"?>\n")
- self.file.write("<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n")
- self.file.write("\t\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n")
- self.file.write("<svg version=\"1.1\"\n")
+ self.file.write("<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\"\n")
+ self.file.write("\t\"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\">\n")
+ self.file.write("<svg version=\"1.0\"\n")
self.file.write("\txmlns=\"http://www.w3.org/2000/svg\"\n")
- self.file.write("\twidth=\"%d\" height=\"%d\" streamable=\"true\">\n\n" %
+ self.file.write("\twidth=\"%d\" height=\"%d\">\n\n" %
self.canvasSize)
+ if self.animation:
+
+ self.file.write("""\n<script type="text/javascript"><![CDATA[
+ globalStartFrame=%d;
+ globalEndFrame=%d;
+
+ /* FIXME: Use 1000 as interval as lower values gives problems */
+ timerID = setInterval("NextFrame()", 1000);
+ globalFrameCounter=%d;
+
+ function NextFrame()
+ {
+ currentElement = document.getElementById('frame'+globalFrameCounter)
+ previousElement = document.getElementById('frame'+(globalFrameCounter-1))
+
+ if (!currentElement)
+ {
+ return;
+ }
+
+ if (globalFrameCounter > globalEndFrame)
+ {
+ clearInterval(timerID)
+ }
+ else
+ {
+ if(previousElement)
+ {
+ previousElement.style.display="none";
+ }
+ currentElement.style.display="block";
+ globalFrameCounter++;
+ }
+ }
+ \n]]></script>\n
+ \n""" % (self.startFrame, self.endFrame, self.startFrame) )
+
def _printFooter(self):
"""Print the SVG footer."""
self.file.write("\n</svg>\n")
- self.file.close()
+
+ def _printPolygons(self, mesh):
+ """Print the selected (visible) polygons.
+ """
+
+ if len(mesh.faces) == 0:
+ return
+
+ self.file.write("<g>\n")
+
+ for face in mesh.faces:
+ if not face.sel:
+ continue
+
+ self.file.write("<path d=\"")
+
+ p = self._calcCanvasCoord(face.verts[0])
+ self.file.write("M %g,%g L " % (p[0], p[1]))
+
+ for v in face.verts[1:]:
+ p = self._calcCanvasCoord(v)
+ self.file.write("%g,%g " % (p[0], p[1]))
+
+ # get rid of the last blank space, just cosmetics here.
+ self.file.seek(-1, 1)
+ self.file.write(" z\"\n")
+
+ # take as face color the first vertex color
+ if face.col:
+ fcol = face.col[0]
+ color = [fcol.r, fcol.g, fcol.b, fcol.a]
+ else:
+ color = [255, 255, 255, 255]
+
+ # Convert the color to the #RRGGBB form
+ str_col = "#%02X%02X%02X" % (color[0], color[1], color[2])
+
+ # Handle transparent polygons
+ opacity_string = ""
+ if color[3] != 255:
+ opacity = float(color[3])/255.0
+ opacity_string = " fill-opacity: %g; stroke-opacity: %g; opacity: 1;" % (opacity, opacity)
+
+ self.file.write("\tstyle=\"fill:" + str_col + ";")
+ self.file.write(opacity_string)
+
+ # use the stroke property to alleviate the "adjacent edges" problem,
+ # we simulate polygon expansion using borders,
+ # see http://www.antigrain.com/svg/index.html for more info
+ stroke_width = 1.0
+
+ if config.polygons['EXPANSION_TRICK']:
+ self.file.write(" stroke:%s;\n" % str_col)
+ self.file.write(" stroke-width:" + str(stroke_width) + ";\n")
+ self.file.write(" stroke-linecap:round;stroke-linejoin:round")
+
+ self.file.write("\"/>\n")
+
+ self.file.write("</g>\n")
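+ # For reference, a single selected quad comes out roughly as follows
+ # (coordinates and color are only illustrative):
+ #
+ #   <path d="M 10,10 L 90,10 90,90 10,90 z"
+ #         style="fill:#AABBCC; stroke:#AABBCC; stroke-width:1.0;
+ #         stroke-linecap:round;stroke-linejoin:round"/>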
def _printEdges(self, mesh, showHiddenEdges=False):
- """Print the wireframe using mesh edges... is this the correct way?
+ """Print the wireframe using mesh edges.
"""
- stroke_width=0.5
- stroke_col = [0, 0, 0]
+ stroke_width = config.edges['WIDTH']
+ stroke_col = config.edges['COLOR']
self.file.write("<g>\n")
hidden_stroke_style = ""
- # And edge is selected if both vertives are selected
- if e.v1.sel == 0 or e.v2.sel == 0:
+ if e.sel == 0:
if showHiddenEdges == False:
continue
else:
self.file.write("\"/>\n")
self.file.write("</g>\n")
-
-
-
- def _printPolygon(self, face):
- """Print our primitive, finally.
- """
-
- wireframe = False
-
- stroke_width=0.5
-
- self.file.write("<polygon points=\"")
-
- for v in face:
- p = self._calcCanvasCoord(v)
- self.file.write("%g,%g " % (p[0], p[1]))
-
- self.file.seek(-1,1) # get rid of the last space
- self.file.write("\"\n")
-
- #take as face color the first vertex color
- if face.col:
- fcol = face.col[0]
- color = [fcol.r, fcol.g, fcol.b]
- else:
- color = [ 255, 255, 255]
-
- stroke_col = [0, 0, 0]
- if not wireframe:
- stroke_col = color
-
- self.file.write("\tstyle=\"fill:rgb("+str(color[0])+","+str(color[1])+","+str(color[2])+");")
- self.file.write(" stroke:rgb("+str(stroke_col[0])+","+str(stroke_col[1])+","+str(stroke_col[2])+");")
- self.file.write(" stroke-width:"+str(stroke_width)+";\n")
- self.file.write(" stroke-linecap:round;stroke-linejoin:round")
- self.file.write("\"/>\n")
-
- def _calcCanvasCoord(self, v):
-
- pt = Vector([0, 0, 0])
-
- mW = self.canvasSize[0]/2
- mH = self.canvasSize[1]/2
-
- # rescale to canvas size
- pt[0] = round(v[0]*mW)+mW
- pt[1] = round(v[1]*mH)+mH
-
- # For now we want (0,0) in the top-left corner of the canvas
- # Mirror and translate along y
- pt[1] *= -1
- pt[1] += self.canvasSize[1]
-
- return pt
# ---------------------------------------------------------------------
#
# ---------------------------------------------------------------------
+# A dictionary to collect different shading style methods
+shadingStyles = dict()
+shadingStyles['FLAT'] = None
+shadingStyles['TOON'] = None
+
+# A dictionary to collect different edge style methods
+edgeStyles = dict()
+edgeStyles['MESH'] = MeshUtils.isMeshEdge
+edgeStyles['SILHOUETTE'] = MeshUtils.isSilhouetteEdge
+
+# A dictionary to collect the supported output formats
+outputWriters = dict()
+outputWriters['SVG'] = SVGVectorWriter
+
+
class Renderer:
- """Render a scene viewed from a given camera.
+ """Render a scene viewed from the active camera.
- This class is responsible of the rendering process, hence transformation
- and projection of the ojects in the scene are invoked by the renderer.
+ This class is responsible for the rendering process; transformation and
+ projection of the objects in the scene are invoked by the renderer.
- The user can optionally provide a specific camera for the rendering, see
- the #doRendering# method for more informations.
+ The rendering is done using the active camera for the current scene.
"""
def __init__(self):
"""Make the rendering process only for the current scene by default.
+
+ We will work on a copy of the scene, to be sure that the current scene does
+ not get modified in any way.
"""
- # Render the current Scene set as a READ-ONLY property
+ # Render the current Scene, this should be a READ-ONLY property
self._SCENE = Scene.GetCurrent()
# Use the aspect ratio of the scene rendering context
context = self._SCENE.getRenderingContext()
- self.canvasRatio = (context.aspectRatioX(), context.aspectRatioY())
+
+ aspect_ratio = float(context.imageSizeX())/float(context.imageSizeY())
+ self.canvasRatio = (float(context.aspectRatioX())*aspect_ratio,
+ float(context.aspectRatioY())
+ )
# Render from the currently active camera
- self.camera = self._SCENE.getCurrentCamera()
+ self.cameraObj = self._SCENE.getCurrentCamera()
+
+ # Get a projector for this camera.
+ # NOTE: the projector wants object in world coordinates,
+ # so we should remember to apply modelview transformations
+ # _before_ we do projection transformations.
+ self.proj = Projector(self.cameraObj, self.canvasRatio)
+
+ # Get the list of lighting sources
+ obj_lst = self._SCENE.getChildren()
+ self.lights = [ o for o in obj_lst if o.getType() == 'Lamp']
+
+ # When there are no lights we use a default lighting source
+ # that has the same position as the camera
+ if len(self.lights) == 0:
+ l = Lamp.New('Lamp')
+ lobj = Object.New('Lamp')
+ lobj.loc = self.cameraObj.loc
+ lobj.link(l)
+ self.lights.append(lobj)
##
# Public Methods
#
- def doRendering(self, outputWriter, animation=0):
+ def doRendering(self, outputWriter, animation=False):
"""Render picture or animation and write it out.
The parameters are:
- - a Vector writer object than will be used to output the result.
- - a flag to tell if we want to render an animation or the only
+ - a Vector writer object that will be used to output the result.
+ - a flag to tell if we want to render an animation or only the
current frame.
"""
context = self._SCENE.getRenderingContext()
- currentFrame = context.currentFrame()
+ origCurrentFrame = context.currentFrame()
# Handle the animation case
- if animation == 0:
- startFrame = currentFrame
+ if not animation:
+ startFrame = origCurrentFrame
endFrame = startFrame
+ outputWriter.open()
else:
startFrame = context.startFrame()
endFrame = context.endFrame()
+ outputWriter.open(startFrame, endFrame)
# Do the rendering process frame by frame
- print "Start Rendering!"
- for f in range(startFrame, endFrame+1):
+ print "Start Rendering of %d frames" % (endFrame-startFrame+1)
+ for f in xrange(startFrame, endFrame+1):
+ print "\n\nFrame: %d" % f
context.currentFrame(f)
- renderedScene = self.doRenderScene(self._SCENE)
+
+ # Use some temporary workspace, a full copy of the scene
+ inputScene = self._SCENE.copy(2)
+ # And Set our camera accordingly
+ self.cameraObj = inputScene.getCurrentCamera()
+
+ try:
+ renderedScene = self.doRenderScene(inputScene)
+ except:
+ print "There was an error! Aborting."
+ import traceback
+ traceback.print_exc()
+
+ self._SCENE.makeCurrent()
+ Scene.unlink(inputScene)
+ del inputScene
+ return
+
outputWriter.printCanvas(renderedScene,
- doPrintPolygons=False, doPrintEdges=True, showHiddenEdges=True)
+ doPrintPolygons = config.polygons['SHOW'],
+ doPrintEdges = config.edges['SHOW'],
+ showHiddenEdges = config.edges['SHOW_HIDDEN'])
- # clear the rendered scene
+ # delete the rendered scene
self._SCENE.makeCurrent()
Scene.unlink(renderedScene)
del renderedScene
+ outputWriter.close()
print "Done!"
- context.currentFrame(currentFrame)
-
+ context.currentFrame(origCurrentFrame)
- def doRenderScene(self, inputScene):
+ def doRenderScene(self, workScene):
"""Control the rendering process.
Here we control the entire rendering process invoking the operation
needed to transform and project the 3D scene in two dimensions.
"""
- # Use some temporary workspace, a full copy of the scene
- workScene = inputScene.copy(2)
-
- # Get a projector for this scene.
- # NOTE: the projector wants object in world coordinates,
- # so we should apply modelview transformations _before_
- # projection transformations
- proj = Projector(self.camera, self.canvasRatio)
-
# global processing of the scene
- self._doDepthSorting(workScene)
+
+ self._doSceneClipping(workScene)
+
+ self._doConvertGeometricObjsToMesh(workScene)
+
+ if config.output['JOIN_OBJECTS']:
+ self._joinMeshObjectsInScene(workScene)
+
+ self._doSceneDepthSorting(workScene)
# Per object activities
+
Objects = workScene.getChildren()
-
- for obj in Objects:
-
- if (obj.getType() != 'Mesh'):
- print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
+ print "Total Objects: %d" % len(Objects)
+ for i,obj in enumerate(Objects):
+ print "Rendering Object: %d" % i
+
+ if obj.getType() != 'Mesh':
+ print "Only Mesh supported! - Skipping type:", obj.getType()
continue
- #
- self._doModelViewTransformations(obj)
+ print "Rendering: ", obj.getName()
- self._doBackFaceCulling(obj)
-
- self._doColorAndLighting(obj)
+ mesh = obj.getData(mesh=1)
- # 'style' can be a function that determine
- # if an edge should be showed?
- self._doEdgesStyle(obj, style=None)
-
- self._doProjection(obj, proj)
+ self._doModelingTransformation(mesh, obj.matrix)
- return workScene
+ self._doBackFaceCulling(mesh)
+ self._doLighting(mesh)
- def oldRenderScene(scene):
-
- # Per object activities
- Objects = workScene.getChildren()
-
- for obj in Objects:
-
- if (obj.getType() != 'Mesh'):
- print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
- continue
-
- # Get a projector for this object
- proj = Projector(self.camera, obj, self.canvasSize)
+ # Do "projection" now so we perform further processing
+ # in Normalized View Coordinates
+ self._doProjection(mesh, self.proj)
- # Let's store the transformed data
- transformed_mesh = NMesh.New("flat"+obj.name)
- transformed_mesh.hasVertexColours(1)
+ self._doViewFrustumClipping(mesh)
- # process Edges
- self._doProcessEdges(obj)
-
- for v in obj.getData().verts:
- transformed_mesh.verts.append(v)
- transformed_mesh.edges = self._processEdges(obj.getData().edges)
- #print transformed_mesh.edges
+ self._doHiddenSurfaceRemoval(mesh)
-
- # Store the materials
- materials = obj.getData().getMaterials()
-
- meshfaces = obj.getData().faces
-
- for face in meshfaces:
-
- # if the face is visible flatten it on the "picture plane"
- if self._isFaceVisible(face, obj, cameraObj):
-
- # Store transformed face
- newface = NMesh.Face()
-
- for vert in face:
-
- p = proj.doProjection(vert.co)
-
- tmp_vert = NMesh.Vert(p[0], p[1], p[2])
-
- # Add the vert to the mesh
- transformed_mesh.verts.append(tmp_vert)
-
- newface.v.append(tmp_vert)
-
-
- # Per-face color calculation
- # code taken mostly from the original vrm script
- # TODO: understand the code and rewrite it clearly
- ambient = -150
-
- fakelight = Object.Get("Lamp").loc
- if fakelight == None:
- fakelight = [1.0, 1.0, -0.3]
-
- norm = Vector(face.no)
- vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2])
- vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2)))
- intensity = floor(ambient + 200*acos(vektori/vduzine))/200
- if intensity < 0:
- intensity = 0
-
- if materials:
- tmp_col = materials[face.mat].getRGBCol()
- else:
- tmp_col = [0.5, 0.5, 0.5]
-
- tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ]
+ self._doEdgesStyle(mesh, edgeStyles[config.edges['STYLE']])
- vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2])
- newface.col = [vcol, vcol, vcol, 255]
-
- transformed_mesh.addFace(newface)
-
- # at the end of the loop on obj
- transformed_obj = Object.New(obj.getType(), "flat"+obj.name)
- transformed_obj.link(transformed_mesh)
- transformed_obj.loc = obj.loc
- newscene.link(transformed_obj)
+ # Update the object data, important! :)
+ mesh.update()
-
return workScene
# Private Methods
#
+ # Utility methods
+
+ def _getObjPosition(self, obj):
+ """Return the obj position in World coordinates.
+ """
+ return obj.matrix.translationPart()
+
+ def _cameraViewVector(self):
+ """Get the View Direction from the camera matrix.
+ """
+ return Vector(self.cameraObj.matrix[2]).resize3D()
+
+
# Faces methods
- def _isFaceVisible(self, face, obj, camObj):
- """Determine if a face of an object is visible from a given camera.
+ def _isFaceVisible(self, face):
+ """Determine if a face of an object is visible from the current camera.
- The normals need to be transformed, but note that we should apply only the
- rotation part of the tranformation matrix, since the normals are
- normalized and they can be intended as starting from the origin.
-
The view vector is calculated from the camera location and one of the
vertices of the face (expressed in World coordinates, after applying
modelview transformations).
- After those transformations we determine if a face is visible by computing
- the angle between the face normal and the view vector, this angle
- corresponds somehow to the dot product between the two. If the product
- results <= 0 then the angle between the two vectors is less that 90
- degrees and then the face is visible.
+ After those transformations we determine if a face is visible by
+ computing the angle between the face normal and the view vector; this
+ angle has to be between -90 and 90 degrees for the face to be visible.
+ This corresponds to the sign of the dot product between the two: if it
+ is > 0 then the face is visible.
There is no need to normalize those vectors since we are only interested in
the sign of the dot product and not in the product value.
- """
- # The transformation matrix of the object
- mObj = Matrix(obj.getMatrix())
- mObj.transpose()
+ NOTE: here we assume the face vertices are in World Coordinates, so
+ please transform the object _before_ doing the test.
+ """
- # The normal after applying the current object rotation
- #normal = mObj.rotationPart() * Vector(face.no)
normal = Vector(face.no)
+ camPos = self._getObjPosition(self.cameraObj)
+ view_vect = None
- # View vector in orthographics projections can be considered simply s the
- # camera position
- #view_vect = Vector(camObj.loc)
+ # The view vector in orthographic projections is the view direction of
+ # the camera
+ if self.cameraObj.data.getType() == 1:
+ view_vect = self._cameraViewVector()
- # View vector as in perspective projections
- # it is the dofference between the camera position and
- # one point of the face, we choose the first point,
- # but maybe a better choice may be the farthest point from the camera.
- point = Vector(face[0].co)
- #point = mObj * point.resize4D()
- #point.resize3D()
- view_vect = Vector(camObj.loc) - point
-
+ # View vector in perspective projections can be considered as
+ # the difference between the camera position and one point of
+ # the face; we choose the farthest point from the camera.
+ if self.cameraObj.data.getType() == 0:
+ vv = max( [ ((camPos - Vector(v.co)).length, (camPos - Vector(v.co))) for v in face] )
+ view_vect = vv[1]
- # if d <= 0 the face is visible from the camera
+
+ # if d > 0 the face is visible from the camera
d = view_vect * normal
- if d <= 0:
- return False
- else:
+ if d > 0:
return True
+ else:
+ return False
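+ # Worked example (illustrative numbers): with view_vect = (0, 0, 1) and a
+ # face normal of (0, 0.7, 0.7) the dot product is 0.7 > 0, so the face is
+ # front-facing and kept; a normal of (0, 0.7, -0.7) gives -0.7 and the
+ # face is culled.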
# Scene methods
- def _doClipping():
- return
+ def _doSceneClipping(self, scene):
+ """Clip whole objects against the View Frustum.
+
+ For now we only clip away whole objects, according to their center position.
+ """
- def _doDepthSorting(self, scene):
+ cpos = self._getObjPosition(self.cameraObj)
+ view_vect = self._cameraViewVector()
- cameraObj = self.camera
- Objects = scene.getChildren()
+ near = self.cameraObj.data.clipStart
+ far = self.cameraObj.data.clipEnd
- Objects.sort(lambda obj1, obj2:
- cmp(Vector(Vector(cameraObj.loc) - Vector(obj1.loc)).length,
- Vector(Vector(cameraObj.loc) - Vector(obj2.loc)).length
- )
- )
-
- # hackish sorting of faces according to the max z value of a vertex
+ aspect = float(self.canvasRatio[0])/float(self.canvasRatio[1])
+ fovy = atan(0.5/aspect/(self.cameraObj.data.lens/32))
+ fovy = fovy * 360.0/pi
+
+ Objects = scene.getChildren()
for o in Objects:
+ if o.getType() != 'Mesh': continue;
- if (o.getType() != 'Mesh'):
- continue
- #
-
- mesh = o.data
- mesh.faces.sort(
- lambda f1, f2:
- # Sort faces according to the min z coordinate in a face
- #cmp(min([v[2] for v in f1]), min([v[2] for v in f2])))
-
- # Sort faces according to the max z coordinate in a face
- cmp(max([v[2] for v in f1]), max([v[2] for v in f2])))
-
- # Sort faces according to the avg z coordinate in a face
- #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))
- mesh.faces.reverse()
- mesh.update()
+ obj_vect = Vector(cpos) - self._getObjPosition(o)
+
+ d = obj_vect*view_vect
+ theta = AngleBetweenVecs(obj_vect, view_vect)
+ # if the object is outside the view frustum, clip it away
+ if (d < near) or (d > far) or (theta > fovy):
+ scene.unlink(o)
+
+ def _doConvertGeometricObjsToMesh(self, scene):
+ """Convert all "geometric" objects to mesh ones.
+ """
+ geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text']
+
+ Objects = scene.getChildren()
+ objList = [ o for o in Objects if o.getType() in geometricObjTypes ]
+ for obj in objList:
+ old_obj = obj
+ obj = self._convertToRawMeshObj(obj)
+ scene.link(obj)
+ scene.unlink(old_obj)
+
+
+ # XXX Workaround for Text and Curve which have some normals
+ # inverted when they are converted to Mesh, REMOVE this when
+ # Blender fixes that!!
+ if old_obj.getType() in ['Curve', 'Text']:
+ me = obj.getData(mesh=1)
+ for f in me.faces: f.sel = 1;
+ for v in me.verts: v.sel = 1;
+ me.remDoubles(0)
+ me.triangleToQuad()
+ me.recalcNormals()
+ me.update()
+
+
+ def _doSceneDepthSorting(self, scene):
+ """Sort objects in the scene.
+
+ The object sorting is done according to the object centers.
+ """
+
+ c = self._getObjPosition(self.cameraObj)
+
+ by_center_pos = (lambda o1, o2:
+ (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and
+ cmp((self._getObjPosition(o1) - Vector(c)).length,
+ (self._getObjPosition(o2) - Vector(c)).length)
+ )
+
+ # TODO: implement sorting by bounding box, if obj1.bb is inside obj2.bb,
+ # then ob1 goes farther than obj2, useful when obj2 has holes
+ by_bbox = None
+
+ Objects = scene.getChildren()
+ Objects.sort(by_center_pos)
+
# update the scene
- # FIXME: check if it is correct
- scene.update()
- #for o in scene.getChildren():
- # scene.unlink(o)
- #for o in Objects:
- # scene.link(o)
+ for o in Objects:
+ scene.unlink(o)
+ scene.link(o)
- # Per object methods
+ def _joinMeshObjectsInScene(self, scene):
+ """Merge all the Mesh Objects in a scene into a single Mesh Object.
+ """
+
+ oList = [o for o in scene.getChildren() if o.getType()=='Mesh']
- def _doModelViewTransformations(self, object):
- if(object.getType() != 'Mesh'):
+ # FIXME: Object.join() does not work if the list contains 1 object
+ if len(oList) == 1:
return
-
- matMV = object.matrix
- mesh = object.data
- mesh.transform(matMV, True)
- mesh.update()
+ mesh = Mesh.New('BigOne')
+ bigObj = Object.New('Mesh', 'BigOne')
+ bigObj.link(mesh)
+
+ scene.link(bigObj)
- def _doBackFaceCulling(self, object):
- if(object.getType() != 'Mesh'):
+ try:
+ bigObj.join(oList)
+ except RuntimeError:
+ print "\nWarning! - Can't Join Objects\n"
+ scene.unlink(bigObj)
return
+ except TypeError:
+ print "Objects Type error?"
- print "doing Backface Culling"
- mesh = object.data
+ for o in oList:
+ scene.unlink(o)
+
+ scene.update()
+
+
+ # Per object/mesh methods
+
+ def _convertToRawMeshObj(self, object):
+ """Convert a geometry-based object to a mesh object.
+ """
+ me = Mesh.New('RawMesh_'+object.name)
+ me.getFromObject(object.name)
+
+ newObject = Object.New('Mesh', 'RawMesh_'+object.name)
+ newObject.link(me)
+
+ # If the object has no materials set a default material
+ if not me.materials:
+ me.materials = [Material.New()]
+ #for f in me.faces: f.mat = 0
+
+ newObject.setMatrix(object.getMatrix())
+
+ return newObject
+
+ def _doModelingTransformation(self, mesh, matrix):
+ """Transform object coordinates to world coordinates.
+
+ This step is done by simply applying the object's transformation
+ matrix and recalculating its normals.
+ """
+ # XXX FIXME: Blender does not transform normals in the right way when
+ # there are negative scale values
+ if matrix[0][0] < 0 or matrix[1][1] < 0 or matrix[2][2] < 0:
+ print "WARNING: Negative scales, expect incorrect results!"
+
+ mesh.transform(matrix, True)
+
+ def _doBackFaceCulling(self, mesh):
+ """Simple Backface Culling routine.
+
+ At this level we simply do a visibility test face by face and then
+ select the vertices belonging to visible faces.
+ """
- # Select all vertices, so edges without faces can be displayed
+ # Select all vertices, so edges can be displayed even if there are no
+ # faces
for v in mesh.verts:
v.sel = 1
# Loop on faces
for f in mesh.faces:
f.sel = 0
- if self._isFaceVisible(f, object, self.camera):
+ if self._isFaceVisible(f):
f.sel = 1
+ def _doLighting(self, mesh):
+ """Apply an Illumination and shading model to the object.
+
+ The model used is the Phong one; it may be inefficient,
+ but I'm just learning about rendering and starting from Phong seemed
+ the most natural way.
+ """
+
+ # If the mesh has vertex colors already, use them,
+ # otherwise turn them on and do some calculations
+ if mesh.vertexColors:
+ return
+ mesh.vertexColors = 1
+
+ materials = mesh.materials
+
+ camPos = self._getObjPosition(self.cameraObj)
+
+ # We do per-face color calculation (FLAT Shading); we can easily turn
+ # to a per-vertex calculation if we want to implement some shading
+ # technique. For an example see:
+ # http://www.miralab.unige.ch/papers/368.pdf
for f in mesh.faces:
if not f.sel:
- for v in f:
- v.sel = 0
+ continue
- for f in mesh.faces:
- if f.sel:
- for v in f:
- v.sel = 1
+ mat = None
+ if materials:
+ mat = materials[f.mat]
+ # Check if it is a shadeless material
+ if mat.getMode() & Material.Modes['SHADELESS']:
+ I = mat.getRGBCol()
+ # Convert to a value between 0 and 255
+ tmp_col = [ int(c * 255.0) for c in I]
+
+ for c in f.col:
+ c.r = tmp_col[0]
+ c.g = tmp_col[1]
+ c.b = tmp_col[2]
+ #c.a = tmp_col[3]
+
+ continue
+
+
+
+ # A new default material
+ if mat == None:
+ mat = Material.New('defMat')
+
+ # do vertex color calculation
+
+ TotDiffSpec = Vector([0.0, 0.0, 0.0])
+
+ for l in self.lights:
+ light_obj = l
+ light_pos = self._getObjPosition(l)
+ light = light_obj.data
+
+ L = Vector(light_pos).normalize()
+
+ V = (Vector(camPos) - Vector(f.cent)).normalize()
+
+ N = Vector(f.no).normalize()
+
+ if config.polygons['SHADING'] == 'TOON':
+ NL = ShadingUtils.toonShading(N*L)
+ else:
+ NL = (N*L)
+
+ # Should we use NL instead of (N*L) here?
+ R = 2 * (N*L) * N - L
+
+ Ip = light.getEnergy()
- mesh.update()
+ # Diffuse co-efficient
+ kd = mat.getRef() * Vector(mat.getRGBCol())
+ for i in [0, 1, 2]:
+ kd[i] *= light.col[i]
+ Idiff = Ip * kd * max(0, NL)
+
+
+ # Specular component
+ ks = mat.getSpec() * Vector(mat.getSpecCol())
+ ns = mat.getHardness()
+ Ispec = Ip * ks * pow(max(0, (V*R)), ns)
+
+ TotDiffSpec += (Idiff+Ispec)
+
+
+ # Ambient component
+ Iamb = Vector(Blender.World.Get()[0].getAmb())
+ ka = mat.getAmb()
+
+ # Emissive component (convert to a triplet)
+ ki = Vector([mat.getEmit()]*3)
+
+ #I = ki + Iamb + (Idiff + Ispec)
+ I = ki + (ka * Iamb) + TotDiffSpec
+
+
+ # Set Alpha component
+ I = list(I)
+ I.append(mat.getAlpha())
+
+ # Clamp I values between 0 and 1
+ I = [ min(c, 1) for c in I]
+ I = [ max(0, c) for c in I]
+
+ # Convert to a value between 0 and 255
+ tmp_col = [ int(c * 255.0) for c in I]
+
+ for c in f.col:
+ c.r = tmp_col[0]
+ c.g = tmp_col[1]
+ c.b = tmp_col[2]
+ c.a = tmp_col[3]
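+
+ # Summarizing the shading computed above, per face:
+ #
+ #   I = ki + ka*Iamb + sum over lights( Ip*kd*max(0, N.L)
+ #                                     + Ip*ks*max(0, V.R)^ns )
+ #
+ # with kd, ks, ka, ns derived from the material, Ip from the lamp energy,
+ # R = 2*(N.L)*N - L the reflection vector, and N.L optionally quantized
+ # by the toon shade map when TOON shading is selected.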
+
+ def _doProjection(self, mesh, projector):
+ """Apply Viewing and Projection transformations.
+ """
+
+ for v in mesh.verts:
+ p = projector.doProjection(v.co[:])
+ v.co[0] = p[0]
+ v.co[1] = p[1]
+ v.co[2] = p[2]
+
+ #mesh.recalcNormals()
+ #mesh.update()
+
+ # We could reset the Camera matrix, since now
+ # we are in Normalized Viewing Coordinates,
+ # but doing that would affect World Coordinate
+ # processing for other objects
+
+ #self.cameraObj.data.type = 1
+ #self.cameraObj.data.scale = 2.0
+ #m = Matrix().identity()
+ #self.cameraObj.setMatrix(m)
+
+ def _doViewFrustumClipping(self, mesh):
+ """Clip faces against the View Frustum.
+ """
+
+ # HSR routines
+ def __simpleDepthSort(self, mesh):
+ """Sort faces by the furthest vertex.
+
+ This simple method is also known as the painter's algorithm, and it
+ solves HSR correctly only for convex meshes.
+ """
+
+ global progress
+ # The sorting requires circa n*log(n) steps
+ n = len(mesh.faces)
+ progress.setActivity("HSR: Painter", n*log(n))
- #Mesh.Mode(Mesh.SelectModes['VERTEX'])
+ by_furthest_z = (lambda f1, f2: progress.update() and
+ cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2]))
+ )
- def _doColorAndLighting(self, object):
- return
+ # FIXME: using NMesh to sort faces. We should avoid that!
+ nmesh = NMesh.GetRaw(mesh.name)
- def _doEdgesStyle(self, object, style):
- """Process Mesh Edges. (For now copy the edge data, in next version it
- can be a place where recognize silouhettes and/or contours).
+ # remember that _higher_ z values mean further points
+ nmesh.faces.sort(by_furthest_z)
+ nmesh.faces.reverse()
- input: an edge list
- return: a processed edge list
+ nmesh.update()
+
+ def __topologicalDepthSort(self, mesh):
+ """Depth sorting based on topological occlusion.
+
+ Build the occlusion graph of the mesh,
+ and then do a topological sort on that graph.
"""
return
- def _doProjection(self, object, projector):
+ def __newellDepthSort(self, mesh):
+ """Newell's depth sorting.
+
+ """
+ by_furthest_z = (lambda f1, f2:
+ cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2]))
+ )
+
+ def Distance(point, face):
+ """ Calculate the distance between a point and a face.
+
+ An alternative but more expensive method can be:
+
+ ip = Intersect(Vector(face[0]), Vector(face[1]), Vector(face[2]),
+ Vector(face.no), Vector(point), 0)
+
+ d = Vector(ip - point).length
+ """
+
+ plNormal = Vector(face.no)
+ plVert0 = Vector(face[0])
+
+ #d = abs( (point * plNormal ) - (plVert0 * plNormal) )
+ d = (point * plNormal ) - (plVert0 * plNormal)
+ debug("d: "+ str(d) + "\n")
+
+ return d
+
+
+ # FIXME: using NMesh to sort faces. We should avoid that!
+ nmesh = NMesh.GetRaw(mesh.name)
+
+ # remember that _higher_ z values mean further points
+ nmesh.faces.sort(by_furthest_z)
+ nmesh.faces.reverse()
- if(object.getType() != 'Mesh'):
- return
- mesh = object.data
- for v in mesh.verts:
- p = projector.doProjection(v.co)
- v[0] = p[0]
- v[1] = p[1]
- v[2] = p[2]
- mesh.update()
+ # Begin depth sort tests
+
+ # use the smooth flag to set marked faces
+ for f in nmesh.faces:
+ f.smooth = 0
+
+ facelist = nmesh.faces[:]
+ maplist = []
+
+ EPS = 10e-7
+
+ global progress
+ progress.setActivity("HSR: Newell", len(facelist))
+
+ while len(facelist):
+ P = facelist[0]
+
+ pSign = 1
+ if P.sel == 0:
+ pSign = -1
+
+ for Q in facelist[1:]:
+
+ debug("P.smooth: " + str(P.smooth) + "\n")
+ debug("Q.smooth: " + str(Q.smooth) + "\n")
+ debug("\n")
+
+ qSign = 1
+ if Q.sel == 0:
+ qSign = -1
+
+ # We need to test only those Qs whose furthest vertex
+ # is closer to the observer than the closest vertex of P.
+
+ zP = [v.co[2] for v in P.v]
+ zQ = [v.co[2] for v in Q.v]
+ ZOverlap = min(zP) < max(zQ)
+
+ if not ZOverlap:
+ if not Q.smooth:
+ # We can safely print P
+ break
+ else:
+ continue
+
+ # Test 1: X extent overlapping
+ xP = [v.co[0] for v in P.v]
+ xQ = [v.co[0] for v in Q.v]
+ notXOverlap = (max(xP) < min(xQ)) or (max(xQ) < min(xP))
+
+ if notXOverlap:
+ continue
+
+ # Test 2: Y extent Overlapping
+ yP = [v.co[1] for v in P.v]
+ yQ = [v.co[1] for v in Q.v]
+ notYOverlap = (max(yP) < min(yQ)) or (max(yQ) < min(yP))
+
+ if notYOverlap:
+ continue
+
+
+ # Test 3: P vertices are all behind the plane of Q
+ n = 0
+ for Pi in P:
+ d = qSign * Distance(Vector(Pi), Q)
+ if d < EPS:
+ n += 1
+ pVerticesBehindPlaneQ = (n == len(P))
+
+ if pVerticesBehindPlaneQ:
+ debug("\nTest 3\n")
+ debug("P BEHIND Q!\n")
+ continue
+
+ # Test 4: Q vertices in front of the plane of P
+ n = 0
+ for Qi in Q:
+ d = pSign * Distance(Vector(Qi), P)
+ if d >= EPS:
+ n += 1
+ qVerticesInFrontPlaneP = (n == len(Q))
+
+ if qVerticesInFrontPlaneP:
+ debug("\nTest 4\n")
+ debug("Q IN FRONT OF P!\n")
+ continue
+
+ # Test 5: Line Intersections... TODO
+
+
+ # We do not know if P obscures Q.
+ if Q.smooth == 1:
+ # Split P or Q, TODO
+ debug("Split here!!\n")
+ continue
+
+
+ # The question now is: Does Q obscure P?
+
+ # Test 3bis: Q vertices are all behind the plane of P
+ n = 0
+ for Qi in Q:
+ d = pSign * Distance(Vector(Qi), P)
+ if d < EPS:
+ n += 1
+ qVerticesBehindPlaneP = (n == len(Q))
+
+
+ # Test 4bis: P vertices in front of the plane of Q
+ n = 0
+ for Pi in P:
+ d = qSign * Distance(Vector(Pi), Q)
+ if d >= EPS:
+ n += 1
+ pVerticesInFrontPlaneQ = (n == len(P))
+
+
+ """
+ import intersection
+
+ if not qVerticesBehindPlaneP and not pVerticesInFrontPlaneQ:
+ # Split P or Q, TODO
+ print "Test 3bis or 4bis failed"
+ print "Split here!!2\n"
+
+ newfaces = intersection.splitOn(nmesh, P, Q, 0)
+ facelist.remove(Q)
+ for nf in newfaces:
+ if nf:
+ nf.col = Q.col
+ facelist.append(nf)
+
+ break
+
+ # We do not know
+ if Q.smooth:
+ # split P or Q
+ print "Split here!!\n"
+ newfaces = intersection.splitOn(nmesh, P, Q, 0)
+ facelist.remove(Q)
+ for nf in newfaces:
+ if nf:
+ nf.col = Q.col
+ facelist.append(nf)
+
+ break
+ """
+
+ Q.smooth = 1
+ facelist.remove(Q)
+ facelist.insert(0, Q)
+
+ # Write P!
+ facelist.remove(P)
+ maplist.append(P)
+
+ progress.update()
+
+
+ nmesh.faces = maplist
+
+ for f in nmesh.faces:
+ f.sel = 1
+ nmesh.update()
+
+ def _doHiddenSurfaceRemoval(self, mesh):
+ """Do HSR for the given mesh.
+ """
+ if len(mesh.faces) == 0:
+ return
+
+ if config.polygons['HSR'] == 'PAINTER':
+ print "\n\nUsing the Painter algorithm for HSR.\n"
+ self.__simpleDepthSort(mesh)
+
+ elif config.polygons['HSR'] == 'NEWELL':
+ print "\n\nUsing Newell's algorithm for HSR.\n"
+ self.__newellDepthSort(mesh)
+
+
+ def _doEdgesStyle(self, mesh, edgestyleSelect):
+ """Process Mesh Edges according to a given selection style.
+
+ Examples of algorithms:
+
+ Contours:
+ given an edge, if its adjacent faces have the same normal (that is,
+ they are coplanar), then deselect it; a sketch of this rule is given
+ at the end of this method.
+
+ Silhouettes:
+ given an edge, if one of its adjacent faces is front-facing and the
+ other is back-facing, then select it, else deselect it.
+ """
+
+ Mesh.Mode(Mesh.SelectModes['EDGE'])
+
+ edge_cache = MeshUtils.buildEdgeFaceUsersCache(mesh)
+
+ for i,edge_faces in enumerate(edge_cache):
+ mesh.edges[i].sel = 0
+ if edgestyleSelect(edge_faces):
+ mesh.edges[i].sel = 1
+
+ """
+ for e in mesh.edges:
+
+ e.sel = 0
+ if edgestyleSelect(e, mesh):
+ e.sel = 1
+ """
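+
+ # A contour rule like the one described in the docstring could be
+ # sketched as follows (hedged, not implemented in this release):
+ #
+ #   def isContourEdge(adjacent_faces):
+ #       if len(adjacent_faces) != 2:
+ #           return True
+ #       n1 = Vector(adjacent_faces[0].no)
+ #       n2 = Vector(adjacent_faces[1].no)
+ #       # coplanar faces have (nearly) parallel normals
+ #       return AngleBetweenVecs(n1, n2) > 0.1   # degrees, arbitrary threshold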
+
# ---------------------------------------------------------------------
#
-## Main Program
+## GUI Class and Main Program
#
# ---------------------------------------------------------------------
-# FIXME: really hackish code, just to test if the other parts work
+from Blender import BGL, Draw
+from Blender.BGL import *
+
+class GUI:
+ def _init():
+
+ # Output Format menu
+ output_format = config.output['FORMAT']
+ default_value = outputWriters.keys().index(output_format)+1
+ GUI.outFormatMenu = Draw.Create(default_value)
+ GUI.evtOutFormatMenu = 0
+
+ # Animation toggle button
+ GUI.animToggle = Draw.Create(config.output['ANIMATION'])
+ GUI.evtAnimToggle = 1
+
+ # Join Objects toggle button
+ GUI.joinObjsToggle = Draw.Create(config.output['JOIN_OBJECTS'])
+ GUI.evtJoinObjsToggle = 2
+
+ # Render filled polygons
+ GUI.polygonsToggle = Draw.Create(config.polygons['SHOW'])
+
+ # Shading Style menu
+ shading_style = config.polygons['SHADING']
+ default_value = shadingStyles.keys().index(shading_style)+1
+ GUI.shadingStyleMenu = Draw.Create(default_value)
+ GUI.evtShadingStyleMenu = 21
+
+ GUI.evtPolygonsToggle = 3
+ # We hide the config.polygons['EXPANSION_TRICK'], for now
+
+ # Render polygon edges
+ GUI.showEdgesToggle = Draw.Create(config.edges['SHOW'])
+ GUI.evtShowEdgesToggle = 4
+
+ # Render hidden edges
+ GUI.showHiddenEdgesToggle = Draw.Create(config.edges['SHOW_HIDDEN'])
+ GUI.evtShowHiddenEdgesToggle = 5
+
+ # Edge Style menu
+ edge_style = config.edges['STYLE']
+ default_value = edgeStyles.keys().index(edge_style)+1
+ GUI.edgeStyleMenu = Draw.Create(default_value)
+ GUI.evtEdgeStyleMenu = 6
+
+ # Edge Width slider
+ GUI.edgeWidthSlider = Draw.Create(config.edges['WIDTH'])
+ GUI.evtEdgeWidthSlider = 7
+
+ # Edge Color Picker
+ c = config.edges['COLOR']
+ GUI.edgeColorPicker = Draw.Create(c[0]/255.0, c[1]/255.0, c[2]/255.0)
+ GUI.evtEdgeColorPicker = 71
+
+ # Render Button
+ GUI.evtRenderButton = 8
+
+ # Exit Button
+ GUI.evtExitButton = 9
+
+ def draw():
+
+ # initialize static members
+ GUI._init()
+
+ glClear(GL_COLOR_BUFFER_BIT)
+ glColor3f(0.0, 0.0, 0.0)
+ glRasterPos2i(10, 350)
+ Draw.Text("VRM: Vector Rendering Method script. Version %s." %
+ __version__)
+ glRasterPos2i(10, 335)
+ Draw.Text("Press Q or ESC to quit.")
+
+ # Build the output format menu
+ glRasterPos2i(10, 310)
+ Draw.Text("Select the output Format:")
+ outMenuStruct = "Output Format %t"
+ for t in outputWriters.keys():
+ outMenuStruct = outMenuStruct + "|%s" % t
+ GUI.outFormatMenu = Draw.Menu(outMenuStruct, GUI.evtOutFormatMenu,
+ 10, 285, 160, 18, GUI.outFormatMenu.val, "Choose the Output Format")
+
+ # Animation toggle
+ GUI.animToggle = Draw.Toggle("Animation", GUI.evtAnimToggle,
+ 10, 260, 160, 18, GUI.animToggle.val,
+ "Toggle rendering of animations")
+
+ # Join Objects toggle
+ GUI.joinObjsToggle = Draw.Toggle("Join objects", GUI.evtJoinObjsToggle,
+ 10, 235, 160, 18, GUI.joinObjsToggle.val,
+ "Join objects in the rendered file")
+
+ # Render Button
+ Draw.Button("Render", GUI.evtRenderButton, 10, 210-25, 75, 25+18,
+ "Start Rendering")
+ Draw.Button("Exit", GUI.evtExitButton, 95, 210-25, 75, 25+18, "Exit!")
+
+ # Rendering Styles
+ glRasterPos2i(200, 310)
+ Draw.Text("Rendering Style:")
+
+ # Render Polygons
+ GUI.polygonsToggle = Draw.Toggle("Filled Polygons", GUI.evtPolygonsToggle,
+ 200, 285, 160, 18, GUI.polygonsToggle.val,
+ "Render filled polygons")
+
+ if GUI.polygonsToggle.val == 1:
+
+ # Polygon Shading Style
+ shadingStyleMenuStruct = "Shading Style %t"
+ for t in shadingStyles.keys():
+ shadingStyleMenuStruct = shadingStyleMenuStruct + "|%s" % t.lower()
+ GUI.shadingStyleMenu = Draw.Menu(shadingStyleMenuStruct, GUI.evtShadingStyleMenu,
+ 200, 260, 160, 18, GUI.shadingStyleMenu.val,
+ "Choose the shading style")
+
+
+ # Render Edges
+ GUI.showEdgesToggle = Draw.Toggle("Show Edges", GUI.evtShowEdgesToggle,
+ 200, 235, 160, 18, GUI.showEdgesToggle.val,
+ "Render polygon edges")
+
+ if GUI.showEdgesToggle.val == 1:
+
+ # Edge Style
+ edgeStyleMenuStruct = "Edge Style %t"
+ for t in edgeStyles.keys():
+ edgeStyleMenuStruct = edgeStyleMenuStruct + "|%s" % t.lower()
+ GUI.edgeStyleMenu = Draw.Menu(edgeStyleMenuStruct, GUI.evtEdgeStyleMenu,
+ 200, 210, 160, 18, GUI.edgeStyleMenu.val,
+ "Choose the edge style")
+
+ # Edge size
+ GUI.edgeWidthSlider = Draw.Slider("Width: ", GUI.evtEdgeWidthSlider,
+ 200, 185, 140, 18, GUI.edgeWidthSlider.val,
+ 0.0, 10.0, 0, "Change Edge Width")
+
+ # Edge Color
+ GUI.edgeColorPicker = Draw.ColorPicker(GUI.evtEdgeColorPicker,
+ 342, 185, 18, 18, GUI.edgeColorPicker.val, "Choose Edge Color")
+
+ # Show Hidden Edges
+ GUI.showHiddenEdgesToggle = Draw.Toggle("Show Hidden Edges",
+ GUI.evtShowHiddenEdgesToggle,
+ 200, 160, 160, 18, GUI.showHiddenEdgesToggle.val,
+ "Render hidden edges as dashed lines")
+
+ glRasterPos2i(10, 160)
+ Draw.Text("%s (c) 2006" % __author__)
+
+ def event(evt, val):
+
+ if evt == Draw.ESCKEY or evt == Draw.QKEY:
+ Draw.Exit()
+ else:
+ return
+
+ Draw.Redraw(1)
+
+ def button_event(evt):
+
+ if evt == GUI.evtExitButton:
+ Draw.Exit()
+
+ elif evt == GUI.evtOutFormatMenu:
+ i = GUI.outFormatMenu.val - 1
+ config.output['FORMAT']= outputWriters.keys()[i]
+
+ elif evt == GUI.evtAnimToggle:
+ config.output['ANIMATION'] = bool(GUI.animToggle.val)
+
+ elif evt == GUI.evtJoinObjsToggle:
+ config.output['JOIN_OBJECTS'] = bool(GUI.joinObjsToggle.val)
+
+ elif evt == GUI.evtPolygonsToggle:
+ config.polygons['SHOW'] = bool(GUI.polygonsToggle.val)
+
+ elif evt == GUI.evtShadingStyleMenu:
+ i = GUI.shadingStyleMenu.val - 1
+ config.polygons['SHADING'] = shadingStyles.keys()[i]
+
+ elif evt == GUI.evtShowEdgesToggle:
+ config.edges['SHOW'] = bool(GUI.showEdgesToggle.val)
+
+ elif evt == GUI.evtShowHiddenEdgesToggle:
+ config.edges['SHOW_HIDDEN'] = bool(GUI.showHiddenEdgesToggle.val)
+
+ elif evt == GUI.evtEdgeStyleMenu:
+ i = GUI.edgeStyleMenu.val - 1
+ config.edges['STYLE'] = edgeStyles.keys()[i]
+
+ elif evt == GUI.evtEdgeWidthSlider:
+ config.edges['WIDTH'] = float(GUI.edgeWidthSlider.val)
+
+ elif evt == GUI.evtEdgeColorPicker:
+ config.edges['COLOR'] = [int(c*255.0) for c in GUI.edgeColorPicker.val]
+
+ elif evt == GUI.evtRenderButton:
+ label = "Save %s" % config.output['FORMAT']
+ # Show the File Selector
+ global outputfile
+ Blender.Window.FileSelector(vectorize, label, outputfile)
+
+ else:
+ print "Event: %d not handled!" % evt
+
+ if evt:
+ Draw.Redraw(1)
+ #GUI.conf_debug()
+
+ def conf_debug():
+ from pprint import pprint
+ print "\nConfig"
+ pprint(config.output)
+ pprint(config.polygons)
+ pprint(config.edges)
+
+ _init = staticmethod(_init)
+ draw = staticmethod(draw)
+ event = staticmethod(event)
+ button_event = staticmethod(button_event)
+ conf_debug = staticmethod(conf_debug)
+
+# A wrapper function for the vectorizing process
def vectorize(filename):
"""The vectorizing process is as follows:
- - Open the writer
- - Render the scene
- - Close the writer
-
- If you want to render an animation the second pass should be
- repeated for any frame, and the frame number should be passed to the
- renderer.
+ - Instantiate the writer and the renderer
+ - Render!
"""
- writer = SVGVectorWriter(filename)
-
- writer.open()
+
+ if filename == "":
+ print "\nERROR: invalid file name!"
+ return
+
+ from Blender import Window
+ editmode = Window.EditMode()
+ if editmode: Window.EditMode(0)
+
+ actualWriter = outputWriters[config.output['FORMAT']]
+ writer = actualWriter(filename)
renderer = Renderer()
- renderer.doRendering(writer)
+ renderer.doRendering(writer, config.output['ANIMATION'])
- writer.close()
+ if editmode: Window.EditMode(1)
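+
+# Batch usage sketch (hedged, file names are only examples): running
+#
+#   blender -b scene.blend -P vrm.py
+#
+# takes the 'background' branch below, uses the ConsoleProgressIndicator and
+# derives the output file name from the .blend file name.
+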
+# We use a global progress Indicator Object
+progress = None
# Here the main
if __name__ == "__main__":
- # with this trick we can run the script in batch mode
- try:
- Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg")
- Blender.Redraw()
- except:
- from Blender import Window
- editmode = Window.EditMode()
- if editmode: Window.EditMode(0)
-
- vectorize("proba.svg")
- if editmode: Window.EditMode(1)
+ global progress
+ outputfile = ""
+ basename = Blender.sys.basename(Blender.Get('filename'))
+ if basename != "":
+ outputfile = Blender.sys.splitext(basename)[0] + "." + str(config.output['FORMAT']).lower()
+ if Blender.mode == 'background':
+ progress = ConsoleProgressIndicator()
+ vectorize(outputfile)
+ else:
+ progress = GraphicalProgressIndicator()
+ Draw.Register(GUI.draw, GUI.event, GUI.button_event)