X-Git-Url: https://git.ao2.it/vrm.git/blobdiff_plain/b68056f960f3135bfa0d7e6327d06bf412b420cf..6522c892513097a6f9da53d64b5c38cf8d417c31:/vrm.py
diff --git a/vrm.py b/vrm.py
index 00c8ed4..5e2c128 100755
--- a/vrm.py
+++ b/vrm.py
@@ -1,9 +1,17 @@
#!BPY
"""
Name: 'VRM'
-Blender: 241
-Group: 'Export'
-Tooltip: 'Vector Rendering Method Export Script 0.3'
+Blender: 242
+Group: 'Render'
+Tooltip: 'Vector Rendering Method script'
+"""
+
+__author__ = "Antonio Ospite"
+__url__ = ["http://projects.blender.org/projects/vrm"]
+__version__ = "0.3.beta"
+
+__bpydoc__ = """\
+ Render the scene and save the result in vector format.
"""
# ---------------------------------------------------------------------
@@ -25,23 +33,220 @@ Tooltip: 'Vector Rendering Method Export Script 0.3'
#
# ---------------------------------------------------------------------
#
-# NOTE: I do not know who is the original author of 'vrm'.
-# The present code is almost entirely rewritten from scratch,
-# but if I have to give credits to anyone, please let me know,
-# so I can update the copyright.
+# Additional credits:
+# Thanks to Emilio Aguirre for S2flender from which I took inspiration :)
+# Thanks to Nikola Radovanovic, the author of the original VRM script;
+# the code you read here has been rewritten _almost_ entirely
+# from scratch, but Nikola gave me the idea, so I thank him publicly.
#
# ---------------------------------------------------------------------
+#
+# Things TODO for a next release:
+# - FIX the issue with negative scales in object transformations!
+# - Use a better depth sorting algorithm
+# - Implement clipping of primitives and handle object intersections.
+# (for now only clipping away whole objects is supported).
+# - Review how selections are made (this script uses the selection state of
+# primitives to represent visibility info)
+# - Use a data structure other than Mesh to represent the 2D image?
+# Think of a way to merge (adjacent) polygons that have the same color.
+# Or a way to use paths for silhouettes and contours.
+# - Consider SMIL for animation handling instead of ECMA Script? (Firefox does
+# not support SMIL animations)
+# - Switch to the Mesh structure, should be considerably faster
+# (partially done, but with Mesh we cannot sort faces, yet)
+# - Implement Edge Styles (silhouettes, contours, etc.) (partially done).
+# - Implement Shading Styles? (partially done, needs to be made more flexible).
+# - Add Vector Writers other than SVG.
+# - Check memory use!!
+# - Support Indexed palettes!! (Useful for ILDA FILES, for example,
+# see http://www.linux-laser.org/download/autotrace/ilda-output.patch)
#
-# Additional credits:
-# Thanks to Emilio Aguirre for S2flender from which I took inspirations :)
-# Thanks to Anthony C. D'Agostino for the backface.py script
+# ---------------------------------------------------------------------
+#
+# Changelog:
+#
+# vrm-0.3.py - ...
+# * First release after code restructuring.
+# Now the script offers a useful set of functionalities
+# and it can render animations, too.
+# * Optimization in Renderer.doEdgeStyle(): build a topology cache
+# to speed up the lookup of adjacent faces of an edge.
+# Thanks ideasman42.
+# * The SVG output is now SVG 1.0 valid.
+# Checked with: http://jiggles.w3.org/svgvalidator/ValidatorURI.html
+# * Progress indicator during HSR.
#
# ---------------------------------------------------------------------
import Blender
-from Blender import Scene, Object, NMesh, Lamp, Camera
+from Blender import Scene, Object, Mesh, NMesh, Material, Lamp, Camera, Window
from Blender.Mathutils import *
from math import *
+import sys, time
+
+# Constants
+EPS = 10e-5
+
+
+# Some global settings
+
+class config:
+ polygons = dict()
+ polygons['SHOW'] = True
+ polygons['SHADING'] = 'TOON'
+ #polygons['HSR'] = 'PAINTER' # 'PAINTER' or 'NEWELL'
+ polygons['HSR'] = 'NEWELL'
+ # Hidden to the user for now
+ polygons['EXPANSION_TRICK'] = True
+
+ polygons['TOON_LEVELS'] = 2
+
+ edges = dict()
+ edges['SHOW'] = False
+ edges['SHOW_HIDDEN'] = False
+ edges['STYLE'] = 'MESH' # or SILHOUETTE
+ edges['WIDTH'] = 2
+ edges['COLOR'] = [0, 0, 0]
+
+ output = dict()
+ output['FORMAT'] = 'SVG'
+ output['ANIMATION'] = False
+ output['JOIN_OBJECTS'] = True
+
+
+
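+# The config class above is read directly by the rendering code, so a quick
+# way to script the settings without the GUI (a sketch, using a hypothetical
+# output file name) is to override them before rendering, e.g.:
+#
+# config.edges['SHOW'] = True
+# config.edges['WIDTH'] = 1
+# config.output['ANIMATION'] = False
+# vectorize("scene.svg")
+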
+# Utility functions
+def sign(x):
+
+ if x < 0:
+ return -1
+ elif x > 0:
+ return 1
+ else:
+ return 0
+
+
+# ---------------------------------------------------------------------
+#
+## Mesh Utility class
+#
+# ---------------------------------------------------------------------
+class MeshUtils:
+
+ def buildEdgeFaceUsersCache(me):
+ '''
+ Takes a mesh and returns a list aligned with the mesh's edges.
+ Each item is a list of the faces that use the edge;
+ this would be the equivalent of having ed.face_users as a property.
+
+ Taken from .blender/scripts/bpymodules/BPyMesh.py,
+ thanks to ideasman_42.
+ '''
+
+ def sorted_edge_indicies(ed):
+ i1= ed.v1.index
+ i2= ed.v2.index
+ if i1>i2:
+ i1,i2= i2,i1
+ return i1, i2
+
+
+ face_edges_dict= dict([(sorted_edge_indicies(ed), (ed.index, [])) for ed in me.edges])
+ for f in me.faces:
+ fvi= [v.index for v in f.v]# face vert idx's
+ for i in xrange(len(f)):
+ i1= fvi[i]
+ i2= fvi[i-1]
+
+ if i1>i2:
+ i1,i2= i2,i1
+
+ face_edges_dict[i1,i2][1].append(f)
+
+ face_edges= [None] * len(me.edges)
+ for ed_index, ed_faces in face_edges_dict.itervalues():
+ face_edges[ed_index]= ed_faces
+
+ return face_edges
+
+ def isMeshEdge(adjacent_faces):
+ """Mesh edge rule.
+
+ A mesh edge is visible if _at_least_one_ of its adjacent faces is selected.
+ Note: if the edge has no adjacent faces we want to show it as well,
+ which is useful for the "edge only" portions of objects.
+ """
+
+ if len(adjacent_faces) == 0:
+ return True
+
+ selected_faces = [f for f in adjacent_faces if f.sel]
+
+ if len(selected_faces) != 0:
+ return True
+ else:
+ return False
+
+ def isSilhouetteEdge(adjacent_faces):
+ """Silhuette selection rule.
+
+ An edge is a silhuette edge if it is shared by two faces with
+ different selection status or if it is a boundary edge of a selected
+ face.
+ """
+
+ if ((len(adjacent_faces) == 1 and adjacent_faces[0].sel == 1) or
+ (len(adjacent_faces) == 2 and
+ adjacent_faces[0].sel != adjacent_faces[1].sel)
+ ):
+ return True
+ else:
+ return False
+
+ buildEdgeFaceUsersCache = staticmethod(buildEdgeFaceUsersCache)
+ isMeshEdge = staticmethod(isMeshEdge)
+ isSilhouetteEdge = staticmethod(isSilhouetteEdge)
+
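+ # Usage sketch (assuming 'me' is a Blender Mesh): each cache entry is the
+ # list of faces sharing the edge with the same index, so the edge rules
+ # above can be applied per edge, e.g.:
+ #
+ # edge_cache = MeshUtils.buildEdgeFaceUsersCache(me)
+ # for i, adjacent_faces in enumerate(edge_cache):
+ # me.edges[i].sel = MeshUtils.isMeshEdge(adjacent_faces)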
+
+# ---------------------------------------------------------------------
+#
+## Shading Utility class
+#
+# ---------------------------------------------------------------------
+class ShadingUtils:
+
+ shademap = None
+
+ def toonShadingMapSetup():
+ levels = config.polygons['TOON_LEVELS']
+
+ texels = 2*levels - 1
+ tmp_shademap = [0.0] + [(i)/float(texels-1) for i in xrange(1, texels-1) ] + [1.0]
+
+ return tmp_shademap
+
+ def toonShading(u):
+
+ shademap = ShadingUtils.shademap
+
+ if not shademap:
+ shademap = ShadingUtils.toonShadingMapSetup()
+ # remember the map in the class so it is computed only once
+ ShadingUtils.shademap = shademap
+
+ v = 1.0
+ for i in xrange(0, len(shademap)-1):
+ pivot = (shademap[i]+shademap[i+1])/2.0
+ j = int(u>pivot)
+
+ v = shademap[i+j]
+
+ if v < shademap[i+1]:
+ return v
+
+ return v
+
+ toonShadingMapSetup = staticmethod(toonShadingMapSetup)
+ toonShading = staticmethod(toonShading)
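+
+ # Worked example with the default TOON_LEVELS = 2: the shademap is
+ # [0.0, 0.5, 1.0] and the band pivots are 0.25 and 0.75, so the diffuse
+ # term gets snapped to three flat bands:
+ #
+ # ShadingUtils.toonShading(0.1) # -> 0.0
+ # ShadingUtils.toonShading(0.6) # -> 0.5
+ # ShadingUtils.toonShading(0.9) # -> 1.0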
# ---------------------------------------------------------------------
@@ -60,40 +265,35 @@ class Projector:
parameter list.
"""
- def __init__(self, cameraObj, obMesh, canvasSize):
+ def __init__(self, cameraObj, canvasRatio):
"""Calculate the projection matrix.
- The projection matrix depends, in this case, on the camera settings,
- and also on object transformation matrix.
+ The projection matrix depends, in this case, on the camera settings.
+ TAKE CARE: This projector expects vertices in World Coordinates!
"""
- self.size = canvasSize
-
camera = cameraObj.getData()
- aspect = float(canvasSize[0])/float(canvasSize[1])
+ aspect = float(canvasRatio[0])/float(canvasRatio[1])
near = camera.clipStart
far = camera.clipEnd
+ scale = float(camera.scale)
+
fovy = atan(0.5/aspect/(camera.lens/32))
- fovy = fovy * 360/pi
+ fovy = fovy * 360.0/pi
# What projection do we want?
- if camera.type:
- m2 = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale)
- else:
- m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+ if camera.type == 0:
+ mP = self._calcPerspectiveMatrix(fovy, aspect, near, far)
+ elif camera.type == 1:
+ mP = self._calcOrthoMatrix(fovy, aspect, near, far, scale)
-
# View transformation
cam = Matrix(cameraObj.getInverseMatrix())
cam.transpose()
-
- m1 = Matrix(obMesh.getMatrix())
- m1.transpose()
- mP = cam * m1
- mP = m2 * mP
+ mP = mP * cam
self.projectionMatrix = mP
@@ -108,32 +308,32 @@ class Projector:
matrix.
"""
- # Note that we need the vertex expressed using homogeneous coordinates
- p = self.projectionMatrix * Vector([v[0], v[1], v[2], 1.0])
-
- mW = self.size[0]/2
- mH = self.size[1]/2
+ # Note that we have to work on the vertex using homogeneous coordinates
+ # From blender 2.42+ we don't need to resize the vector to be 4d
+ # when applying a 4x4 matrix, but we do that anyway since we need the
+ # 4th coordinate later
+ p = self.projectionMatrix * Vector(v).resize4D()
- if p[3]<=0:
- p[0] = round(p[0]*mW)+mW
- p[1] = round(p[1]*mH)+mH
- else:
- p[0] = round((p[0]/p[3])*mW)+mW
- p[1] = round((p[1]/p[3])*mH)+mH
-
- # For now we want (0,0) in the top-left corner of the canvas
- # Mirror and translate along y
- p[1] *= -1
- p[1] += self.size[1]
-
+ # Perspective division
+ if p[3] != 0:
+ p[0] = p[0]/p[3]
+ p[1] = p[1]/p[3]
+ p[2] = p[2]/p[3]
+
+ # restore the size
+ p[3] = 1.0
+ p.resize3D()
+
return p
+
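+ # Example with illustrative numbers: a vertex whose clip-space result is
+ # p = [2.0, 1.0, 4.0, 4.0] comes out of the division above as
+ # [0.5, 0.25, 1.0], i.e. Normalized View Coordinates; the mapping to
+ # pixel coordinates is done later by the writer (_calcCanvasCoord).
+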
##
# Private methods
#
def _calcPerspectiveMatrix(self, fovy, aspect, near, far):
- """Return a perspective projection matrix."""
+ """Return a perspective projection matrix.
+ """
top = near * tan(fovy * pi / 360.0)
bottom = -top
@@ -155,9 +355,11 @@ class Projector:
return m
def _calcOrthoMatrix(self, fovy, aspect , near, far, scale):
- """Return an orthogonal projection matrix."""
+ """Return an orthogonal projection matrix.
+ """
- top = near * tan(fovy * pi / 360.0) * (scale * 10)
+ # The 11 in the formula was found empirically
+ top = near * tan(fovy * pi / 360.0) * (scale * 11)
bottom = -top
left = bottom * aspect
right= top * aspect
@@ -179,12 +381,178 @@ class Projector:
# ---------------------------------------------------------------------
#
-## Mesh representation class
+## Progress Indicator
+#
+# ---------------------------------------------------------------------
+
+class Progress:
+ """A model for a progress indicator.
+
+ Do the progress calculation and
+ the view-independent work of a progress indicator.
+ """
+ def __init__(self, steps=0):
+ self.name = ""
+ self.steps = steps
+ self.completed = 0
+ self.progress = 0
+
+ def setSteps(self, steps):
+ """Set the number of steps of the activity wich we want to track.
+ """
+ self.steps = steps
+
+ def getSteps(self):
+ return self.steps
+
+ def setName(self, name):
+ """Set the name of the activity wich we want to track.
+ """
+ self.name = name
+
+ def getName(self):
+ return self.name
+
+ def getProgress(self):
+ return self.progress
+
+ def reset(self):
+ self.completed = 0
+ self.progress = 0
+
+ def update(self):
+ """Update the model, call this method when one step is completed.
+ """
+ if self.progress == 100:
+ return False
+
+ self.completed += 1
+ self.progress = ( float(self.completed) / float(self.steps) ) * 100
+ self.progress = int(self.progress)
+
+ return True
+
+
+class ProgressIndicator:
+ """An abstraction of a View for the Progress Model
+ """
+ def __init__(self):
+
+ # Use a refresh rate so we do not show the progress at
+ # every update, but every 'self.refresh_rate' times.
+ self.refresh_rate = 10
+ self.shows_counter = 0
+
+ self.quiet = False
+
+ self.progressModel = None
+
+ def setQuiet(self, value):
+ self.quiet = value
+
+ def setActivity(self, name, steps):
+ """Initialize the Model.
+
+ In a future version (with support for progress on sub-activities) this
+ method might only set the current activity.
+ """
+ self.progressModel = Progress()
+ self.progressModel.setName(name)
+ self.progressModel.setSteps(steps)
+
+ def getActivity(self):
+ return self.progressModel
+
+ def update(self):
+ """Update the model and show the actual progress.
+ """
+ assert(self.progressModel)
+
+ if self.progressModel.update():
+ if self.quiet:
+ return
+
+ self.show(self.progressModel.getProgress(),
+ self.progressModel.getName())
+
+ # We always return True here so the update() method can also be called
+ # from lambda funcs (putting the call in logical AND with other ops)
+ return True
+
+ def show(self, progress, name=""):
+ self.shows_counter = (self.shows_counter + 1) % self.refresh_rate
+ if self.shows_counter != 0:
+ return
+
+ if progress == 100:
+ self.shows_counter = -1
+
+
+class ConsoleProgressIndicator(ProgressIndicator):
+ """Show a progress bar on stderr, a la wget.
+ """
+ def __init__(self):
+ ProgressIndicator.__init__(self)
+
+ self.swirl_chars = ["-", "\\", "|", "/"]
+ self.swirl_count = -1
+
+ def show(self, progress, name):
+ ProgressIndicator.show(self, progress, name)
+
+ bar_length = 70
+ bar_progress = int( (progress/100.0) * bar_length )
+ bar = ("=" * bar_progress).ljust(bar_length)
+
+ self.swirl_count = (self.swirl_count+1)%len(self.swirl_chars)
+ swirl_char = self.swirl_chars[self.swirl_count]
+
+ progress_bar = "%s |%s| %c %3d%%" % (name, bar, swirl_char, progress)
+
+ sys.stderr.write(progress_bar+"\r")
+ if progress == 100:
+ sys.stderr.write("\n")
+
+
+class GraphicalProgressIndicator(ProgressIndicator):
+ """Interface to the Blender.Window.DrawProgressBar() method.
+ """
+ def __init__(self):
+ ProgressIndicator.__init__(self)
+
+ #self.swirl_chars = ["-", "\\", "|", "/"]
+ # We have to use letters with the same width, for now!
+ # Blender progress bar considers the font widths when
+ # calculating the progress bar width.
+ self.swirl_chars = ["\\", "/"]
+ self.swirl_count = -1
+
+ def show(self, progress, name):
+ ProgressIndicator.show(self, progress)
+
+ self.swirl_count = (self.swirl_count+1)%len(self.swirl_chars)
+ swirl_char = self.swirl_chars[self.swirl_count]
+
+ progress_text = "%s - %c %3d%%" % (name, swirl_char, progress)
+
+ # Finally draw the Progress Bar
+ Window.WaitCursor(1) # Maybe we can move that call in the constructor?
+ Window.DrawProgressBar(progress/100.0, progress_text)
+
+ if progress == 100:
+ Window.DrawProgressBar(1, progress_text)
+ Window.WaitCursor(0)
+
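+# Usage sketch of the progress machinery (this is essentially what the
+# renderer does): declare an activity with its number of steps, then call
+# update() once per completed step.
+#
+# progress = ConsoleProgressIndicator()
+# progress.setActivity("HSR: Painter", 100)
+# for step in xrange(100):
+# progress.update()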
+
+
+# ---------------------------------------------------------------------
+#
+## 2D Object representation class
#
# ---------------------------------------------------------------------
# TODO: a class to represent the needed properties of a 2D vector image
-# Just use a NMesh structure?
+# For now just using a [N]Mesh structure.
# ---------------------------------------------------------------------
@@ -204,118 +572,294 @@ class VectorWriter:
Every subclass of VectorWriter must have at least the following public
methods:
- - printCanvas(mesh) --- where mesh is as specified before.
+ - open(self)
+ - close(self)
+ - printCanvas(self, scene,
+ doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False):
"""
- def __init__(self, fileName, canvasSize):
- """Open the file named #fileName# and set the canvas size."""
+ def __init__(self, fileName):
+ """Set the output file name and other properties"""
+
+ self.outputFileName = fileName
+ self.file = None
- self.file = open(fileName, "w")
- print "Outputting to: ", fileName
+ context = Scene.GetCurrent().getRenderingContext()
+ self.canvasSize = ( context.imageSizeX(), context.imageSizeY() )
+
+ self.startFrame = 1
+ self.endFrame = 1
+ self.animation = False
- self.canvasSize = canvasSize
-
##
# Public Methods
#
- def printCanvas(mesh):
- return
-
- ##
- # Private Methods
- #
-
- def _printHeader():
+ def open(self, startFrame=1, endFrame=1):
+ if startFrame != endFrame:
+ self.startFrame = startFrame
+ self.endFrame = endFrame
+ self.animation = True
+
+ self.file = open(self.outputFileName, "w")
+ print "Outputting to: ", self.outputFileName
+
return
- def _printFooter():
+ def close(self):
+ self.file.close()
return
+ def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False,
+ showHiddenEdges=False):
+ """This is the interface for the needed printing routine.
+ """
+ return
+
## SVG Writer
class SVGVectorWriter(VectorWriter):
"""A concrete class for writing SVG output.
-
- The class does not support animations, yet.
- Sorry.
"""
- def __init__(self, file, canvasSize):
- """Simply call the parent Contructor."""
- VectorWriter.__init__(self, file, canvasSize)
+ def __init__(self, fileName):
+ """Simply call the parent Contructor.
+ """
+ VectorWriter.__init__(self, fileName)
##
# Public Methods
#
-
- def printCanvas(self, scene):
- """Convert the scene representation to SVG."""
+ def open(self, startFrame=1, endFrame=1):
+ """Do some initialization operations.
+ """
+ VectorWriter.open(self, startFrame, endFrame)
self._printHeader()
+
+ def close(self):
+ """Do some finalization operation.
+ """
+ self._printFooter()
+
+ # remember to call the close method of the parent
+ VectorWriter.close(self)
+
+ def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False,
+ showHiddenEdges=False):
+ """Convert the scene representation to SVG.
+ """
+
Objects = scene.getChildren()
+
+ context = scene.getRenderingContext()
+ framenumber = context.currentFrame()
+
+ if self.animation:
+ framestyle = "display:none"
+ else:
+ framestyle = "display:block"
+
+ # Assign an id to this group so we can set properties on it using DOM
+ self.file.write("<g id=\"frame%d\" style=\"%s\">\n" %
+ (framenumber, framestyle) )
+
+
for obj in Objects:
- self.file.write("\n")
-
- for face in obj.getData().faces:
- self._printPolygon(face)
+ if(obj.getType() != 'Mesh'):
+ continue
+
+ self.file.write("<g id=\"%s\">\n" % obj.getName())
+
+ mesh = obj.getData(mesh=1)
+
+ if doPrintPolygons:
+ self._printPolygons(mesh)
+
+ if doPrintEdges:
+ self._printEdges(mesh, showHiddenEdges)
+
self.file.write("</g>\n")
-
- self._printFooter()
+
+ self.file.write("</g>\n")
+
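+ # The canvas printed above nests one group per frame, each containing one
+ # group per mesh object, roughly as follows (an illustrative sketch with a
+ # hypothetical object name, not literal program output):
+ #
+ # <g id="frame1" style="display:block">
+ # <g id="Cube">
+ # ...polygons and edges of "Cube"...
+ # </g>
+ # </g>
+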
##
# Private Methods
#
+ def _calcCanvasCoord(self, v):
+ """Convert vertex in scene coordinates to canvas coordinates.
+ """
+
+ pt = Vector([0, 0, 0])
+
+ mW = float(self.canvasSize[0])/2.0
+ mH = float(self.canvasSize[1])/2.0
+
+ # rescale to canvas size
+ pt[0] = v.co[0]*mW + mW
+ pt[1] = v.co[1]*mH + mH
+ pt[2] = v.co[2]
+
+ # For now we want (0,0) in the top-left corner of the canvas.
+ # Mirror and translate along y
+ pt[1] *= -1
+ pt[1] += self.canvasSize[1]
+
+ return pt
+
def _printHeader(self):
"""Print SVG header."""
self.file.write("\n")
- self.file.write("\n")
+
+ def _printEdges(self, mesh, showHiddenEdges=False):
+ """Print the wireframe using mesh edges.
+ """
+
+ stroke_width = config.edges['WIDTH']
+ stroke_col = config.edges['COLOR']
- #take as face color the first vertex color
- fcol = face.col[0]
- color = [fcol.r, fcol.g, fcol.b]
+ self.file.write("<g>\n")
- stroke_col = [0, 0, 0]
- if not wireframe:
- stroke_col = color
+ for e in mesh.edges:
+
+ hidden_stroke_style = ""
+
+ if e.sel == 0:
+ if showHiddenEdges == False:
+ continue
+ else:
+ hidden_stroke_style = ";\n stroke-dasharray:3, 3"
+
+ p1 = self._calcCanvasCoord(e.v1)
+ p2 = self._calcCanvasCoord(e.v2)
+
+ self.file.write("<line x1=\"%g\" y1=\"%g\" x2=\"%g\" y2=\"%g\"\n" % (p1[0], p1[1], p2[0], p2[1]))
+ self.file.write(" style=\"stroke:rgb(%d,%d,%d); stroke-width:%g%s\"/>\n" % (stroke_col[0], stroke_col[1], stroke_col[2], stroke_width, hidden_stroke_style))
- self.file.write("\tstyle=\"fill:rgb("+str(color[0])+","+str(color[1])+","+str(color[2])+");")
- self.file.write(" stroke:rgb("+str(stroke_col[0])+","+str(stroke_col[1])+","+str(stroke_col[2])+");")
- self.file.write(" stroke-width:"+str(stroke_width)+";\n")
- self.file.write(" stroke-linecap:round;stroke-linejoin:round")
- self.file.write("\"/>\n")
+ self.file.write("</g>\n")
# ---------------------------------------------------------------------
@@ -324,308 +868,1157 @@ class SVGVectorWriter(VectorWriter):
#
# ---------------------------------------------------------------------
-def RotatePoint(PX,PY,PZ,AngleX,AngleY,AngleZ):
-
- NewPoint = []
- # Rotate X
- NewY = (PY * cos(AngleX))-(PZ * sin(AngleX))
- NewZ = (PZ * cos(AngleX))+(PY * sin(AngleX))
- # Rotate Y
- PZ = NewZ
- PY = NewY
- NewZ = (PZ * cos(AngleY))-(PX * sin(AngleY))
- NewX = (PX * cos(AngleY))+(PZ * sin(AngleY))
- PX = NewX
- PZ = NewZ
- # Rotate Z
- NewX = (PX * cos(AngleZ))-(PY * sin(AngleZ))
- NewY = (PY * cos(AngleZ))+(PX * sin(AngleZ))
- NewPoint.append(NewX)
- NewPoint.append(NewY)
- NewPoint.append(NewZ)
- return NewPoint
+# A dictionary to collect different shading style methods
+shadingStyles = dict()
+shadingStyles['FLAT'] = None
+shadingStyles['TOON'] = None
+
+# A dictionary to collect different edge style methods
+edgeStyles = dict()
+edgeStyles['MESH'] = MeshUtils.isMeshEdge
+edgeStyles['SILHOUETTE'] = MeshUtils.isSilhouetteEdge
+
+# A dictionary to collect the supported output formats
+outputWriters = dict()
+outputWriters['SVG'] = SVGVectorWriter
+
class Renderer:
- """Render a scene viewed from a given camera.
+ """Render a scene viewed from the active camera.
- This class is responsible of the rendering process, hence transormation
- and projection of the ojects in the scene are invoked by the renderer.
+ This class is responsible for the rendering process; transformation and
+ projection of the objects in the scene are invoked by the renderer.
- The user can optionally provide a specific camera for the rendering, see
- the #doRendering# method for more informations.
+ The rendering is done using the active camera for the current scene.
"""
def __init__(self):
- """Set the canvas size to a defaulr value.
-
- The only instance attribute here is the canvas size, which can be
- queryed to the renderer by other entities.
+ """Make the rendering process only for the current scene by default.
+
+ We will work on a copy of the scene, to be sure that the current scene does
+ not get modified in any way.
"""
- self.canvasSize = (0.0, 0.0)
+
+ # Render the current Scene, this should be a READ-ONLY property
+ self._SCENE = Scene.GetCurrent()
+
+ # Use the aspect ratio of the scene rendering context
+ context = self._SCENE.getRenderingContext()
+
+ aspect_ratio = float(context.imageSizeX())/float(context.imageSizeY())
+ self.canvasRatio = (float(context.aspectRatioX())*aspect_ratio,
+ float(context.aspectRatioY())
+ )
+
+ # Render from the currently active camera
+ self.cameraObj = self._SCENE.getCurrentCamera()
+
+ # Get a projector for this camera.
+ # NOTE: the projector wants object in world coordinates,
+ # so we should remember to apply modelview transformations
+ # _before_ we do projection transformations.
+ self.proj = Projector(self.cameraObj, self.canvasRatio)
+
+ # Get the list of lighting sources
+ obj_lst = self._SCENE.getChildren()
+ self.lights = [ o for o in obj_lst if o.getType() == 'Lamp']
+
+ # When there are no lights we use a default lighting source
+ # that has the same position as the camera
+ if len(self.lights) == 0:
+ l = Lamp.New('Lamp')
+ lobj = Object.New('Lamp')
+ lobj.loc = self.cameraObj.loc
+ lobj.link(l)
+ self.lights.append(lobj)
##
# Public Methods
#
- def getCanvasSize(self):
- """Return the current canvas size read from Blender rendering context"""
- return self.canvasSize
+ def doRendering(self, outputWriter, animation=False):
+ """Render picture or animation and write it out.
+
+ The parameters are:
+ - a Vector writer object that will be used to output the result.
+ - a flag to tell if we want to render an animation or only the
+ current frame.
+ """
+
+ context = self._SCENE.getRenderingContext()
+ origCurrentFrame = context.currentFrame()
+
+ # Handle the animation case
+ if not animation:
+ startFrame = origCurrentFrame
+ endFrame = startFrame
+ outputWriter.open()
+ else:
+ startFrame = context.startFrame()
+ endFrame = context.endFrame()
+ outputWriter.open(startFrame, endFrame)
- def doRendering(self, scene, cameraObj=None):
+ # Do the rendering process frame by frame
+ print "Start Rendering of %d frames" % (endFrame-startFrame)
+ for f in xrange(startFrame, endFrame+1):
+ print "\n\nFrame: %d" % f
+ context.currentFrame(f)
+
+ # Use some temporary workspace, a full copy of the scene
+ inputScene = self._SCENE.copy(2)
+ # And Set our camera accordingly
+ self.cameraObj = inputScene.getCurrentCamera()
+
+ try:
+ renderedScene = self.doRenderScene(inputScene)
+ except:
+ print "There was an error! Aborting."
+ import traceback
+ traceback.print_exc()
+
+ self._SCENE.makeCurrent()
+ Scene.unlink(inputScene)
+ del inputScene
+ return
+
+ outputWriter.printCanvas(renderedScene,
+ doPrintPolygons = config.polygons['SHOW'],
+ doPrintEdges = config.edges['SHOW'],
+ showHiddenEdges = config.edges['SHOW_HIDDEN'])
+
+ # delete the rendered scene
+ self._SCENE.makeCurrent()
+ Scene.unlink(renderedScene)
+ del renderedScene
+
+ outputWriter.close()
+ print "Done!"
+ context.currentFrame(origCurrentFrame)
+
+
+ def doRenderScene(self, workScene):
"""Control the rendering process.
Here we control the entire rendering process invoking the operation
- needed to transforma project the 3D scene in two dimensions.
+ needed to transform and project the 3D scene in two dimensions.
+ """
+
+ # global processing of the scene
+
+ self._doSceneClipping(workScene)
- Parameters:
- scene --- the Blender Scene to render
- cameraObj --- the camera object to use for the viewing processing
+ self._doConvertGeometricObjsToMesh(workScene)
+
+ if config.output['JOIN_OBJECTS']:
+ self._joinMeshObjectsInScene(workScene)
+
+ self._doSceneDepthSorting(workScene)
+
+ # Per object activities
+
+ Objects = workScene.getChildren()
+ print "Total Objects: %d" % len(Objects)
+ for i,obj in enumerate(Objects):
+ print "\n\n-------"
+ print "Rendering Object: %d" % i
+
+ if obj.getType() != 'Mesh':
+ print "Only Mesh supported! - Skipping type:", obj.getType()
+ continue
+
+ print "Rendering: ", obj.getName()
+
+ mesh = obj.getData(mesh=1)
+
+ # Triangulate the mesh??
+ for f in mesh.faces: f.sel = 1
+ mesh.quadToTriangle()
+
+ self._doModelingTransformation(mesh, obj.matrix)
+
+ self._doBackFaceCulling(mesh)
+
+ # When doing HSR with NEWELL we may want to flip all normals
+ # toward the viewer
+ if config.polygons['HSR'] == "NEWELL":
+ for f in mesh.faces:
+ f.sel = 1-f.sel
+ mesh.flipNormals()
+ for f in mesh.faces:
+ f.sel = 1
+
+ self._doLighting(mesh)
+
+
+ # Do "projection" now so we perform further processing
+ # in Normalized View Coordinates
+ self._doProjection(mesh, self.proj)
+
+ self._doViewFrustumClipping(mesh)
+
+ self._doHiddenSurfaceRemoval(mesh)
+
+ self._doEdgesStyle(mesh, edgeStyles[config.edges['STYLE']])
+
+
+ # Update the object data, important! :)
+ mesh.update()
+
+ return workScene
+
+
+ ##
+ # Private Methods
+ #
+
+ # Utility methods
+
+ def _getObjPosition(self, obj):
+ """Return the obj position in World coordinates.
+ """
+ return obj.matrix.translationPart()
+
+ def _cameraViewVector(self):
+ """Get the View Direction form the camera matrix.
"""
+ return Vector(self.cameraObj.matrix[2]).resize3D()
+
- if cameraObj == None:
- cameraObj = scene.getCurrentCamera()
+ # Faces methods
+
+ def _isFaceVisible(self, face):
+ """Determine if a face of an object is visible from the current camera.
- context = scene.getRenderingContext()
- self.canvasSize = (context.imageSizeX(), context.imageSizeY())
+ The view vector is calculated from the camera location and one of the
+ vertices of the face (expressed in World coordinates, after applying
+ modelview transformations).
+
+ After those transformations we determine if a face is visible by
+ computing the angle between the face normal and the view vector; this
+ angle has to be between -90 and 90 degrees for the face to be visible.
+ This corresponds to the sign of the dot product between the two: if it
+ is > 0 then the face is visible.
+
+ There is no need to normalize those vectors since we are only interested
+ in the sign of the dot product and not in its value.
+
+ NOTE: here we assume the face vertices are in WorldCoordinates, so
+ please transform the object _before_ doing the test.
+ """
+
+ normal = Vector(face.no)
+ camPos = self._getObjPosition(self.cameraObj)
+ view_vect = None
+
+ # The view vector in orthographic projections is the view direction of
+ # the camera
+ if self.cameraObj.data.getType() == 1:
+ view_vect = self._cameraViewVector()
+
+ # The view vector in perspective projections can be considered as
+ # the difference between the camera position and one point of
+ # the face; we choose the farthest point from the camera.
+ if self.cameraObj.data.getType() == 0:
+ vv = max( [ ((camPos - Vector(v.co)).length, (camPos - Vector(v.co))) for v in face] )
+ view_vect = vv[1]
+
+
+ # if d > 0 the face is visible from the camera
+ d = view_vect * normal
+ if d > 0:
+ return True
+ else:
+ return False
+
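+ # A quick numeric check of the rule above (illustrative values): with the
+ # camera in (0, 0, 5) and a face lying on the XY plane with normal
+ # (0, 0, 1), the view vector towards the face is (0, 0, 5) and
+ # view_vect * normal = 5 > 0, so the face is kept; the opposite normal
+ # (0, 0, -1) gives -5 and the face is culled.
+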
+
+ # Scene methods
+
+ def _doSceneClipping(self, scene):
+ """Clip whole objects against the View Frustum.
+
+ For now we only clip away whole objects according to their center position.
+ """
+
+ cpos = self._getObjPosition(self.cameraObj)
+ view_vect = self._cameraViewVector()
+
+ near = self.cameraObj.data.clipStart
+ far = self.cameraObj.data.clipEnd
+
+ aspect = float(self.canvasRatio[0])/float(self.canvasRatio[1])
+ fovy = atan(0.5/aspect/(self.cameraObj.data.lens/32))
+ fovy = fovy * 360.0/pi
+
Objects = scene.getChildren()
+ for o in Objects:
+ if o.getType() != 'Mesh': continue;
+
+ obj_vect = Vector(cpos) - self._getObjPosition(o)
+
+ d = obj_vect*view_vect
+ theta = AngleBetweenVecs(obj_vect, view_vect)
+
+ # if the object is outside the view frustum, clip it away
+ if (d < near) or (d > far) or (theta > fovy):
+ scene.unlink(o)
+
+ def _doConvertGeometricObjsToMesh(self, scene):
+ """Convert all "geometric" objects to mesh ones.
+ """
+ geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text']
+ #geometricObjTypes = ['Mesh', 'Surf', 'Curve']
+
+ Objects = scene.getChildren()
+ objList = [ o for o in Objects if o.getType() in geometricObjTypes ]
+ for obj in objList:
+ old_obj = obj
+ obj = self._convertToRawMeshObj(obj)
+ scene.link(obj)
+ scene.unlink(old_obj)
+
+
+ # XXX Workaround for Text and Curve which have some normals
+ # inverted when they are converted to Mesh, REMOVE that when
+ # blender will fix that!!
+ if old_obj.getType() in ['Curve', 'Text']:
+ me = obj.getData(mesh=1)
+ for f in me.faces: f.sel = 1;
+ for v in me.verts: v.sel = 1;
+ me.remDoubles(0)
+ me.triangleToQuad()
+ me.recalcNormals()
+ me.update()
+
+
+ def _doSceneDepthSorting(self, scene):
+ """Sort objects in the scene.
+
+ The object sorting is done according to the object centers.
+ """
+
+ c = self._getObjPosition(self.cameraObj)
+
+ by_center_pos = (lambda o1, o2:
+ (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and
+ cmp((self._getObjPosition(o1) - Vector(c)).length,
+ (self._getObjPosition(o2) - Vector(c)).length)
+ )
+
+ # TODO: implement sorting by bounding box: if obj1.bb is inside obj2.bb,
+ # then obj1 is farther than obj2, useful when obj2 has holes
+ by_bbox = None
- # A structure to store the transformed scene
- newscene = Scene.New("flat"+scene.name)
+ Objects = scene.getChildren()
+ Objects.sort(by_center_pos)
- for obj in Objects:
-
- if (obj.getType() != "Mesh"):
- print "Type:", obj.getType(), "\tSorry, only mesh Object supported!"
+ # update the scene
+ for o in Objects:
+ scene.unlink(o)
+ scene.link(o)
+
+ def _joinMeshObjectsInScene(self, scene):
+ """Merge all the Mesh Objects in a scene into a single Mesh Object.
+ """
+
+ oList = [o for o in scene.getChildren() if o.getType()=='Mesh']
+
+ # FIXME: Object.join() does not work if the list contains 1 object
+ if len(oList) == 1:
+ return
+
+ mesh = Mesh.New('BigOne')
+ bigObj = Object.New('Mesh', 'BigOne')
+ bigObj.link(mesh)
+
+ scene.link(bigObj)
+
+ try:
+ bigObj.join(oList)
+ except RuntimeError:
+ print "\nWarning! - Can't Join Objects\n"
+ scene.unlink(bigObj)
+ return
+ except TypeError:
+ print "Objects Type error?"
+
+ for o in oList:
+ scene.unlink(o)
+
+ scene.update()
+
+
+ # Per object/mesh methods
+
+ def _convertToRawMeshObj(self, object):
+ """Convert geometry based object to a mesh object.
+ """
+ me = Mesh.New('RawMesh_'+object.name)
+ me.getFromObject(object.name)
+
+ newObject = Object.New('Mesh', 'RawMesh_'+object.name)
+ newObject.link(me)
+
+ # If the object has no materials set a default material
+ if not me.materials:
+ me.materials = [Material.New()]
+ #for f in me.faces: f.mat = 0
+
+ newObject.setMatrix(object.getMatrix())
+
+ return newObject
+
+ def _doModelingTransformation(self, mesh, matrix):
+ """Transform object coordinates to world coordinates.
+
+ This step is done by simply applying the object's transformation
+ matrix and recalculating its normals.
+ """
+ # XXX FIXME: blender does not transform normals in the right way when
+ # there are negative scale values
+ if matrix[0][0] < 0 or matrix[1][1] < 0 or matrix[2][2] < 0:
+ print "WARNING: Negative scales, expect incorrect results!"
+
+ mesh.transform(matrix, True)
+
+ def _doBackFaceCulling(self, mesh):
+ """Simple Backface Culling routine.
+
+ At this level we simply do a visibility test face by face and then
+ mark the visible faces as selected.
+ """
+
+ # Select all vertices, so edges can be displayed even if there are no
+ # faces
+ for v in mesh.verts:
+ v.sel = 1
+
+ Mesh.Mode(Mesh.SelectModes['FACE'])
+ # Loop on faces
+ for f in mesh.faces:
+ f.sel = 0
+ if self._isFaceVisible(f):
+ f.sel = 1
+
+ def _doLighting(self, mesh):
+ """Apply an Illumination and shading model to the object.
+
+ The model used is the Phong one; it may be inefficient,
+ but I'm just learning about rendering and starting from Phong seemed
+ the most natural way.
+ """
+
+ # If the mesh has vertex colors already, use them,
+ # otherwise turn them on and do some calculations
+ if mesh.vertexColors:
+ return
+ mesh.vertexColors = 1
+
+ materials = mesh.materials
+
+ camPos = self._getObjPosition(self.cameraObj)
+
+ # We do per-face color calculation (FLAT Shading); we can easily switch
+ # to a per-vertex calculation if we want to implement some shading
+ # technique. For an example see:
+ # http://www.miralab.unige.ch/papers/368.pdf
+ for f in mesh.faces:
+ if not f.sel:
continue
- # Get a projector for this object
- proj = Projector(cameraObj, obj, self.canvasSize)
+ mat = None
+ if materials:
+ mat = materials[f.mat]
- # Let's store the transformed data
- transformed_mesh = NMesh.New("flat"+obj.name)
- transformed_mesh.hasVertexColours(1)
+ # A new default material
+ if mat == None:
+ mat = Material.New('defMat')
- # Store the materials
- materials = obj.getData().getMaterials()
+ # Check if it is a shadeless material
+ elif mat.getMode() & Material.Modes['SHADELESS']:
+ I = mat.getRGBCol()
+ # Convert to a value between 0 and 255
+ tmp_col = [ int(c * 255.0) for c in I]
- meshfaces = obj.getData().faces
+ for c in f.col:
+ c.r = tmp_col[0]
+ c.g = tmp_col[1]
+ c.b = tmp_col[2]
+ #c.a = tmp_col[3]
- for face in meshfaces:
+ continue
- # if the face is visible flatten it on the "picture plane"
- if self._isFaceVisible_old(face, obj, cameraObj):
-
- # Store transformed face
- newface = NMesh.Face()
- for vert in face:
+ # do vertex color calculation
- p = proj.doProjection(vert.co)
+ TotDiffSpec = Vector([0.0, 0.0, 0.0])
- tmp_vert = NMesh.Vert(p[0], p[1], p[2])
+ for l in self.lights:
+ light_obj = l
+ light_pos = self._getObjPosition(l)
+ light = light_obj.getData()
+
+ L = Vector(light_pos).normalize()
- # Add the vert to the mesh
- transformed_mesh.verts.append(tmp_vert)
-
- newface.v.append(tmp_vert)
-
-
- # Per-face color calculation
- # code taken mostly from the original vrm script
- # TODO: understand the code and rewrite it clearly
- ambient = -150
-
- fakelight = Object.Get("Lamp").loc
- if fakelight == None:
- fakelight = [1.0, 1.0, -0.3]
-
- norm = Vector(face.no)
- vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2])
- vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2)))
- intensity = floor(ambient + 200*acos(vektori/vduzine))/200
- if intensity < 0:
- intensity = 0
-
- if materials:
- tmp_col = materials[face.mat].getRGBCol()
+ V = (Vector(camPos) - Vector(f.cent)).normalize()
+
+ N = Vector(f.no).normalize()
+
+ if config.polygons['SHADING'] == 'TOON':
+ NL = ShadingUtils.toonShading(N*L)
+ else:
+ NL = (N*L)
+
+ # Should we use NL instead of (N*L) here?
+ R = 2 * (N*L) * N - L
+
+ Ip = light.getEnergy()
+
+ # Diffuse coefficient
+ kd = mat.getRef() * Vector(mat.getRGBCol())
+ for i in [0, 1, 2]:
+ kd[i] *= light.col[i]
+
+ Idiff = Ip * kd * max(0, NL)
+
+
+ # Specular component
+ ks = mat.getSpec() * Vector(mat.getSpecCol())
+ ns = mat.getHardness()
+ Ispec = Ip * ks * pow(max(0, (V*R)), ns)
+
+ TotDiffSpec += (Idiff+Ispec)
+
+
+ # Ambient component
+ Iamb = Vector(Blender.World.Get()[0].getAmb())
+ ka = mat.getAmb()
+
+ # Emissive component (convert to a triplet)
+ ki = Vector([mat.getEmit()]*3)
+
+ #I = ki + Iamb + (Idiff + Ispec)
+ I = ki + (ka * Iamb) + TotDiffSpec
+
+
+ # Set Alpha component
+ I = list(I)
+ I.append(mat.getAlpha())
+
+ # Clamp I values between 0 and 1
+ I = [ min(c, 1) for c in I]
+ I = [ max(0, c) for c in I]
+
+ # Convert to a value between 0 and 255
+ tmp_col = [ int(c * 255.0) for c in I]
+
+ for c in f.col:
+ c.r = tmp_col[0]
+ c.g = tmp_col[1]
+ c.b = tmp_col[2]
+ c.a = tmp_col[3]
+
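+ # In symbols, the per-face color computed above is:
+ #
+ # I = ki + ka*Iamb + sum over lights of Ip*(kd*max(0, N*L) + ks*max(0, V*R)^ns)
+ #
+ # where N*L may be replaced by its toon-quantized version NL, kd also
+ # folds in the lamp color, and the result is clamped to [0, 1] and
+ # scaled to the 0-255 vertex color range.
+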
+ def _doProjection(self, mesh, projector):
+ """Apply Viewing and Projection tranformations.
+ """
+
+ for v in mesh.verts:
+ p = projector.doProjection(v.co[:])
+ v.co[0] = p[0]
+ v.co[1] = p[1]
+ v.co[2] = p[2]
+
+ #mesh.recalcNormals()
+ #mesh.update()
+
+ # We could reset the Camera matrix, since now
+ # we are in Normalized Viewing Coordinates,
+ # but doing that would affect World Coordinate
+ # processing for other objects
+
+ #self.cameraObj.data.type = 1
+ #self.cameraObj.data.scale = 2.0
+ #m = Matrix().identity()
+ #self.cameraObj.setMatrix(m)
+
+ def _doViewFrustumClipping(self, mesh):
+ """Clip faces against the View Frustum.
+ """
+
+ # HSR routines
+ def __simpleDepthSort(self, mesh):
+ """Sort faces by the furthest vertex.
+
+ This simple method is also known as the painter's algorithm, and it
+ solves HSR correctly only for convex meshes.
+ """
+
+ global progress
+ # The sorting requires circa n*log(n) steps
+ n = len(mesh.faces)
+ progress.setActivity("HSR: Painter", n*log(n))
+
+ by_furthest_z = (lambda f1, f2: progress.update() and
+ cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2])+EPS)
+ )
+
+ # FIXME: using NMesh to sort faces. We should avoid that!
+ nmesh = NMesh.GetRaw(mesh.name)
+
+ # remember that _higher_ z values mean further points
+ nmesh.faces.sort(by_furthest_z)
+ nmesh.faces.reverse()
+
+ nmesh.update()
+
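+ # For instance (illustrative max-z values): faces whose furthest vertices
+ # sit at z = 0.2, 0.9 and 0.5 end up ordered 0.9, 0.5, 0.2 after the sort
+ # and reverse above, so the furthest face is written first and nearer
+ # faces are painted over it.
+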
+ def __newellDepthSort(self, mesh):
+ """Newell's depth sorting.
+
+ """
+
+ from hsrtk import *
+
+ # Find non planar quads and convert them to triangle
+ #for f in mesh.faces:
+ # f.sel = 0
+ # if is_nonplanar_quad(f.v):
+ # print "NON QUAD??"
+ # f.sel = 1
+
+
+ # Now reselect all faces
+ for f in mesh.faces:
+ f.sel = 1
+
+ # FIXME: using NMesh to sort faces. We should avoid that!
+ nmesh = NMesh.GetRaw(mesh.name)
+
+ # remember that _higher_ z values mean further points
+ nmesh.faces.sort(by_furthest_z)
+ nmesh.faces.reverse()
+
+
+ # Begin depth sort tests
+
+ # use the smooth flag to set marked faces
+ for f in nmesh.faces:
+ f.smooth = 0
+
+ facelist = nmesh.faces[:]
+ maplist = []
+
+
+ # The steps are _at_least_ equal to len(facelist); we do not count the
+ # faces coming out from splitting!!
+ global progress
+ progress.setActivity("HSR: Newell", len(facelist))
+ #progress.setQuiet(True)
+
+
+ while len(facelist):
+ debug("\n----------------------\n")
+ debug("len(facelist): %d\n" % len(facelist))
+ P = facelist[0]
+
+ pSign = sign(P.normal[2])
+
+ # We can discard faces parallel to the view vector
+ if P.normal[2] == 0:
+ facelist.remove(P)
+ continue
+
+ split_done = 0
+ face_marked = 0
+
+ for Q in facelist[1:]:
+
+ debug("P.smooth: " + str(P.smooth) + "\n")
+ debug("Q.smooth: " + str(Q.smooth) + "\n")
+ debug("\n")
+
+ qSign = sign(Q.normal[2])
+ # TODO: check also if Q is parallel??
+
+ # Test 0: We need to test only those Qs whose furthest vertex
+ # is closer to the observer than the closest vertex of P.
+
+ zP = [v.co[2] for v in P.v]
+ zQ = [v.co[2] for v in Q.v]
+ notZOverlap = min(zP) > max(zQ)+EPS
+
+ if notZOverlap:
+ debug("\nTest 0\n")
+ debug("NOT Z OVERLAP!\n")
+ if Q.smooth == 0:
+ # If Q is not marked then we can safely print P
+ break
else:
- tmp_col = [0.5, 0.5, 0.5]
-
- tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ]
+ debug("met a marked face\n")
+ continue
+
+
+ # Test 1: X extent overlapping
+ xP = [v.co[0] for v in P.v]
+ xQ = [v.co[0] for v in Q.v]
+ #notXOverlap = (max(xP) <= min(xQ)) or (max(xQ) <= min(xP))
+ notXOverlap = (min(xQ) >= max(xP)-EPS) or (min(xP) >= max(xQ)-EPS)
+
+ if notXOverlap:
+ debug("\nTest 1\n")
+ debug("NOT X OVERLAP!\n")
+ continue
+
+
+ # Test 2: Y extent Overlapping
+ yP = [v.co[1] for v in P.v]
+ yQ = [v.co[1] for v in Q.v]
+ #notYOverlap = (max(yP) <= min(yQ)) or (max(yQ) <= min(yP))
+ notYOverlap = (min(yQ) >= max(yP)-EPS) or (min(yP) >= max(yQ)-EPS)
+
+ if notYOverlap:
+ debug("\nTest 2\n")
+ debug("NOT Y OVERLAP!\n")
+ continue
+
+
+ # Test 3: P vertices are all behind the plane of Q
+ n = 0
+ for Pi in P:
+ d = qSign * Distance(Vector(Pi), Q)
+ if d <= EPS:
+ n += 1
+ pVerticesBehindPlaneQ = (n == len(P))
+
+ if pVerticesBehindPlaneQ:
+ debug("\nTest 3\n")
+ debug("P BEHIND Q!\n")
+ continue
+
+
+ # Test 4: Q vertices in front of the plane of P
+ n = 0
+ for Qi in Q:
+ d = pSign * Distance(Vector(Qi), P)
+ if d >= -EPS:
+ n += 1
+ qVerticesInFrontPlaneP = (n == len(Q))
+
+ if qVerticesInFrontPlaneP:
+ debug("\nTest 4\n")
+ debug("Q IN FRONT OF P!\n")
+ continue
+
- vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2])
- newface.col = [vcol, vcol, vcol, 255]
+ # Test 5: Check if the projections of the polygons effectively overlap;
+ # in the previous tests we checked only bounding boxes.
+
+ if not projectionsOverlap(P, Q):
+ debug("\nTest 5\n")
+ debug("Projections do not overlap!\n")
+ continue
+
+ # We still can't say if P obscures Q.
+
+ # But if Q is marked we do a face-split trying to resolve a
+ # difficulty (maybe a visibility cycle).
+ if Q.smooth == 1:
+ # Split P or Q
+ debug("Possibly a cycle detected!\n")
+ debug("Split here!!\n")
+
+ facelist = facesplit(P, Q, facelist, nmesh)
+ split_done = 1
+ break
+
+ # The question now is: Does Q obscure P?
+
+
+ # Test 3bis: Q vertices are all behind the plane of P
+ n = 0
+ for Qi in Q:
+ d = pSign * Distance(Vector(Qi), P)
+ if d <= EPS:
+ n += 1
+ qVerticesBehindPlaneP = (n == len(Q))
+
+ if qVerticesBehindPlaneP:
+ debug("\nTest 3bis\n")
+ debug("Q BEHIND P!\n")
+
+
+ # Test 4bis: P vertices in front of the plane of Q
+ n = 0
+ for Pi in P:
+ d = qSign * Distance(Vector(Pi), Q)
+ if d >= -EPS:
+ n += 1
+ pVerticesInFrontPlaneQ = (n == len(P))
+
+ if pVerticesInFrontPlaneQ:
+ debug("\nTest 4bis\n")
+ debug("P IN FRONT OF Q!\n")
+
+
+ # We don't even know if Q obscures P, so they must
+ # intersect each other; split one of them in two parts.
+ if not qVerticesBehindPlaneP and not pVerticesInFrontPlaneQ:
+ debug("\nSimple Intersection?\n")
+ debug("Test 3bis or 4bis failed\n")
+ debug("Split here!!2\n")
+
+ facelist = facesplit(P, Q, facelist, nmesh)
+ split_done = 1
+ break
- transformed_mesh.addFace(newface)
+ facelist.remove(Q)
+ facelist.insert(0, Q)
+ Q.smooth = 1
+ face_marked = 1
+ debug("Q marked!\n")
+ break
+
+ # Write P!
+ if split_done == 0 and face_marked == 0:
+ facelist.remove(P)
+ maplist.append(P)
- # at the end of the loop on obj
-
- transformed_obj = Object.New(obj.getType(), "flat"+obj.name)
- transformed_obj.link(transformed_mesh)
- transformed_obj.loc = obj.loc
- newscene.link(transformed_obj)
+ progress.update()
-
- return newscene
+ if facelist == None:
+ maplist = [P, Q]
+ print [v.co for v in P]
+ print [v.co for v in Q]
+ break
+ # end of while len(facelist)
+
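+ # To summarize the loop above: tests 0-2 discard Qs whose z/x/y extents
+ # do not overlap P's, tests 3-5 try to prove that P does not obscure Q
+ # (so P can be written first), and tests 3bis/4bis decide whether the
+ # order can simply be swapped (Q moved ahead of P and marked) or whether
+ # a face must be split to break a possible visibility cycle.
+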
- ##
- # Private Methods
- #
+ nmesh.faces = maplist
- def _isFaceVisible_old(self, face, obj, cameraObj):
- """Determine if the face is visible from the current camera.
-
- The following code is taken basicly from the original vrm script.
- """
-
- camera = cameraObj
-
- numvert = len(face)
-
- # backface culling
-
- # translate and rotate according to the object matrix
- # and then translate according to the camera position
- #m = obj.getMatrix()
- #m.transpose()
-
- #a = m*Vector(face[0]) - Vector(cameraObj.loc)
- #b = m*Vector(face[1]) - Vector(cameraObj.loc)
- #c = m*Vector(face[numvert-1]) - Vector(cameraObj.loc)
-
- a = []
- a.append(face[0][0])
- a.append(face[0][1])
- a.append(face[0][2])
- a = RotatePoint(a[0], a[1], a[2], obj.RotX, obj.RotY, obj.RotZ)
- a[0] += obj.LocX - camera.LocX
- a[1] += obj.LocY - camera.LocY
- a[2] += obj.LocZ - camera.LocZ
- b = []
- b.append(face[1][0])
- b.append(face[1][1])
- b.append(face[1][2])
- b = RotatePoint(b[0], b[1], b[2], obj.RotX, obj.RotY, obj.RotZ)
- b[0] += obj.LocX - camera.LocX
- b[1] += obj.LocY - camera.LocY
- b[2] += obj.LocZ - camera.LocZ
- c = []
- c.append(face[numvert-1][0])
- c.append(face[numvert-1][1])
- c.append(face[numvert-1][2])
- c = RotatePoint(c[0], c[1], c[2], obj.RotX, obj.RotY, obj.RotZ)
- c[0] += obj.LocX - camera.LocX
- c[1] += obj.LocY - camera.LocY
- c[2] += obj.LocZ - camera.LocZ
-
- norm = [0, 0, 0]
- norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2])
- norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2]))
- norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])
-
- d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2]
- #d = DotVecs(Vector(norm), Vector(a))
-
- return (d<0)
-
- def _isFaceVisible(self, face, obj, cameraObj):
- """Determine if the face is visible from the current camera.
+ nmesh.update()
- The following code is taken basicly from the original vrm script.
+
+ def _doHiddenSurfaceRemoval(self, mesh):
+ """Do HSR for the given mesh.
"""
+ if len(mesh.faces) == 0:
+ return
- camera = cameraObj
+ if config.polygons['HSR'] == 'PAINTER':
+ print "\nUsing the Painter algorithm for HSR."
+ self.__simpleDepthSort(mesh)
- numvert = len(face)
+ elif config.polygons['HSR'] == 'NEWELL':
+ print "\nUsing the Newell's algorithm for HSR."
+ self.__newellDepthSort(mesh)
- # backface culling
- # translate and rotate according to the object matrix
- # and then translate according to the camera position
- m = obj.getMatrix()
- m.transpose()
-
- a = m*Vector(face[0]) - Vector(cameraObj.loc)
- b = m*Vector(face[1]) - Vector(cameraObj.loc)
- c = m*Vector(face[numvert-1]) - Vector(cameraObj.loc)
+ def _doEdgesStyle(self, mesh, edgestyleSelect):
+ """Process Mesh Edges accroding to a given selection style.
+
+ Examples of algorithms:
- norm = m*Vector(face.no)
+ Contours:
+ given an edge, if its adjacent faces have the same normal (that is,
+ they are coplanar), then deselect it.
- d = DotVecs(norm, a)
+ Silhouettes:
+ given an edge, if one of its adjacent faces is front-facing and the
+ other is back-facing, then select it, else deselect it.
+ """
- return (d<0)
+ Mesh.Mode(Mesh.SelectModes['EDGE'])
- def _doClipping(face):
- return
+ edge_cache = MeshUtils.buildEdgeFaceUsersCache(mesh)
+
+ for i,edge_faces in enumerate(edge_cache):
+ mesh.edges[i].sel = 0
+ if edgestyleSelect(edge_faces):
+ mesh.edges[i].sel = 1
+
+ """
+ for e in mesh.edges:
+
+ e.sel = 0
+ if edgestyleSelect(e, mesh):
+ e.sel = 1
+ """
+
# ---------------------------------------------------------------------
#
-## Main Program
+## GUI Class and Main Program
#
# ---------------------------------------------------------------------
-# FIXME: really hackish code, just to test if the other parts work
-def depthSorting(scene):
-
- cameraObj = Scene.GetCurrent().getCurrentCamera()
- Objects = scene.getChildren()
+from Blender import BGL, Draw
+from Blender.BGL import *
- Objects.sort(lambda obj1, obj2:
- cmp(Vector(Vector(cameraObj.loc) - Vector(obj1.loc)).length,
- Vector(Vector(cameraObj.loc) - Vector(obj2.loc)).length
- )
- )
+class GUI:
- # hackish sorting of faces according to the max z value of a vertex
- for o in Objects:
+ def _init():
+
+ # Output Format menu
+ output_format = config.output['FORMAT']
+ default_value = outputWriters.keys().index(output_format)+1
+ GUI.outFormatMenu = Draw.Create(default_value)
+ GUI.evtOutFormatMenu = 0
+
+ # Animation toggle button
+ GUI.animToggle = Draw.Create(config.output['ANIMATION'])
+ GUI.evtAnimToggle = 1
+
+ # Join Objects toggle button
+ GUI.joinObjsToggle = Draw.Create(config.output['JOIN_OBJECTS'])
+ GUI.evtJoinObjsToggle = 2
+
+ # Render filled polygons
+ GUI.polygonsToggle = Draw.Create(config.polygons['SHOW'])
+
+ # Shading Style menu
+ shading_style = config.polygons['SHADING']
+ default_value = shadingStyles.keys().index(shading_style)+1
+ GUI.shadingStyleMenu = Draw.Create(default_value)
+ GUI.evtShadingStyleMenu = 21
+
+ GUI.evtPolygonsToggle = 3
+ # We hide the config.polygons['EXPANSION_TRICK'], for now
+
+ # Render polygon edges
+ GUI.showEdgesToggle = Draw.Create(config.edges['SHOW'])
+ GUI.evtShowEdgesToggle = 4
+
+ # Render hidden edges
+ GUI.showHiddenEdgesToggle = Draw.Create(config.edges['SHOW_HIDDEN'])
+ GUI.evtShowHiddenEdgesToggle = 5
+
+ # Edge Style menu
+ edge_style = config.edges['STYLE']
+ default_value = edgeStyles.keys().index(edge_style)+1
+ GUI.edgeStyleMenu = Draw.Create(default_value)
+ GUI.evtEdgeStyleMenu = 6
+
+ # Edge Width slider
+ GUI.edgeWidthSlider = Draw.Create(config.edges['WIDTH'])
+ GUI.evtEdgeWidthSlider = 7
+
+ # Edge Color Picker
+ c = config.edges['COLOR']
+ GUI.edgeColorPicker = Draw.Create(c[0]/255.0, c[1]/255.0, c[2]/255.0)
+ GUI.evtEdgeColorPicker = 71
+
+ # Render Button
+ GUI.evtRenderButton = 8
+
+ # Exit Button
+ GUI.evtExitButton = 9
+
+ def draw():
+
+ # initialize static members
+ GUI._init()
+
+ glClear(GL_COLOR_BUFFER_BIT)
+ glColor3f(0.0, 0.0, 0.0)
+ glRasterPos2i(10, 350)
+ Draw.Text("VRM: Vector Rendering Method script. Version %s." %
+ __version__)
+ glRasterPos2i(10, 335)
+ Draw.Text("Press Q or ESC to quit.")
+
+ # Build the output format menu
+ glRasterPos2i(10, 310)
+ Draw.Text("Select the output Format:")
+ outMenuStruct = "Output Format %t"
+ for t in outputWriters.keys():
+ outMenuStruct = outMenuStruct + "|%s" % t
+ GUI.outFormatMenu = Draw.Menu(outMenuStruct, GUI.evtOutFormatMenu,
+ 10, 285, 160, 18, GUI.outFormatMenu.val, "Choose the Output Format")
+
+ # Animation toggle
+ GUI.animToggle = Draw.Toggle("Animation", GUI.evtAnimToggle,
+ 10, 260, 160, 18, GUI.animToggle.val,
+ "Toggle rendering of animations")
+
+ # Join Objects toggle
+ GUI.joinObjsToggle = Draw.Toggle("Join objects", GUI.evtJoinObjsToggle,
+ 10, 235, 160, 18, GUI.joinObjsToggle.val,
+ "Join objects in the rendered file")
+
+ # Render Button
+ Draw.Button("Render", GUI.evtRenderButton, 10, 210-25, 75, 25+18,
+ "Start Rendering")
+ Draw.Button("Exit", GUI.evtExitButton, 95, 210-25, 75, 25+18, "Exit!")
+
+ # Rendering Styles
+ glRasterPos2i(200, 310)
+ Draw.Text("Rendering Style:")
+
+ # Render Polygons
+ GUI.polygonsToggle = Draw.Toggle("Filled Polygons", GUI.evtPolygonsToggle,
+ 200, 285, 160, 18, GUI.polygonsToggle.val,
+ "Render filled polygons")
+
+ if GUI.polygonsToggle.val == 1:
+
+ # Polygon Shading Style
+ shadingStyleMenuStruct = "Shading Style %t"
+ for t in shadingStyles.keys():
+ shadingStyleMenuStruct = shadingStyleMenuStruct + "|%s" % t.lower()
+ GUI.shadingStyleMenu = Draw.Menu(shadingStyleMenuStruct, GUI.evtShadingStyleMenu,
+ 200, 260, 160, 18, GUI.shadingStyleMenu.val,
+ "Choose the shading style")
+
+
+ # Render Edges
+ GUI.showEdgesToggle = Draw.Toggle("Show Edges", GUI.evtShowEdgesToggle,
+ 200, 235, 160, 18, GUI.showEdgesToggle.val,
+ "Render polygon edges")
+
+ if GUI.showEdgesToggle.val == 1:
+
+ # Edge Style
+ edgeStyleMenuStruct = "Edge Style %t"
+ for t in edgeStyles.keys():
+ edgeStyleMenuStruct = edgeStyleMenuStruct + "|%s" % t.lower()
+ GUI.edgeStyleMenu = Draw.Menu(edgeStyleMenuStruct, GUI.evtEdgeStyleMenu,
+ 200, 210, 160, 18, GUI.edgeStyleMenu.val,
+ "Choose the edge style")
+
+ # Edge size
+ GUI.edgeWidthSlider = Draw.Slider("Width: ", GUI.evtEdgeWidthSlider,
+ 200, 185, 140, 18, GUI.edgeWidthSlider.val,
+ 0.0, 10.0, 0, "Change Edge Width")
+
+ # Edge Color
+ GUI.edgeColorPicker = Draw.ColorPicker(GUI.evtEdgeColorPicker,
+ 342, 185, 18, 18, GUI.edgeColorPicker.val, "Choose Edge Color")
+
+ # Show Hidden Edges
+ GUI.showHiddenEdgesToggle = Draw.Toggle("Show Hidden Edges",
+ GUI.evtShowHiddenEdgesToggle,
+ 200, 160, 160, 18, GUI.showHiddenEdgesToggle.val,
+ "Render hidden edges as dashed lines")
+
+ glRasterPos2i(10, 160)
+ Draw.Text("%s (c) 2006" % __author__)
+
+ def event(evt, val):
+
+ if evt == Draw.ESCKEY or evt == Draw.QKEY:
+ Draw.Exit()
+ else:
+ return
- mesh = o.data
- mesh.faces.sort(
- lambda f1, f2:
- # Sort faces according to the min z coordinate in a face
- #cmp(min([v[2] for v in f1]), min([v[2] for v in f2])))
+ Draw.Redraw(1)
- # Sort faces according to the max z coordinate in a face
- cmp(max([v[2] for v in f1]), max([v[2] for v in f2])))
-
- # Sort faces according to the avg z coordinate in a face
- #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2)))
- mesh.faces.reverse()
- mesh.update()
-
- # update the scene
- for o in scene.getChildren():
- scene.unlink(o)
- for o in Objects:
- scene.link(o)
-
+ def button_event(evt):
+
+ if evt == GUI.evtExitButton:
+ Draw.Exit()
+
+ elif evt == GUI.evtOutFormatMenu:
+ i = GUI.outFormatMenu.val - 1
+ config.output['FORMAT']= outputWriters.keys()[i]
+
+ elif evt == GUI.evtAnimToggle:
+ config.output['ANIMATION'] = bool(GUI.animToggle.val)
+
+ elif evt == GUI.evtJoinObjsToggle:
+ config.output['JOIN_OBJECTS'] = bool(GUI.joinObjsToggle.val)
+
+ elif evt == GUI.evtPolygonsToggle:
+ config.polygons['SHOW'] = bool(GUI.polygonsToggle.val)
+
+ elif evt == GUI.evtShadingStyleMenu:
+ i = GUI.shadingStyleMenu.val - 1
+ config.polygons['SHADING'] = shadingStyles.keys()[i]
+
+ elif evt == GUI.evtShowEdgesToggle:
+ config.edges['SHOW'] = bool(GUI.showEdgesToggle.val)
+
+ elif evt == GUI.evtShowHiddenEdgesToggle:
+ config.edges['SHOW_HIDDEN'] = bool(GUI.showHiddenEdgesToggle.val)
+
+ elif evt == GUI.evtEdgeStyleMenu:
+ i = GUI.edgeStyleMenu.val - 1
+ config.edges['STYLE'] = edgeStyles.keys()[i]
+
+ elif evt == GUI.evtEdgeWidthSlider:
+ config.edges['WIDTH'] = float(GUI.edgeWidthSlider.val)
+
+ elif evt == GUI.evtEdgeColorPicker:
+ config.edges['COLOR'] = [int(c*255.0) for c in GUI.edgeColorPicker.val]
+
+ elif evt == GUI.evtRenderButton:
+ label = "Save %s" % config.output['FORMAT']
+ # Show the File Selector
+ global outputfile
+ Blender.Window.FileSelector(vectorize, label, outputfile)
+
+ else:
+ print "Event: %d not handled!" % evt
+
+ if evt:
+ Draw.Redraw(1)
+ #GUI.conf_debug()
+
+ def conf_debug():
+ from pprint import pprint
+ print "\nConfig"
+ pprint(config.output)
+ pprint(config.polygons)
+ pprint(config.edges)
+
+ _init = staticmethod(_init)
+ draw = staticmethod(draw)
+ event = staticmethod(event)
+ button_event = staticmethod(button_event)
+ conf_debug = staticmethod(conf_debug)
+
+# A wrapper function for the vectorizing process
def vectorize(filename):
+ """The vectorizing process is as follows:
+
+ - Instantiate the writer and the renderer
+ - Render!
+ """
+
+ if filename == "":
+ print "\nERROR: invalid file name!"
+ return
- print "Filename: %s" % filename
+ from Blender import Window
+ editmode = Window.EditMode()
+ if editmode: Window.EditMode(0)
+
+ actualWriter = outputWriters[config.output['FORMAT']]
+ writer = actualWriter(filename)
- scene = Scene.GetCurrent()
renderer = Renderer()
+ renderer.doRendering(writer, config.output['ANIMATION'])
- flatScene = renderer.doRendering(scene)
- canvasSize = renderer.getCanvasSize()
-
- depthSorting(flatScene)
+ if editmode: Window.EditMode(1)
- writer = SVGVectorWriter(filename, canvasSize)
- writer.printCanvas(flatScene)
-
- Blender.Scene.unlink(flatScene)
- del flatScene
+# We use a global progress Indicator Object
+progress = None
# Here the main
if __name__ == "__main__":
- try:
- Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg")
- except:
- vectorize("proba.svg")
+ global progress
+
+ outputfile = ""
+ basename = Blender.sys.basename(Blender.Get('filename'))
+ if basename != "":
+ outputfile = Blender.sys.splitext(basename)[0] + "." + str(config.output['FORMAT']).lower()
+
+ if Blender.mode == 'background':
+ progress = ConsoleProgressIndicator()
+ vectorize(outputfile)
+ else:
+ progress = GraphicalProgressIndicator()
+ Draw.Register(GUI.draw, GUI.event, GUI.button_event)
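+
+# A note on headless use: when Blender is started in background mode
+# (something along the lines of `blender -b yourfile.blend -P vrm.py`,
+# with yourfile.blend being a hypothetical scene file), Blender.mode is
+# 'background', the console progress bar is used and the output file name
+# is derived from the .blend file name as above.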