X-Git-Url: https://git.ao2.it/vrm.git/blobdiff_plain/08e34a873729c718a510ec642d36eebaef6f4ee7..e9fc019655d1dc322bcd845e667079ea2503fc7d:/vrm.py diff --git a/vrm.py b/vrm.py index d85b085..267b239 100755 --- a/vrm.py +++ b/vrm.py @@ -1,13 +1,21 @@ #!BPY """ Name: 'VRM' -Blender: 241 -Group: 'Export' -Tooltip: 'Vector Rendering Method Export Script 0.3' +Blender: 245 +Group: 'Render' +Tooltip: 'Vector Rendering Method script' +""" + +__author__ = "Antonio Ospite" +__url__ = ["http://vrm.ao2.it"] +__version__ = "0.3.beta" + +__bpydoc__ = """\ + Render the scene and save the result in vector format. """ # --------------------------------------------------------------------- -# Copyright (c) 2006 Antonio Ospite +# Copyright (c) 2006, 2007, 2008, 2009 Antonio Ospite # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -25,674 +33,3091 @@ Tooltip: 'Vector Rendering Method Export Script 0.3' # # --------------------------------------------------------------------- # -# NOTE: I do not know who is the original author of 'vrm'. -# The present code is almost entirely rewritten from scratch, -# but if I have to give credits to anyone, please let me know, -# so I can update the copyright. +# Additional credits: +# Thanks to Emilio Aguirre for S2flender from which I took inspirations :) +# Thanks to Nikola Radovanovic, the author of the original VRM script, +# the code you read here has been rewritten _almost_ entirely +# from scratch but Nikola gave me the idea, so I thank him publicly. # # --------------------------------------------------------------------- # -# Additional credits: -# Thanks to Emilio Aguirre for S2flender from which I took inspirations :) -# Thanks to Anthony C. D'Agostino for the original backface.py script +# Things TODO for a next release: +# - Shadeless shader +# - FIX the issue with negative scales in object tranformations! +# - Use a better depth sorting algorithm +# - Review how selections are made (this script uses selection states of +# primitives to represent visibility infos) +# - Use a data structure other than Mesh to represent the 2D image? +# Think to a way to merge (adjacent) polygons that have the same color. +# Or a way to use paths for silhouettes and contours. +# - Consider SMIL for animation handling instead of ECMA Script? (Firefox do +# not support SMIL for animations) +# - Switch to the Mesh structure, should be considerably faster +# (partially done, but with Mesh we cannot sort faces, yet) +# - Implement Edge Styles (silhouettes, contours, etc.) (partially done). +# - Implement Shading Styles? (partially done, to make more flexible). +# - Add Vector Writers other than SVG. +# - set the background color! +# - Check memory use!! 
# # --------------------------------------------------------------------- import Blender -from Blender import Scene, Object, NMesh, Lamp, Camera +from Blender import Scene, Object, Mesh, NMesh, Material, Lamp, Camera, Window from Blender.Mathutils import * from math import * +import sys +import time + +try: + set() +except NameError: + from sets import Set as set + + +def uniq(alist): + tmpdict = dict() + return [tmpdict.setdefault(e, e) for e in alist if e not in tmpdict] + # in python > 2.4 we ca use the following + #return [ u for u in alist if u not in locals()['_[1]'] ] + + +# Constants +EPS = 10e-5 + +# We use a global progress Indicator Object +progress = None + + +# Config class for global settings + +class config: + polygons = dict() + polygons['SHOW'] = True + polygons['SHADING'] = 'FLAT' # FLAT or TOON + polygons['HSR'] = 'PAINTER' # PAINTER or NEWELL + # Hidden to the user for now + polygons['EXPANSION_TRICK'] = True + + polygons['TOON_LEVELS'] = 2 + + edges = dict() + edges['SHOW'] = False + edges['SHOW_HIDDEN'] = False + edges['STYLE'] = 'MESH' # MESH or SILHOUETTE + edges['WIDTH'] = 2 + edges['COLOR'] = [0, 0, 0] + + output = dict() + output['FORMAT'] = 'SVG' + output['ANIMATION'] = False + output['JOIN_OBJECTS'] = True + + def saveToRegistry(): + registry = {} + + for k, v in config.__dict__.iteritems(): + + # config class store settings in dictionaries + if v.__class__ == dict().__class__: + + regkey_prefix = k.upper() + "_" + + for opt_k, opt_v in v.iteritems(): + regkey = regkey_prefix + opt_k + + registry[regkey] = opt_v + + Blender.Registry.SetKey('VRM', registry, True) + + saveToRegistry = staticmethod(saveToRegistry) + + def loadFromRegistry(): + registry = Blender.Registry.GetKey('VRM', True) + if not registry: + return + + for k, v in registry.iteritems(): + k_tmp = k.split('_') + conf_attr = k_tmp[0].lower() + conf_key = str.join("_", k_tmp[1:]) + conf_val = v + + if conf_attr in config.__dict__: + config.__dict__[conf_attr][conf_key] = conf_val + + loadFromRegistry = staticmethod(loadFromRegistry) + + +# Utility functions +print_debug = False + + +def dumpfaces(flist, filename): + """Dump a single face to a file. + """ + if not print_debug: + return + + class tmpmesh: + pass + + m = tmpmesh() + m.faces = flist + + writerobj = SVGVectorWriter(filename) + + writerobj.open() + writerobj._printPolygons(m) + + writerobj.close() + + +def debug(msg): + if print_debug: + sys.stderr.write(msg) + + +def EQ(v1, v2): + return (abs(v1[0] - v2[0]) < EPS and + abs(v1[1] - v2[1]) < EPS) +by_furthest_z = (lambda f1, f2: + cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2]) + EPS) + ) + + +def sign(x): + + if x < -EPS: + #if x < 0: + return -1 + elif x > EPS: + #elif x > 0: + return 1 + else: + return 0 # --------------------------------------------------------------------- # -## Projections classes +## HSR Utility class # # --------------------------------------------------------------------- -class Projector: - """Calculate the projection of an object given the camera. - - A projector is useful to so some per-object transformation to obtain the - projection of an object given the camera. - - The main method is #doProjection# see the method description for the - parameter list. +EPS = 10e-5 +INF = 10e5 + + +class HSR: + """A utility class for HSR processing. """ - def __init__(self, cameraObj, obMesh, canvasSize): - """Calculate the projection matrix. + def is_nonplanar_quad(face): + """Determine if a quad is non-planar. 
+ + From: http://mathworld.wolfram.com/Coplanar.html + + Geometric objects lying in a common plane are said to be coplanar. + Three noncollinear points determine a plane and so are trivially + coplanar. Four points are coplanar iff the volume of the tetrahedron + defined by them is 0, + + | x_1 y_1 z_1 1 | + | x_2 y_2 z_2 1 | + | x_3 y_3 z_3 1 | + | x_4 y_4 z_4 1 | == 0 + + Coplanarity is equivalent to the statement that the pair of lines + determined by the four points are not skew, and can be equivalently + stated in vector form as (x_3-x_1).[(x_2-x_1)x(x_4-x_3)]==0. + + An arbitrary number of n points x_1, ..., x_n can be tested for + coplanarity by finding the point-plane distances of the points + x_4, ..., x_n from the plane determined by (x_1,x_2,x_3) + and checking if they are all zero. + If so, the points are all coplanar. - The projection matrix depends, in this case, on the camera settings, - and also on object transformation matrix. + We here check only for 4-point complanarity. """ + n = len(face) - self.size = canvasSize + # assert(n>4) + if n < 3 or n > 4: + print "ERROR a mesh in Blender can't have more than 4 vertices or less than 3" + raise AssertionError - camera = cameraObj.getData() + elif n == 3: + # three points must be complanar + return False + else: # n == 4 + x1 = Vector(face[0].co) + x2 = Vector(face[1].co) + x3 = Vector(face[2].co) + x4 = Vector(face[3].co) - aspect = float(canvasSize[0])/float(canvasSize[1]) - near = camera.clipStart - far = camera.clipEnd + v = (x3 - x1) * CrossVecs((x2 - x1), (x4 - x3)) + if v != 0: + return True - fovy = atan(0.5/aspect/(camera.lens/32)) - fovy = fovy * 360/pi - - # What projection do we want? - if camera.type: - m2 = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale) - else: - m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far) - + return False - # View transformation - cam = Matrix(cameraObj.getInverseMatrix()) - cam.transpose() + is_nonplanar_quad = staticmethod(is_nonplanar_quad) - m1 = Matrix(obMesh.getMatrix()) - m1.transpose() - - mP = cam * m1 - mP = m2 * mP + def pointInPolygon(poly, v): + return False - self.projectionMatrix = mP + pointInPolygon = staticmethod(pointInPolygon) - ## - # Public methods - # + def edgeIntersection(s1, s2, do_perturbate=False): - def doProjection(self, v): - """Project the point on the view plane. + (x1, y1) = s1[0].co[0], s1[0].co[1] + (x2, y2) = s1[1].co[0], s1[1].co[1] + + (x3, y3) = s2[0].co[0], s2[0].co[1] + (x4, y4) = s2[1].co[0], s2[1].co[1] + + #z1 = s1[0].co[2] + #z2 = s1[1].co[2] + #z3 = s2[0].co[2] + #z4 = s2[1].co[2] + + # calculate delta values (vector components) + dx1 = x2 - x1 + dx2 = x4 - x3 + dy1 = y2 - y1 + dy2 = y4 - y3 + + #dz1 = z2 - z1 + #dz2 = z4 - z3 + + C = dy2 * dx1 - dx2 * dy1 # cross product + if C == 0: # parallel + return None + + dx3 = x1 - x3 # combined origin offset vector + dy3 = y1 - y3 + + a1 = (dy3 * dx2 - dx3 * dy2) / C + a2 = (dy3 * dx1 - dx3 * dy1) / C + + # check for degeneracies + #print_debug("\n") + #print_debug(str(a1)+"\n") + #print_debug(str(a2)+"\n\n") + + if (a1 == 0 or a1 == 1 or a2 == 0 or a2 == 1): + # Intersection on boundaries, we consider the point external? + return None + + elif (a1 > 0.0 and a1 < 1.0 and a2 > 0.0 and a2 < 1.0): # lines cross + x = x1 + a1 * dx1 + y = y1 + a1 * dy1 + + #z = z1 + a1 * dz1 + z = 0 + return (NMesh.Vert(x, y, z), a1, a2) - Given a vertex calculate the projection using the current projection - matrix. 
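                # Derivation sketch for the parametric solution above:
                # writing the two segments as
                #   S1(a1) = (x1, y1) + a1 * (dx1, dy1)
                #   S2(a2) = (x3, y3) + a2 * (dx2, dy2)
                # and equating them gives a 2x2 linear system whose
                # determinant is the 2D cross product C = dy2*dx1 - dx2*dy1
                # (C == 0 means the segments are parallel).  Cramer's rule
                # then yields
                #   a1 = (dy3*dx2 - dx3*dy2) / C
                #   a2 = (dy3*dx1 - dx3*dy1) / C
                # with (dx3, dy3) = (x1 - x3, y1 - y3); the segments cross
                # properly only when 0 < a1 < 1 and 0 < a2 < 1, and the
                # intersection point is (x1 + a1*dx1, y1 + a1*dy1).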
- """ - - # Note that we need the vertex expressed using homogeneous coordinates - p = self.projectionMatrix * Vector([v[0], v[1], v[2], 1.0]) - - mW = self.size[0]/2 - mH = self.size[1]/2 - - if p[3]<=0: - p[0] = round(p[0]*mW)+mW - p[1] = round(p[1]*mH)+mH else: - p[0] = round((p[0]/p[3])*mW)+mW - p[1] = round((p[1]/p[3])*mH)+mH - - # For now we want (0,0) in the top-left corner of the canvas - # Mirror and translate along y - p[1] *= -1 - p[1] += self.size[1] - - return p + # lines have intersections but not those segments + return None - ## - # Private methods - # - - def _calcPerspectiveMatrix(self, fovy, aspect, near, far): - """Return a perspective projection matrix.""" - - top = near * tan(fovy * pi / 360.0) - bottom = -top - left = bottom*aspect - right= top*aspect - x = (2.0 * near) / (right-left) - y = (2.0 * near) / (top-bottom) - a = (right+left) / (right-left) - b = (top+bottom) / (top - bottom) - c = - ((far+near) / (far-near)) - d = - ((2*far*near)/(far-near)) - - m = Matrix( - [x, 0.0, a, 0.0], - [0.0, y, b, 0.0], - [0.0, 0.0, c, d], - [0.0, 0.0, -1.0, 0.0]) + edgeIntersection = staticmethod(edgeIntersection) - return m + def isVertInside(self, v): + winding_number = 0 + coincidence = False - def _calcOrthoMatrix(self, fovy, aspect , near, far, scale): - """Return an orthogonal projection matrix.""" - - top = near * tan(fovy * pi / 360.0) * (scale * 10) - bottom = -top - left = bottom * aspect - right= top * aspect - rl = right-left - tb = top-bottom - fn = near-far - tx = -((right+left)/rl) - ty = -((top+bottom)/tb) - tz = ((far+near)/fn) + # Create point at infinity + point_at_infinity = NMesh.Vert(-INF, v.co[1], -INF) - m = Matrix( - [2.0/rl, 0.0, 0.0, tx], - [0.0, 2.0/tb, 0.0, ty], - [0.0, 0.0, 2.0/fn, tz], - [0.0, 0.0, 0.0, 1.0]) - - return m + for i in range(len(self.v)): + s1 = (point_at_infinity, v) + s2 = (self.v[i - 1], self.v[i]) + if EQ(v.co, s2[0].co) or EQ(v.co, s2[1].co): + coincidence = True -# --------------------------------------------------------------------- -# -## Object representation class -# -# --------------------------------------------------------------------- + if HSR.edgeIntersection(s1, s2, do_perturbate=False): + winding_number += 1 -# TODO: a class to represent the needed properties of a 2D vector image -# Just use a NMesh structure? + # Check even or odd + if (winding_number % 2) == 0: + return False + else: + if coincidence: + return False + return True + isVertInside = staticmethod(isVertInside) -# --------------------------------------------------------------------- -# -## Vector Drawing Classes -# -# --------------------------------------------------------------------- + def det(a, b, c): + return ((b[0] - a[0]) * (c[1] - a[1]) - + (b[1] - a[1]) * (c[0] - a[0])) -## A generic Writer + det = staticmethod(det) -class VectorWriter: - """ - A class for printing output in a vectorial format. + def pointInPolygon(q, P): + is_in = False - Given a 2D representation of the 3D scene the class is responsible to - write it is a vector format. + point_at_infinity = NMesh.Vert(-INF, q.co[1], -INF) - Every subclasses of VectorWriter must have at last the following public - methods: - - printCanvas(mesh) --- where mesh is as specified before. 
- """ - - def __init__(self, fileName, canvasSize): - """Open the file named #fileName# and set the canvas size.""" - - self.file = open(fileName, "w") - print "Outputting to: ", fileName + det = HSR.det - self.canvasSize = canvasSize - + for i in range(len(P.v)): + p0 = P.v[i - 1] + p1 = P.v[i] + if (det(q.co, point_at_infinity.co, p0.co) < 0) != (det(q.co, point_at_infinity.co, p1.co) < 0): + if det(p0.co, p1.co, q.co) == 0: + #print "On Boundary" + return False + elif (det(p0.co, p1.co, q.co) < 0) != (det(p0.co, p1.co, point_at_infinity.co) < 0): + is_in = not is_in - ## - # Public Methods - # - - def printCanvas(mesh): - return - - ## - # Private Methods - # - - def _printHeader(): - return + return is_in - def _printFooter(): - return + pointInPolygon = staticmethod(pointInPolygon) + def projectionsOverlap(f1, f2): + """ If you have nonconvex, but still simple polygons, an acceptable method + is to iterate over all vertices and perform the Point-in-polygon test[1]. + The advantage of this method is that you can compute the exact + intersection point and collision normal that you will need to simulate + collision. When you have the point that lies inside the other polygon, you + just iterate over all edges of the second polygon again and look for edge + intersections. Note that this method detects collsion when it already + happens. This algorithm is fast enough to perform it hundreds of times per + sec. """ -## SVG Writer + for i in range(len(f1.v)): -class SVGVectorWriter(VectorWriter): - """A concrete class for writing SVG output. + # If a point of f1 in inside f2, there is an overlap! + v1 = f1.v[i] + #if HSR.isVertInside(f2, v1): + if HSR.pointInPolygon(v1, f2): + return True - The class does not support animations, yet. - Sorry. - """ + # If not the polygon can be ovelap as well, so we check for + # intersection between an edge of f1 and all the edges of f2 - def __init__(self, file, canvasSize): - """Simply call the parent Contructor.""" - VectorWriter.__init__(self, file, canvasSize) + v0 = f1.v[i - 1] + for j in range(len(f2.v)): + v2 = f2.v[j - 1] + v3 = f2.v[j] - ## - # Public Methods - # - - def printCanvas(self, scene): - """Convert the scene representation to SVG.""" + e1 = v0, v1 + e2 = v2, v3 - self._printHeader() - - Objects = scene.getChildren() - for obj in Objects: - self.file.write("\n") - - for face in obj.getData().faces: - self._printPolygon(face) + intrs = HSR.edgeIntersection(e1, e2) + if intrs: + #print_debug(str(v0.co) + " " + str(v1.co) + " " + + # str(v2.co) + " " + str(v3.co) ) + #print_debug("\nIntersection\n") - self._printWireframe(obj.getData()) - - self.file.write("\n") - - self._printFooter() - - ## - # Private Methods - # - - def _printHeader(self): - """Print SVG header.""" + return True - self.file.write("\n") - self.file.write("\n") - self.file.write("\n\n" % - self.canvasSize) + return False - def _printFooter(self): - """Print the SVG footer.""" + projectionsOverlap = staticmethod(projectionsOverlap) - self.file.write("\n\n") - self.file.close() + def midpoint(p1, p2): + """Return the midpoint of two vertices. + """ + m = MidpointVecs(Vector(p1), Vector(p2)) + mv = NMesh.Vert(m[0], m[1], m[2]) + + return mv - def _printWireframe(self, mesh): - """Print the wireframe using mesh edges... is this the correct way? + midpoint = staticmethod(midpoint) + + def facesplit(P, Q, facelist, nmesh): + """Split P or Q according to the strategy illustrated in the Newell's + paper. 
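        In short, as a summary of the code below: (1) if P straddles the
        plane of Q, P is split on that plane; (2) otherwise, if Q straddles
        the plane of P, Q is split; (3) if neither straddles the other, the
        current implementation gives up and simply drops P from the face
        list.  The resulting pieces inherit material, selection state and
        colour from the original face, and the face list is re-sorted by
        depth before being returned.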
""" - print mesh.edges - print - print mesh.verts - - stroke_width=0.5 - stroke_col = [0, 0, 0] - - self.file.write("\n") + by_furthest_z = (lambda f1, f2: + cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2]) + EPS) + ) - for e in mesh.edges: - self.file.write("\n") + # Choose if split P on Q plane or vice-versa - self.file.write("\n") - - - - def _printPolygon(self, face): - """Print our primitive, finally. - """ - - wireframe = False - - stroke_width=0.5 - - self.file.write("\n") + n = 0 + for Pi in P: + d = HSR.Distance(Vector(Pi), Q) + if d <= EPS: + n += 1 + pIntersectQ = (n != len(P)) + n = 0 + for Qi in Q: + d = HSR.Distance(Vector(Qi), P) + if d >= -EPS: + n += 1 + qIntersectP = (n != len(Q)) -# --------------------------------------------------------------------- -# -## Rendering Classes -# -# --------------------------------------------------------------------- + newfaces = [] -def RotatePoint(PX,PY,PZ,AngleX,AngleY,AngleZ): - - NewPoint = [] - # Rotate X - NewY = (PY * cos(AngleX))-(PZ * sin(AngleX)) - NewZ = (PZ * cos(AngleX))+(PY * sin(AngleX)) - # Rotate Y - PZ = NewZ - PY = NewY - NewZ = (PZ * cos(AngleY))-(PX * sin(AngleY)) - NewX = (PX * cos(AngleY))+(PZ * sin(AngleY)) - PX = NewX - PZ = NewZ - # Rotate Z - NewX = (PX * cos(AngleZ))-(PY * sin(AngleZ)) - NewY = (PY * cos(AngleZ))+(PX * sin(AngleZ)) - NewPoint.append(NewX) - NewPoint.append(NewY) - NewPoint.append(NewZ) - return NewPoint + # 1. If parts of P lie in both half-spaces of Q + # then splice P in two with the plane of Q + if pIntersectQ: + #print "We split P" + f = P + plane = Q -class Renderer: - """Render a scene viewed from a given camera. - - This class is responsible of the rendering process, hence transormation - and projection of the ojects in the scene are invoked by the renderer. + newfaces = HSR.splitOn(plane, f) - The user can optionally provide a specific camera for the rendering, see - the #doRendering# method for more informations. - """ + # 2. Else if parts of Q lie in both half-space of P + # then splice Q in two with the plane of P + if qIntersectP and newfaces == None: + #print "We split Q" + f = Q + plane = P - def __init__(self): - """Set the canvas size to a defaulr value. - - The only instance attribute here is the canvas size, which can be - queryed to the renderer by other entities. - """ - self.canvasSize = (0.0, 0.0) + newfaces = HSR.splitOn(plane, f) + #print "After" + # 3. Else slice P in half through the mid-point of + # the longest pair of opposite sides + if newfaces == None: - ## - # Public Methods - # + print "We ignore P..." + facelist.remove(P) + return facelist - def getCanvasSize(self): - """Return the current canvas size read from Blender rendering context""" - return self.canvasSize - - def doRendering(self, scene, cameraObj=None): - """Control the rendering process. - - Here we control the entire rendering process invoking the operation - needed to transforma project the 3D scene in two dimensions. + #f = P + + #if len(P)==3: + # v1 = midpoint(f[0], f[1]) + # v2 = midpoint(f[1], f[2]) + #if len(P)==4: + # v1 = midpoint(f[0], f[1]) + # v2 = midpoint(f[2], f[3]) + #vec3 = (Vector(v2)+10*Vector(f.normal)) + # + #v3 = NMesh.Vert(vec3[0], vec3[1], vec3[2]) + + #plane = NMesh.Face([v1, v2, v3]) + # + #newfaces = splitOn(plane, f) + + if newfaces == None: + print "Big FAT problem, we weren't able to split POLYGONS!" 
+ raise AssertionError + + #print newfaces + if newfaces: + #for v in f: + # if v not in plane and v in nmesh.verts: + # nmesh.verts.remove(v) + for nf in newfaces: + + nf.mat = f.mat + nf.sel = f.sel + nf.col = [f.col[0]] * len(nf.v) + + nf.smooth = 0 + + for v in nf: + nmesh.verts.append(v) + # insert pieces in the list + facelist.append(nf) + + facelist.remove(f) + + # and resort the faces + facelist.sort(by_furthest_z) + facelist.sort(lambda f1, f2: cmp(f1.smooth, f2.smooth)) + facelist.reverse() + + #print [ f.smooth for f in facelist ] - Parameters: - scene --- the Blender Scene to render - cameraObj --- the camera object to use for the viewing processing + return facelist + + facesplit = staticmethod(facesplit) + + def isOnSegment(v1, v2, p, extremes_internal=False): + """Check if point p is in segment v1v2. """ - if cameraObj == None: - cameraObj = scene.getCurrentCamera() - - context = scene.getRenderingContext() - self.canvasSize = (context.imageSizeX(), context.imageSizeY()) - - Objects = scene.getChildren() - - # A structure to store the transformed scene - newscene = Scene.New("flat"+scene.name) - - for obj in Objects: - - if (obj.getType() != "Mesh"): - print "Type:", obj.getType(), "\tSorry, only mesh Object supported!" - continue + l1 = (v1 - p).length + l2 = (v2 - p).length - # Get a projector for this object - proj = Projector(cameraObj, obj, self.canvasSize) - - # Let's store the transformed data - transformed_mesh = NMesh.New("flat"+obj.name) - transformed_mesh.hasVertexColours(1) - - # process Edges - for v in obj.getData().verts: - transformed_mesh.verts.append(v) - transformed_mesh.edges = self._processEdges(obj.getData().edges) - print transformed_mesh.edges - - - # Store the materials - materials = obj.getData().getMaterials() - - meshfaces = obj.getData().faces - - for face in meshfaces: - - # if the face is visible flatten it on the "picture plane" - if self._isFaceVisible_old(face, obj, cameraObj): - - # Store transformed face - newface = NMesh.Face() - - for vert in face: - - p = proj.doProjection(vert.co) - - tmp_vert = NMesh.Vert(p[0], p[1], p[2]) - - # Add the vert to the mesh - transformed_mesh.verts.append(tmp_vert) - - newface.v.append(tmp_vert) - - - # Per-face color calculation - # code taken mostly from the original vrm script - # TODO: understand the code and rewrite it clearly - ambient = -150 - - fakelight = Object.Get("Lamp").loc - if fakelight == None: - fakelight = [1.0, 1.0, -0.3] - - norm = Vector(face.no) - vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2]) - vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2))) - intensity = floor(ambient + 200*acos(vektori/vduzine))/200 - if intensity < 0: - intensity = 0 - - if materials: - tmp_col = materials[face.mat].getRGBCol() - else: - tmp_col = [0.5, 0.5, 0.5] - - tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ] + # Should we consider extreme points as internal ? 
+ # The test: + # if p == v1 or p == v2: + if l1 < EPS or l2 < EPS: + return extremes_internal + + l = (v1 - v2).length - vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2]) - newface.col = [vcol, vcol, vcol, 255] - - transformed_mesh.addFace(newface) + # if the sum of l1 and l2 is circa l, then the point is on segment, + if abs(l - (l1 + l2)) < EPS: + return True + else: + return False - # at the end of the loop on obj - - transformed_obj = Object.New(obj.getType(), "flat"+obj.name) - transformed_obj.link(transformed_mesh) - transformed_obj.loc = obj.loc - newscene.link(transformed_obj) + isOnSegment = staticmethod(isOnSegment) - - return newscene + def Distance(point, face): + """ Calculate the distance between a point and a face. + An alternative but more expensive method can be: - ## - # Private Methods - # + ip = Intersect(Vector(face[0]), Vector(face[1]), Vector(face[2]), + Vector(face.no), Vector(point), 0) - def _isFaceVisible_old(self, face, obj, cameraObj): - """Determine if the face is visible from the current camera. + d = Vector(ip - point).length - The following code is taken basicly from the original vrm script. + See: http://mathworld.wolfram.com/Point-PlaneDistance.html """ - camera = cameraObj + p = Vector(point) + plNormal = Vector(face.no) + plVert0 = Vector(face.v[0]) + + d = (plVert0 * plNormal) - (p * plNormal) + + #d = plNormal * (plVert0 - p) + + #print "\nd: %.10f - sel: %d, %s\n" % (d, face.sel, str(point)) + + return d - numvert = len(face) + Distance = staticmethod(Distance) - # backface culling + def makeFaces(vl): + # + # make one or two new faces based on a list of vertex-indices + # + newfaces = [] - # translate and rotate according to the object matrix - # and then translate according to the camera position - #m = obj.getMatrix() - #m.transpose() - - #a = m*Vector(face[0]) - Vector(cameraObj.loc) - #b = m*Vector(face[1]) - Vector(cameraObj.loc) - #c = m*Vector(face[numvert-1]) - Vector(cameraObj.loc) - - a = [] - a.append(face[0][0]) - a.append(face[0][1]) - a.append(face[0][2]) - a = RotatePoint(a[0], a[1], a[2], obj.RotX, obj.RotY, obj.RotZ) - a[0] += obj.LocX - camera.LocX - a[1] += obj.LocY - camera.LocY - a[2] += obj.LocZ - camera.LocZ - b = [] - b.append(face[1][0]) - b.append(face[1][1]) - b.append(face[1][2]) - b = RotatePoint(b[0], b[1], b[2], obj.RotX, obj.RotY, obj.RotZ) - b[0] += obj.LocX - camera.LocX - b[1] += obj.LocY - camera.LocY - b[2] += obj.LocZ - camera.LocZ - c = [] - c.append(face[numvert-1][0]) - c.append(face[numvert-1][1]) - c.append(face[numvert-1][2]) - c = RotatePoint(c[0], c[1], c[2], obj.RotX, obj.RotY, obj.RotZ) - c[0] += obj.LocX - camera.LocX - c[1] += obj.LocY - camera.LocY - c[2] += obj.LocZ - camera.LocZ + if len(vl) <= 4: + nf = NMesh.Face() - norm = [0, 0, 0] - norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2]) - norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2])) - norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1]) + for v in vl: + nf.v.append(v) - d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2] - #d = DotVecs(Vector(norm), Vector(a)) + newfaces.append(nf) - return (d<0) - - def _isFaceVisible(self, face, obj, cameraObj): - """Determine if the face is visible from the current camera. 
+ else: + nf = NMesh.Face() + + nf.v.append(vl[0]) + nf.v.append(vl[1]) + nf.v.append(vl[2]) + nf.v.append(vl[3]) + newfaces.append(nf) + + nf = NMesh.Face() + nf.v.append(vl[3]) + nf.v.append(vl[4]) + nf.v.append(vl[0]) + newfaces.append(nf) + + return newfaces - The following code is taken basicly from the original vrm script. + makeFaces = staticmethod(makeFaces) + + def splitOn(Q, P, return_positive_faces=True, return_negative_faces=True): + """Split P using the plane of Q. + Logic taken from the knife.py python script """ - camera = cameraObj + # Check if P and Q are parallel + u = CrossVecs(Vector(Q.no), Vector(P.no)) + ax = abs(u[0]) + ay = abs(u[1]) + az = abs(u[2]) + + if (ax + ay + az) < EPS: + print "PARALLEL planes!!" + return + + # The final aim is to find the intersection line between P + # and the plane of Q, and split P along this line + + nP = len(P.v) + + # Calculate point-plane Distance between vertices of P and plane Q + d = [] + for i in range(0, nP): + d.append(HSR.Distance(P.v[i], Q)) + + newVertList = [] + + posVertList = [] + negVertList = [] + for i in range(nP): + d0 = d[i - 1] + V0 = P.v[i - 1] + + d1 = d[i] + V1 = P.v[i] + + #print "d0:", d0, "d1:", d1 + + # if the vertex lies in the cutplane + if abs(d1) < EPS: + #print "d1 On cutplane" + posVertList.append(V1) + negVertList.append(V1) + else: + # if the previous vertex lies in cutplane + if abs(d0) < EPS: + #print "d0 on Cutplane" + if d1 > 0: + #print "d1 on positive Halfspace" + posVertList.append(V1) + else: + #print "d1 on negative Halfspace" + negVertList.append(V1) + else: + # if they are on the same side of the plane + if (d1 * d0) > 0: + #print "On the same half-space" + if d1 > 0: + #print "d1 on positive Halfspace" + posVertList.append(V1) + else: + #print "d1 on negative Halfspace" + negVertList.append(V1) + + # the vertices are not on the same side of the plane, so we have an intersection + else: + #print "Intersection" - numvert = len(face) + e = Vector(V0), Vector(V1) + tri = Vector(Q[0]), Vector(Q[1]), Vector(Q[2]) - # backface culling + inters = Intersect(tri[0], tri[1], tri[2], e[1] - e[0], e[0], 0) + if inters == None: + print "Split Break" + break - # translate and rotate according to the object matrix - # and then translate according to the camera position - m = obj.getMatrix() - m.transpose() - - a = m*Vector(face[0]) - Vector(cameraObj.loc) - b = m*Vector(face[1]) - Vector(cameraObj.loc) - c = m*Vector(face[numvert-1]) - Vector(cameraObj.loc) + #print "Intersection", inters - norm = m*Vector(face.no) + nv = NMesh.Vert(inters[0], inters[1], inters[2]) + newVertList.append(nv) - d = DotVecs(norm, a) + posVertList.append(nv) + negVertList.append(nv) - return (d<0) + if d1 > 0: + posVertList.append(V1) + else: + negVertList.append(V1) + # uniq for python > 2.4 + #posVertList = [ u for u in posVertList if u not in locals()['_[1]'] ] + #negVertList = [ u for u in negVertList if u not in locals()['_[1]'] ] - def _doClipping(): - return + # a more portable way + posVertList = uniq(posVertList) + negVertList = uniq(negVertList) + # If vertex are all on the same half-space, return + #if len(posVertList) < 3: + # print "Problem, we created a face with less that 3 vertices??" + # posVertList = [] + #if len(negVertList) < 3: + # print "Problem, we created a face with less that 3 vertices??" + # negVertList = [] - # Per object methods + if len(posVertList) < 3 or len(negVertList) < 3: + #print "RETURN NONE, SURE???" 
+ return None - def _doVisibleSurfaceDetermination(object): - return + if not return_positive_faces: + posVertList = [] + if not return_negative_faces: + negVertList = [] - def _doColorizing(object): - return + newfaces = HSR.addNewFaces(posVertList, negVertList) + + return newfaces + + splitOn = staticmethod(splitOn) + + def addNewFaces(posVertList, negVertList): + # Create new faces resulting from the split + outfaces = [] + if len(posVertList) or len(negVertList): + + #newfaces = [posVertList] + [negVertList] + newfaces = ([[NMesh.Vert(v[0], v[1], v[2]) for v in posVertList]] + + [[NMesh.Vert(v[0], v[1], v[2]) for v in negVertList]]) + + for nf in newfaces: + if nf and len(nf) > 2: + outfaces += HSR.makeFaces(nf) + + return outfaces - def _doStylizingEdges(self, object, style): - """Process Mesh Edges. (For now copy the edge data, in next version it - can be a place where recognize silouhettes and/or contours). + addNewFaces = staticmethod(addNewFaces) - input: an edge list - return: a processed edge list + +# --------------------------------------------------------------------- +# +## Mesh Utility class +# +# --------------------------------------------------------------------- + +class MeshUtils: + + def buildEdgeFaceUsersCache(me): + ''' + Takes a mesh and returns a list aligned with the meshes edges. + Each item is a list of the faces that use the edge + would be the equiv for having ed.face_users as a property + + Taken from .blender/scripts/bpymodules/BPyMesh.py, + thanks to ideasman_42. + ''' + + def sorted_edge_indicies(ed): + i1 = ed.v1.index + i2 = ed.v2.index + if i1 > i2: + i1, i2 = i2, i1 + return i1, i2 + + face_edges_dict = dict([(sorted_edge_indicies(ed), (ed.index, [])) for ed in me.edges]) + for f in me.faces: + fvi = [v.index for v in f.v] # face vert idx's + for i in xrange(len(f)): + i1 = fvi[i] + i2 = fvi[i - 1] + + if i1 > i2: + i1, i2 = i2, i1 + + face_edges_dict[i1, i2][1].append(f) + + face_edges = [None] * len(me.edges) + for ed_index, ed_faces in face_edges_dict.itervalues(): + face_edges[ed_index] = ed_faces + + return face_edges + + def isMeshEdge(adjacent_faces): + """Mesh edge rule. + + A mesh edge is visible if _at_least_one_ of its adjacent faces is selected. + Note: if the edge has no adjacent faces we want to show it as well, + useful for "edge only" portion of objects. + """ + + if len(adjacent_faces) == 0: + return True + + selected_faces = [f for f in adjacent_faces if f.sel] + + if len(selected_faces) != 0: + return True + else: + return False + + def isSilhouetteEdge(adjacent_faces): + """Silhuette selection rule. + + An edge is a silhuette edge if it is shared by two faces with + different selection status or if it is a boundary edge of a selected + face. 
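        For example, an edge shared by a selected (front-facing) face and
        an unselected (back-facing) face is reported as a silhouette edge,
        while an edge shared by two selected faces is not.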
""" - return + if ((len(adjacent_faces) == 1 and adjacent_faces[0].sel == 1) or + (len(adjacent_faces) == 2 and + adjacent_faces[0].sel != adjacent_faces[1].sel) + ): + return True + else: + return False + + buildEdgeFaceUsersCache = staticmethod(buildEdgeFaceUsersCache) + isMeshEdge = staticmethod(isMeshEdge) + isSilhouetteEdge = staticmethod(isSilhouetteEdge) # --------------------------------------------------------------------- # -## Main Program +## Shading Utility class # # --------------------------------------------------------------------- +class ShadingUtils: -# FIXME: really hackish code, just to test if the other parts work -def depthSorting(scene): + shademap = None - cameraObj = Scene.GetCurrent().getCurrentCamera() - Objects = scene.getChildren() + def toonShadingMapSetup(): + levels = config.polygons['TOON_LEVELS'] - Objects.sort(lambda obj1, obj2: - cmp(Vector(Vector(cameraObj.loc) - Vector(obj1.loc)).length, - Vector(Vector(cameraObj.loc) - Vector(obj2.loc)).length - ) - ) - - # hackish sorting of faces according to the max z value of a vertex - for o in Objects: - - mesh = o.data - mesh.faces.sort( - lambda f1, f2: - # Sort faces according to the min z coordinate in a face - #cmp(min([v[2] for v in f1]), min([v[2] for v in f2]))) - - # Sort faces according to the max z coordinate in a face - cmp(max([v[2] for v in f1]), max([v[2] for v in f2]))) - - # Sort faces according to the avg z coordinate in a face - #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2))) - mesh.faces.reverse() - mesh.update() - - # update the scene - for o in scene.getChildren(): - scene.unlink(o) - for o in Objects: - scene.link(o) - -def vectorize(filename): - """The vectorizing process is as follows: - - - Open the writer - - Render the scene - - Close the writer - - If you want to render an animation the second pass should be - repeated for any frame, and the frame number should be passed to the - renderer. - """ + texels = 2 * levels - 1 + tmp_shademap = [0.0] + [(i) / float(texels - 1) for i in xrange(1, texels - 1)] + [1.0] - print "Filename: %s" % filename - - scene = Scene.GetCurrent() - renderer = Renderer() - - flatScene = renderer.doRendering(scene) - canvasSize = renderer.getCanvasSize() + return tmp_shademap - depthSorting(flatScene) + def toonShading(u): - writer = SVGVectorWriter(filename, canvasSize) - writer.printCanvas(flatScene) + shademap = ShadingUtils.shademap + + if not shademap: + shademap = ShadingUtils.toonShadingMapSetup() + + v = 1.0 + for i in xrange(0, len(shademap) - 1): + pivot = (shademap[i] + shademap[i + 1]) / 2.0 + j = int(u > pivot) + + v = shademap[i + j] + + if v < shademap[i + 1]: + return v + + return v + + toonShadingMapSetup = staticmethod(toonShadingMapSetup) + toonShading = staticmethod(toonShading) + + +# --------------------------------------------------------------------- +# +## Projections classes +# +# --------------------------------------------------------------------- + +class Projector: + """Calculate the projection of an object given the camera. + + A projector is useful to so some per-object transformation to obtain the + projection of an object given the camera. + + The main method is #doProjection# see the method description for the + parameter list. + """ + + def __init__(self, cameraObj, canvasRatio): + """Calculate the projection matrix. + + The projection matrix depends, in this case, on the camera settings. + TAKE CARE: This projector expects vertices in World Coordinates! 
+ """ + + camera = cameraObj.getData() + + aspect = float(canvasRatio[0]) / float(canvasRatio[1]) + near = camera.clipStart + far = camera.clipEnd + + scale = float(camera.scale) + + fovy = atan(0.5 / aspect / (camera.lens / 32)) + fovy = fovy * 360.0 / pi + + if Blender.Get('version') < 243: + camPersp = 0 + camOrtho = 1 + else: + camPersp = 'persp' + camOrtho = 'ortho' + + # What projection do we want? + if camera.type == camPersp: + mP = self._calcPerspectiveMatrix(fovy, aspect, near, far) + elif camera.type == camOrtho: + mP = self._calcOrthoMatrix(fovy, aspect, near, far, scale) + + # View transformation + cam = Matrix(cameraObj.getInverseMatrix()) + cam.transpose() + + mP = mP * cam + + self.projectionMatrix = mP + + ## + # Public methods + # + + def doProjection(self, v): + """Project the point on the view plane. + + Given a vertex calculate the projection using the current projection + matrix. + """ + + # Note that we have to work on the vertex using homogeneous coordinates + # From blender 2.42+ we don't need to resize the vector to be 4d + # when applying a 4x4 matrix, but we do that anyway since we need the + # 4th coordinate later + p = self.projectionMatrix * Vector(v).resize4D() + + # Perspective division + if p[3] != 0: + p[0] = p[0] / p[3] + p[1] = p[1] / p[3] + p[2] = p[2] / p[3] + + # restore the size + p[3] = 1.0 + p.resize3D() + + return p + + ## + # Private methods + # + + def _calcPerspectiveMatrix(self, fovy, aspect, near, far): + """Return a perspective projection matrix. + """ + + top = near * tan(fovy * pi / 360.0) + bottom = -top + left = bottom * aspect + right = top * aspect + x = (2.0 * near) / (right - left) + y = (2.0 * near) / (top - bottom) + a = (right + left) / (right - left) + b = (top + bottom) / (top - bottom) + c = - ((far + near) / (far - near)) + d = - ((2 * far * near) / (far - near)) + + m = Matrix( + [x, 0.0, a, 0.0], + [0.0, y, b, 0.0], + [0.0, 0.0, c, d], + [0.0, 0.0, -1.0, 0.0]) + + return m + + def _calcOrthoMatrix(self, fovy, aspect, near, far, scale): + """Return an orthogonal projection matrix. + """ + + # The 11 in the formula was found emiprically + top = near * tan(fovy * pi / 360.0) * (scale * 11) + bottom = -top + left = bottom * aspect + right = top * aspect + rl = right - left + tb = top - bottom + fn = near - far + tx = -((right + left) / rl) + ty = -((top + bottom) / tb) + tz = ((far + near) / fn) + + m = Matrix( + [2.0 / rl, 0.0, 0.0, tx], + [0.0, 2.0 / tb, 0.0, ty], + [0.0, 0.0, 2.0 / fn, tz], + [0.0, 0.0, 0.0, 1.0]) + + return m + + +# --------------------------------------------------------------------- +# +## Progress Indicator +# +# --------------------------------------------------------------------- + +class Progress: + """A model for a progress indicator. + + Do the progress calculation calculation and + the view independent stuff of a progress indicator. + """ + def __init__(self, steps=0): + self.name = "" + self.steps = steps + self.completed = 0 + self.progress = 0 + + def setSteps(self, steps): + """Set the number of steps of the activity wich we want to track. + """ + self.steps = steps + + def getSteps(self): + return self.steps + + def setName(self, name): + """Set the name of the activity wich we want to track. + """ + self.name = name + + def getName(self): + return self.name + + def getProgress(self): + return self.progress + + def reset(self): + self.completed = 0 + self.progress = 0 + + def update(self): + """Update the model, call this method when one step is completed. 
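        Returns True while there is still progress to report, and False
        once the activity has already reached 100%.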
+ """ + if self.progress == 100: + return False + + self.completed += 1 + self.progress = (float(self.completed) / float(self.steps)) * 100 + self.progress = int(self.progress) + + return True + + +class ProgressIndicator: + """An abstraction of a View for the Progress Model + """ + def __init__(self): + + # Use a refresh rate so we do not show the progress at + # every update, but every 'self.refresh_rate' times. + self.refresh_rate = 10 + self.shows_counter = 0 + + self.quiet = False + + self.progressModel = None + + def setQuiet(self, value): + self.quiet = value + + def setActivity(self, name, steps): + """Initialize the Model. + + In a future version (with subactivities-progress support) this method + could only set the current activity. + """ + self.progressModel = Progress() + self.progressModel.setName(name) + self.progressModel.setSteps(steps) + + def getActivity(self): + return self.progressModel + + def update(self): + """Update the model and show the actual progress. + """ + assert(self.progressModel) + + if self.progressModel.update(): + if self.quiet: + return + + self.show(self.progressModel.getProgress(), + self.progressModel.getName()) + + # We return always True here so we can call the update() method also + # from lambda funcs (putting the call in logical AND with other ops) + return True + + def show(self, progress, name=""): + self.shows_counter = (self.shows_counter + 1) % self.refresh_rate + if self.shows_counter != 0: + return + + if progress == 100: + self.shows_counter = -1 + + +class ConsoleProgressIndicator(ProgressIndicator): + """Show a progress bar on stderr, a la wget. + """ + def __init__(self): + ProgressIndicator.__init__(self) + + self.swirl_chars = ["-", "\\", "|", "/"] + self.swirl_count = -1 + + def show(self, progress, name): + ProgressIndicator.show(self, progress, name) + + bar_length = 70 + bar_progress = int((progress / 100.0) * bar_length) + bar = ("=" * bar_progress).ljust(bar_length) + + self.swirl_count = (self.swirl_count + 1) % len(self.swirl_chars) + swirl_char = self.swirl_chars[self.swirl_count] + + progress_bar = "%s |%s| %c %3d%%" % (name, bar, swirl_char, progress) + + sys.stderr.write(progress_bar + "\r") + if progress == 100: + sys.stderr.write("\n") + + +class GraphicalProgressIndicator(ProgressIndicator): + """Interface to the Blender.Window.DrawProgressBar() method. + """ + def __init__(self): + ProgressIndicator.__init__(self) + + #self.swirl_chars = ["-", "\\", "|", "/"] + # We have to use letters with the same width, for now! + # Blender progress bar considers the font widths when + # calculating the progress bar width. + self.swirl_chars = ["\\", "/"] + self.swirl_count = -1 + + def show(self, progress, name): + ProgressIndicator.show(self, progress) + + self.swirl_count = (self.swirl_count + 1) % len(self.swirl_chars) + swirl_char = self.swirl_chars[self.swirl_count] + + progress_text = "%s - %c %3d%%" % (name, swirl_char, progress) + + # Finally draw the Progress Bar + Window.WaitCursor(1) # Maybe we can move that call in the constructor? + Window.DrawProgressBar(progress / 100.0, progress_text) + + if progress == 100: + Window.DrawProgressBar(1, progress_text) + Window.WaitCursor(0) + +# --------------------------------------------------------------------- +# +## 2D Object representation class +# +# --------------------------------------------------------------------- + +# TODO: a class to represent the needed properties of a 2D vector image +# For now just using a [N]Mesh structure. 
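# A minimal usage sketch for the progress classes above; the function name,
# the activity label and the dummy work loop are hypothetical, only the
# setActivity()/update() calls follow the API defined in this file.
def _progress_usage_sketch(steps=100):
    indicator = ConsoleProgressIndicator()
    indicator.setActivity("Example activity", steps)
    for i in xrange(steps):
        # ... one unit of real work per iteration would go here ...
        indicator.update()  # update the model and redraw the stderr bar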
+ + +# --------------------------------------------------------------------- +# +## Vector Drawing Classes +# +# --------------------------------------------------------------------- + +## A generic Writer + +class VectorWriter: + """ + A class for printing output in a vectorial format. + + Given a 2D representation of the 3D scene the class is responsible to + write it is a vector format. + + Every subclasses of VectorWriter must have at last the following public + methods: + - open(self) + - close(self) + - printCanvas(self, scene, + doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False): + """ + + def __init__(self, fileName): + """Set the output file name and other properties""" + + try: + config.writer + except: + config.writer = dict() + config.writer['SETTING'] = True + + self.outputFileName = fileName + + context = Scene.GetCurrent().getRenderingContext() + self.canvasSize = (context.imageSizeX(), context.imageSizeY()) + + self.fps = context.fps + + self.startFrame = 1 + self.endFrame = 1 + self.animation = False + + ## + # Public Methods + # + + def open(self, startFrame=1, endFrame=1): + if startFrame != endFrame: + self.startFrame = startFrame + self.endFrame = endFrame + self.animation = True + + print "Outputting to: ", self.outputFileName + + return + + def close(self): + return + + def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, + showHiddenEdges=False): + """This is the interface for the needed printing routine. + """ + return + + +## SVG Writer + +class SVGVectorWriter(VectorWriter): + """A concrete class for writing SVG output. + """ + + def __init__(self, fileName): + """Simply call the parent Contructor. + """ + VectorWriter.__init__(self, fileName) + + self.file = None + + ## + # Public Methods + # + + def open(self, startFrame=1, endFrame=1): + """Do some initialization operations. + """ + VectorWriter.open(self, startFrame, endFrame) + + self.file = open(self.outputFileName, "w") + + self._printHeader() + + def close(self): + """Do some finalization operation. + """ + self._printFooter() + + if self.file: + self.file.close() + + # remember to call the close method of the parent as last + VectorWriter.close(self) + + def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, + showHiddenEdges=False): + """Convert the scene representation to SVG. + """ + + Objects = scene.objects + + context = scene.getRenderingContext() + framenumber = context.currentFrame() + + if self.animation: + framestyle = "display:none" + else: + framestyle = "display:block" + + # Assign an id to this group so we can set properties on it using DOM + self.file.write("\n" % + (framenumber, framestyle)) + + for obj in Objects: + + if obj.getType() != 'Mesh': + continue + + self.file.write("\n" % obj.getName()) + + mesh = obj.getData(mesh=1) + + if doPrintPolygons: + self._printPolygons(mesh) + + if doPrintEdges: + self._printEdges(mesh, showHiddenEdges) + + self.file.write("\n") + + self.file.write("\n") + + ## + # Private Methods + # + + def _calcCanvasCoord(self, v): + """Convert vertex in scene coordinates to canvas coordinates. + """ + + pt = Vector([0, 0, 0]) + + mW = float(self.canvasSize[0]) / 2.0 + mH = float(self.canvasSize[1]) / 2.0 + + # rescale to canvas size + pt[0] = v.co[0] * mW + mW + pt[1] = v.co[1] * mH + mH + pt[2] = v.co[2] + + # For now we want (0,0) in the top-left corner of the canvas. 
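        # For example, with a WxH canvas a vertex at (0, 0) in Normalized
        # View Coordinates ends up at the canvas centre (W/2, H/2), while
        # (-1, +1) maps to the top-left corner (0, 0) and (+1, -1) to the
        # bottom-right corner (W, H).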
+ # Mirror and translate along y + pt[1] *= -1 + pt[1] += self.canvasSize[1] + + return pt + + def _printHeader(self): + """Print SVG header.""" + + self.file.write("\n") + self.file.write("\n") + self.file.write("\n\n" % + self.canvasSize) + + if self.animation: + delay = 1000 / self.fps + + self.file.write("""\n\n + \n""") + + def _printFooter(self): + """Print the SVG footer.""" + + self.file.write("\n\n") + + def _printPolygons(self, mesh): + """Print the selected (visible) polygons. + """ + + if len(mesh.faces) == 0: + return + + self.file.write("\n") + + for face in mesh.faces: + if not face.sel: + continue + + self.file.write("\n") + + self.file.write("\n") + + def _printEdges(self, mesh, showHiddenEdges=False): + """Print the wireframe using mesh edges. + """ + + stroke_width = config.edges['WIDTH'] + stroke_col = config.edges['COLOR'] + + self.file.write("\n") + + for e in mesh.edges: + + hidden_stroke_style = "" + + if e.sel == 0: + if showHiddenEdges == False: + continue + else: + hidden_stroke_style = ";\n stroke-dasharray:3, 3" + + p1 = self._calcCanvasCoord(e.v1) + p2 = self._calcCanvasCoord(e.v2) + + self.file.write("\n") + + self.file.write("\n") + + +## SWF Writer + +try: + from ming import * + SWFSupported = True +except: + SWFSupported = False + + +class SWFVectorWriter(VectorWriter): + """A concrete class for writing SWF output. + """ + + def __init__(self, fileName): + """Simply call the parent Contructor. + """ + VectorWriter.__init__(self, fileName) + + self.movie = None + self.sprite = None + + ## + # Public Methods + # + + def open(self, startFrame=1, endFrame=1): + """Do some initialization operations. + """ + VectorWriter.open(self, startFrame, endFrame) + self.movie = SWFMovie() + self.movie.setDimension(self.canvasSize[0], self.canvasSize[1]) + if self.animation: + self.movie.setRate(self.fps) + numframes = endFrame - startFrame + 1 + self.movie.setFrames(numframes) + + def close(self): + """Do some finalization operation. + """ + self.movie.save(self.outputFileName) + + # remember to call the close method of the parent + VectorWriter.close(self) + + def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, + showHiddenEdges=False): + """Convert the scene representation to SVG. + """ + context = scene.getRenderingContext() + framenumber = context.currentFrame() + + Objects = scene.objects + + if self.sprite: + self.movie.remove(self.sprite) + + sprite = SWFSprite() + + for obj in Objects: + + if(obj.getType() != 'Mesh'): + continue + + mesh = obj.getData(mesh=1) + + if doPrintPolygons: + self._printPolygons(mesh, sprite) + + if doPrintEdges: + self._printEdges(mesh, sprite, showHiddenEdges) + + sprite.nextFrame() + i = self.movie.add(sprite) + # Remove the instance the next time + self.sprite = i + if self.animation: + self.movie.nextFrame() + + ## + # Private Methods + # + + def _calcCanvasCoord(self, v): + """Convert vertex in scene coordinates to canvas coordinates. + """ + + pt = Vector([0, 0, 0]) + + mW = float(self.canvasSize[0]) / 2.0 + mH = float(self.canvasSize[1]) / 2.0 + + # rescale to canvas size + pt[0] = v.co[0] * mW + mW + pt[1] = v.co[1] * mH + mH + pt[2] = v.co[2] + + # For now we want (0,0) in the top-left corner of the canvas. + # Mirror and translate along y + pt[1] *= -1 + pt[1] += self.canvasSize[1] + + return pt + + def _printPolygons(self, mesh, sprite): + """Print the selected (visible) polygons. 
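        Each selected face becomes one filled SWFShape added to the given
        sprite; the fill colour is taken from the face's first vertex
        colour, falling back to opaque white when the mesh has no colour
        layer.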
+ """ + + if len(mesh.faces) == 0: + return + + for face in mesh.faces: + if not face.sel: + continue + + if face.col: + fcol = face.col[0] + color = [fcol.r, fcol.g, fcol.b, fcol.a] + else: + color = [255, 255, 255, 255] + + s = SWFShape() + f = s.addFill(color[0], color[1], color[2], color[3]) + s.setRightFill(f) + + # The starting point of the shape + p0 = self._calcCanvasCoord(face.verts[0]) + s.movePenTo(p0[0], p0[1]) + + for v in face.verts[1:]: + p = self._calcCanvasCoord(v) + s.drawLineTo(p[0], p[1]) + + # Closing the shape + s.drawLineTo(p0[0], p0[1]) + + s.end() + sprite.add(s) + + def _printEdges(self, mesh, sprite, showHiddenEdges=False): + """Print the wireframe using mesh edges. + """ + + stroke_width = config.edges['WIDTH'] + stroke_col = config.edges['COLOR'] + + s = SWFShape() + + for e in mesh.edges: + + # Next, we set the line width and color for our shape. + s.setLine(stroke_width, stroke_col[0], stroke_col[1], stroke_col[2], + 255) + + if e.sel == 0: + if showHiddenEdges == False: + continue + else: + # SWF does not support dashed lines natively, so -for now- + # draw hidden lines thinner and half-trasparent + s.setLine(stroke_width / 2, stroke_col[0], stroke_col[1], + stroke_col[2], 128) + + p1 = self._calcCanvasCoord(e.v1) + p2 = self._calcCanvasCoord(e.v2) + + s.movePenTo(p1[0], p1[1]) + s.drawLineTo(p2[0], p2[1]) + + s.end() + sprite.add(s) + + +## PDF Writer + +try: + from reportlab.pdfgen import canvas + PDFSupported = True +except: + PDFSupported = False + + +class PDFVectorWriter(VectorWriter): + """A concrete class for writing PDF output. + """ + + def __init__(self, fileName): + """Simply call the parent Contructor. + """ + VectorWriter.__init__(self, fileName) + + self.canvas = None + + ## + # Public Methods + # + + def open(self, startFrame=1, endFrame=1): + """Do some initialization operations. + """ + VectorWriter.open(self, startFrame, endFrame) + size = (self.canvasSize[0], self.canvasSize[1]) + self.canvas = canvas.Canvas(self.outputFileName, pagesize=size, bottomup=0) + + def close(self): + """Do some finalization operation. + """ + self.canvas.save() + + # remember to call the close method of the parent + VectorWriter.close(self) + + def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, + showHiddenEdges=False): + """Convert the scene representation to SVG. + """ + context = scene.getRenderingContext() + framenumber = context.currentFrame() + + Objects = scene.objects + + for obj in Objects: + + if(obj.getType() != 'Mesh'): + continue + + mesh = obj.getData(mesh=1) + + if doPrintPolygons: + self._printPolygons(mesh) + + if doPrintEdges: + self._printEdges(mesh, showHiddenEdges) + + self.canvas.showPage() + + ## + # Private Methods + # + + def _calcCanvasCoord(self, v): + """Convert vertex in scene coordinates to canvas coordinates. + """ + + pt = Vector([0, 0, 0]) + + mW = float(self.canvasSize[0]) / 2.0 + mH = float(self.canvasSize[1]) / 2.0 + + # rescale to canvas size + pt[0] = v.co[0] * mW + mW + pt[1] = v.co[1] * mH + mH + pt[2] = v.co[2] + + # For now we want (0,0) in the top-left corner of the canvas. + # Mirror and translate along y + pt[1] *= -1 + pt[1] += self.canvasSize[1] + + return pt + + def _printPolygons(self, mesh): + """Print the selected (visible) polygons. 
+ """ + + if len(mesh.faces) == 0: + return + + for face in mesh.faces: + if not face.sel: + continue + + if face.col: + fcol = face.col[0] + color = [fcol.r / 255.0, fcol.g / 255.0, fcol.b / 255.0, + fcol.a / 255.0] + else: + color = [1, 1, 1, 1] + + self.canvas.setFillColorRGB(color[0], color[1], color[2]) + # For debug + self.canvas.setStrokeColorRGB(0, 0, 0) + + path = self.canvas.beginPath() + + # The starting point of the path + p0 = self._calcCanvasCoord(face.verts[0]) + path.moveTo(p0[0], p0[1]) + + for v in face.verts[1:]: + p = self._calcCanvasCoord(v) + path.lineTo(p[0], p[1]) + + # Closing the shape + path.close() + + self.canvas.drawPath(path, stroke=0, fill=1) + + def _printEdges(self, mesh, showHiddenEdges=False): + """Print the wireframe using mesh edges. + """ + + stroke_width = config.edges['WIDTH'] + stroke_col = config.edges['COLOR'] + + self.canvas.setLineCap(1) + self.canvas.setLineJoin(1) + self.canvas.setLineWidth(stroke_width) + self.canvas.setStrokeColorRGB(stroke_col[0] / 255.0, stroke_col[1] / 255.0, + stroke_col[2] / 255) + + for e in mesh.edges: + + self.canvas.setLineWidth(stroke_width) + + if e.sel == 0: + if showHiddenEdges == False: + continue + else: + # PDF does not support dashed lines natively, so -for now- + # draw hidden lines thinner + self.canvas.setLineWidth(stroke_width / 2.0) + + p1 = self._calcCanvasCoord(e.v1) + p2 = self._calcCanvasCoord(e.v2) + + self.canvas.line(p1[0], p1[1], p2[0], p2[1]) + +# --------------------------------------------------------------------- +# +## Rendering Classes +# +# --------------------------------------------------------------------- + +# A dictionary to collect different shading style methods +shadingStyles = dict() +shadingStyles['FLAT'] = None +shadingStyles['TOON'] = None + +# A dictionary to collect different edge style methods +edgeStyles = dict() +edgeStyles['MESH'] = MeshUtils.isMeshEdge +edgeStyles['SILHOUETTE'] = MeshUtils.isSilhouetteEdge + +# A dictionary to collect the supported output formats +outputWriters = dict() +outputWriters['SVG'] = SVGVectorWriter +if SWFSupported: + outputWriters['SWF'] = SWFVectorWriter +if PDFSupported: + outputWriters['PDF'] = PDFVectorWriter + + +class Renderer: + """Render a scene viewed from the active camera. + + This class is responsible of the rendering process, transformation and + projection of the objects in the scene are invoked by the renderer. + + The rendering is done using the active camera for the current scene. + """ + + def __init__(self): + """Make the rendering process only for the current scene by default. + + We will work on a copy of the scene, to be sure that the current scene do + not get modified in any way. + """ + + # Render the current Scene, this should be a READ-ONLY property + self._SCENE = Scene.GetCurrent() + + # Use the aspect ratio of the scene rendering context + context = self._SCENE.getRenderingContext() + + aspect_ratio = float(context.imageSizeX()) / float(context.imageSizeY()) + self.canvasRatio = (float(context.aspectRatioX()) * aspect_ratio, + float(context.aspectRatioY()) + ) + + # Render from the currently active camera + #self.cameraObj = self._SCENE.objects.camera + + self.lights = [] + + ## + # Public Methods + # + + def doRendering(self, outputWriter, animation=False): + """Render picture or animation and write it out. + + The parameters are: + - a Vector writer object that will be used to output the result. + - a flag to tell if we want to render an animation or only the + current frame. 
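        Typical use, sketched with a made-up file name:

            writer = outputWriters[config.output['FORMAT']]("scene.svg")
            Renderer().doRendering(writer, config.output['ANIMATION'])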
+ """ + + context = self._SCENE.getRenderingContext() + origCurrentFrame = context.currentFrame() + + # Handle the animation case + if not animation: + startFrame = origCurrentFrame + endFrame = startFrame + outputWriter.open() + else: + startFrame = context.startFrame() + endFrame = context.endFrame() + outputWriter.open(startFrame, endFrame) + + # Do the rendering process frame by frame + print "Start Rendering of %d frames" % (endFrame - startFrame + 1) + for f in xrange(startFrame, endFrame + 1): + print "\n\nFrame: %d" % f + + # FIXME To get the correct camera position we have to use +1 here. + # Is there a bug somewhere in the Scene module? + context.currentFrame(f + 1) + self.cameraObj = self._SCENE.objects.camera + + # Use some temporary workspace, a full copy of the scene + inputScene = self._SCENE.copy(2) + + # To get the objects at this frame remove the +1 ... + ctx = inputScene.getRenderingContext() + ctx.currentFrame(f) + + # Get a projector for this camera. + # NOTE: the projector wants object in world coordinates, + # so we should remember to apply modelview transformations + # _before_ we do projection transformations. + self.proj = Projector(self.cameraObj, self.canvasRatio) + + try: + renderedScene = self.doRenderScene(inputScene) + except: + print "There was an error! Aborting." + import traceback + print traceback.print_exc() + + self._SCENE.makeCurrent() + Scene.Unlink(inputScene) + del inputScene + return + + outputWriter.printCanvas(renderedScene, + doPrintPolygons=config.polygons['SHOW'], + doPrintEdges=config.edges['SHOW'], + showHiddenEdges=config.edges['SHOW_HIDDEN']) + + # delete the rendered scene + self._SCENE.makeCurrent() + Scene.Unlink(renderedScene) + del renderedScene + + outputWriter.close() + print "Done!" + context.currentFrame(origCurrentFrame) + + def doRenderScene(self, workScene): + """Control the rendering process. + + Here we control the entire rendering process invoking the operation + needed to transform and project the 3D scene in two dimensions. + """ + + # global processing of the scene + + self._filterHiddenObjects(workScene) + + self._buildLightSetup(workScene) + + self._doSceneClipping(workScene) + + self._doConvertGeometricObjsToMesh(workScene) + + if config.output['JOIN_OBJECTS']: + self._joinMeshObjectsInScene(workScene) + + self._doSceneDepthSorting(workScene) + + # Per object activities + + Objects = workScene.objects + + print "Total Objects: %d" % len(Objects) + for i, obj in enumerate(Objects): + print "\n\n-------" + print "Rendering Object: %d" % i + + if obj.getType() != 'Mesh': + print "Only Mesh supported! - Skipping type:", obj.getType() + continue + + print "Rendering: ", obj.getName() + + mesh = obj.getData(mesh=1) + + self._doModelingTransformation(mesh, obj.matrix) + + self._doBackFaceCulling(mesh) + + # When doing HSR with NEWELL we may want to flip all normals + # toward the viewer + if config.polygons['HSR'] == "NEWELL": + for f in mesh.faces: + f.sel = 1 - f.sel + mesh.flipNormals() + for f in mesh.faces: + f.sel = 1 + + self._doLighting(mesh) + + # Do "projection" now so we perform further processing + # in Normalized View Coordinates + self._doProjection(mesh, self.proj) + + self._doViewFrustumClipping(mesh) + + self._doHiddenSurfaceRemoval(mesh) + + self._doEdgesStyle(mesh, edgeStyles[config.edges['STYLE']]) + + # Update the object data, important! 
:) + mesh.update() + + return workScene + + ## + # Private Methods + # + + # Utility methods + + def _getObjPosition(self, obj): + """Return the obj position in World coordinates. + """ + return obj.matrix.translationPart() + + def _cameraViewVector(self): + """Get the View Direction form the camera matrix. + """ + return Vector(self.cameraObj.matrix[2]).resize3D() + + # Faces methods + + def _isFaceVisible(self, face): + """Determine if a face of an object is visible from the current camera. + + The view vector is calculated from the camera location and one of the + vertices of the face (expressed in World coordinates, after applying + modelview transformations). + + After those transformations we determine if a face is visible by + computing the angle between the face normal and the view vector, this + angle has to be between -90 and 90 degrees for the face to be visible. + This corresponds somehow to the dot product between the two, if it + results > 0 then the face is visible. + + There is no need to normalize those vectors since we are only interested in + the sign of the cross product and not in the product value. + + NOTE: here we assume the face vertices are in WorldCoordinates, so + please transform the object _before_ doing the test. + """ + + normal = Vector(face.no) + camPos = self._getObjPosition(self.cameraObj) + view_vect = None + + # View Vector in orthographics projections is the view Direction of + # the camera + if self.cameraObj.data.getType() == 1: + view_vect = self._cameraViewVector() + + # View vector in perspective projections can be considered as + # the difference between the camera position and one point of + # the face, we choose the farthest point from the camera. + if self.cameraObj.data.getType() == 0: + vv = max([((camPos - Vector(v.co)).length, (camPos - Vector(v.co))) for v in face]) + view_vect = vv[1] + + # if d > 0 the face is visible from the camera + d = view_vect * normal + + if d > 0: + return True + else: + return False + + # Scene methods + + def _filterHiddenObjects(self, scene): + """Discard object that are on hidden layers in the scene. + """ + + Objects = scene.objects + + visible_obj_list = [obj for obj in Objects if + set(obj.layers).intersection(set(scene.getLayers()))] + + for o in Objects: + if o not in visible_obj_list: + scene.objects.unlink(o) + + scene.update() + + def _buildLightSetup(self, scene): + # Get the list of lighting sources + obj_lst = scene.objects + self.lights = [o for o in obj_lst if o.getType() == 'Lamp'] + + # When there are no lights we use a default lighting source + # that have the same position of the camera + if len(self.lights) == 0: + l = Lamp.New('Lamp') + lobj = Object.New('Lamp') + lobj.loc = self.cameraObj.loc + lobj.link(l) + self.lights.append(lobj) + + def _doSceneClipping(self, scene): + """Clip whole objects against the View Frustum. + + For now clip away only objects according to their center position. 
+ """ + + cam_pos = self._getObjPosition(self.cameraObj) + view_vect = self._cameraViewVector() + + near = self.cameraObj.data.clipStart + far = self.cameraObj.data.clipEnd + + aspect = float(self.canvasRatio[0]) / float(self.canvasRatio[1]) + fovy = atan(0.5 / aspect / (self.cameraObj.data.lens / 32)) + fovy = fovy * 360.0 / pi + + Objects = scene.objects + + for o in Objects: + if o.getType() != 'Mesh': + continue + + """ + obj_vect = Vector(cam_pos) - self._getObjPosition(o) + + d = obj_vect*view_vect + theta = AngleBetweenVecs(obj_vect, view_vect) + + # if the object is outside the view frustum, clip it away + if (d < near) or (d > far) or (theta > fovy): + scene.objects.unlink(o) + """ + + # Use the object bounding box + # (whose points are already in WorldSpace Coordinate) + + bb = o.getBoundBox() + + points_outside = 0 + for p in bb: + p_vect = Vector(cam_pos) - Vector(p) + + d = p_vect * view_vect + theta = AngleBetweenVecs(p_vect, view_vect) + + # Is this point outside the view frustum? + if (d < near) or (d > far) or (theta > fovy): + points_outside += 1 + + # If the bb is all outside the view frustum we clip the whole + # object away + if points_outside == len(bb): + scene.objects.unlink(o) + + def _doConvertGeometricObjsToMesh(self, scene): + """Convert all "geometric" objects to mesh ones. + """ + geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text'] + #geometricObjTypes = ['Mesh', 'Surf', 'Curve'] + + Objects = scene.objects + + objList = [o for o in Objects if o.getType() in geometricObjTypes] + for obj in objList: + old_obj = obj + obj = self._convertToRawMeshObj(obj) + scene.objects.link(obj) + scene.objects.unlink(old_obj) + + # XXX Workaround for Text and Curve which have some normals + # inverted when they are converted to Mesh, REMOVE that when + # blender will fix that!! + if old_obj.getType() in ['Curve', 'Text']: + me = obj.getData(mesh=1) + + for f in me.faces: + f.sel = 1 + for v in me.verts: + v.sel = 1 + + me.remDoubles(0) + me.triangleToQuad() + me.recalcNormals() + me.update() + + def _doSceneDepthSorting(self, scene): + """Sort objects in the scene. + + The object sorting is done accordingly to the object centers. + """ + + c = self._getObjPosition(self.cameraObj) + + by_obj_center_pos = (lambda o1, o2: + (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and + cmp((self._getObjPosition(o1) - Vector(c)).length, + (self._getObjPosition(o2) - Vector(c)).length) + ) + + # Implement sorting by bounding box, the object with the bb + # nearest to the camera should be drawn as last. + by_nearest_bbox_point = (lambda o1, o2: + (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and + cmp(min([(Vector(p) - Vector(c)).length for p in o1.getBoundBox()]), + min([(Vector(p) - Vector(c)).length for p in o2.getBoundBox()]) + ) + ) + + Objects = list(scene.objects) + + #Objects.sort(by_obj_center_pos) + Objects.sort(by_nearest_bbox_point) + + # update the scene + for o in Objects: + scene.objects.unlink(o) + scene.objects.link(o) + + def _joinMeshObjectsInScene(self, scene): + """Merge all the Mesh Objects in a scene into a single Mesh Object. + """ + + oList = [o for o in scene.objects if o.getType() == 'Mesh'] + + # FIXME: Object.join() do not work if the list contains 1 object + if len(oList) == 1: + return + + mesh = Mesh.New('BigOne') + bigObj = Object.New('Mesh', 'BigOne') + bigObj.link(mesh) + + scene.objects.link(bigObj) + + try: + bigObj.join(oList) + except RuntimeError: + print "\nWarning! 
- Can't Join Objects\n" + scene.objects.unlink(bigObj) + return + except TypeError: + print "Objects Type error?" + + for o in oList: + scene.objects.unlink(o) + + scene.update() + + # Per object/mesh methods + + def _convertToRawMeshObj(self, object): + """Convert geometry based object to a mesh object. + """ + me = Mesh.New('RawMesh_' + object.name) + me.getFromObject(object.name) + + newObject = Object.New('Mesh', 'RawMesh_' + object.name) + newObject.link(me) + + # If the object has no materials set a default material + if not me.materials: + me.materials = [Material.New()] + #for f in me.faces: f.mat = 0 + + newObject.setMatrix(object.getMatrix()) + + return newObject + + def _doModelingTransformation(self, mesh, matrix): + """Transform object coordinates to world coordinates. + + This step is done simply applying to the object its tranformation + matrix and recalculating its normals. + """ + # XXX FIXME: blender do not transform normals in the right way when + # there are negative scale values + if matrix[0][0] < 0 or matrix[1][1] < 0 or matrix[2][2] < 0: + print "WARNING: Negative scales, expect incorrect results!" + + mesh.transform(matrix, True) + + def _doBackFaceCulling(self, mesh): + """Simple Backface Culling routine. + + At this level we simply do a visibility test face by face and then + select the vertices belonging to visible faces. + """ + + # Select all vertices, so edges can be displayed even if there are no + # faces + for v in mesh.verts: + v.sel = 1 + + Mesh.Mode(Mesh.SelectModes['FACE']) + # Loop on faces + for f in mesh.faces: + f.sel = 0 + if self._isFaceVisible(f): + f.sel = 1 + + def _doLighting(self, mesh): + """Apply an Illumination and shading model to the object. + + The model used is the Phong one, it may be inefficient, + but I'm just learning about rendering and starting from Phong seemed + the most natural way. + """ + + # If the mesh has vertex colors already, use them, + # otherwise turn them on and do some calculations + if mesh.vertexColors: + return + mesh.vertexColors = 1 + + materials = mesh.materials + + camPos = self._getObjPosition(self.cameraObj) + + # We do per-face color calculation (FLAT Shading), we can easily turn + # to a per-vertex calculation if we want to implement some shading + # technique. For an example see: + # http://www.miralab.unige.ch/papers/368.pdf + for f in mesh.faces: + if not f.sel: + continue + + mat = None + if materials: + mat = materials[f.mat] + + # A new default material + if mat == None: + mat = Material.New('defMat') + + # Check if it is a shadeless material + elif mat.getMode() & Material.Modes['SHADELESS']: + I = mat.getRGBCol() + # Convert to a value between 0 and 255 + tmp_col = [int(c * 255.0) for c in I] + + for c in f.col: + c.r = tmp_col[0] + c.g = tmp_col[1] + c.b = tmp_col[2] + #c.a = tmp_col[3] + + continue + + # do vertex color calculation + + TotDiffSpec = Vector([0.0, 0.0, 0.0]) + + for l in self.lights: + light_obj = l + light_pos = self._getObjPosition(l) + light = light_obj.getData() + + L = Vector(light_pos).normalize() + + V = (Vector(camPos) - Vector(f.cent)).normalize() + + N = Vector(f.no).normalize() + + if config.polygons['SHADING'] == 'TOON': + NL = ShadingUtils.toonShading(N * L) + else: + NL = (N * L) + + # Should we use NL instead of (N*L) here? 
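+ # The line below builds the Phong reflection vector:
+ # R = 2(N.L)N - L is the light direction L mirrored about the
+ # face normal N. The diffuse term further down uses max(0, NL)
+ # (possibly toon-quantized), while the specular term raises
+ # max(0, V.R) to the material hardness ns.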
+ R = 2 * (N * L) * N - L + + Ip = light.getEnergy() + + # Diffuse co-efficient + kd = mat.getRef() * Vector(mat.getRGBCol()) + for i in [0, 1, 2]: + kd[i] *= light.col[i] + + Idiff = Ip * kd * max(0, NL) + + # Specular component + ks = mat.getSpec() * Vector(mat.getSpecCol()) + ns = mat.getHardness() + Ispec = Ip * ks * pow(max(0, (V * R)), ns) + + TotDiffSpec += (Idiff + Ispec) + + # Ambient component + Iamb = Vector(Blender.World.Get()[0].getAmb()) + ka = mat.getAmb() + + # Emissive component (convert to a triplet) + ki = Vector([mat.getEmit()] * 3) + + #I = ki + Iamb + (Idiff + Ispec) + I = ki + (ka * Iamb) + TotDiffSpec + + # Set Alpha component + I = list(I) + I.append(mat.getAlpha()) + + # Clamp I values between 0 and 1 + I = [min(c, 1) for c in I] + I = [max(0, c) for c in I] + + # Convert to a value between 0 and 255 + tmp_col = [int(c * 255.0) for c in I] + + for c in f.col: + c.r = tmp_col[0] + c.g = tmp_col[1] + c.b = tmp_col[2] + c.a = tmp_col[3] + + def _doProjection(self, mesh, projector): + """Apply Viewing and Projection tranformations. + """ + + for v in mesh.verts: + p = projector.doProjection(v.co[:]) + v.co[0] = p[0] + v.co[1] = p[1] + v.co[2] = p[2] + + #mesh.recalcNormals() + #mesh.update() + + # We could reeset Camera matrix, since now + # we are in Normalized Viewing Coordinates, + # but doung that would affect World Coordinate + # processing for other objects + + #self.cameraObj.data.type = 1 + #self.cameraObj.data.scale = 2.0 + #m = Matrix().identity() + #self.cameraObj.setMatrix(m) + + def _doViewFrustumClipping(self, mesh): + """Clip faces against the View Frustum. + """ + + # The Canonical View Volume, 8 vertices, and 6 faces, + # We consider its face normals pointing outside + + v1 = NMesh.Vert(1, 1, -1) + v2 = NMesh.Vert(1, -1, -1) + v3 = NMesh.Vert(-1, -1, -1) + v4 = NMesh.Vert(-1, 1, -1) + v5 = NMesh.Vert(1, 1, 1) + v6 = NMesh.Vert(1, -1, 1) + v7 = NMesh.Vert(-1, -1, 1) + v8 = NMesh.Vert(-1, 1, 1) + + cvv = [] + f1 = NMesh.Face([v1, v4, v3, v2]) + cvv.append(f1) + f2 = NMesh.Face([v5, v6, v7, v8]) + cvv.append(f2) + f3 = NMesh.Face([v1, v2, v6, v5]) + cvv.append(f3) + f4 = NMesh.Face([v2, v3, v7, v6]) + cvv.append(f4) + f5 = NMesh.Face([v3, v4, v8, v7]) + cvv.append(f5) + f6 = NMesh.Face([v4, v1, v5, v8]) + cvv.append(f6) + + nmesh = NMesh.GetRaw(mesh.name) + clippedfaces = nmesh.faces[:] + facelist = clippedfaces[:] + + for clipface in cvv: + + clippedfaces = [] + + for f in facelist: + + #newfaces = HSR.splitOn(clipface, f, return_positive_faces=False) + newfaces = None + + if not newfaces: + # Check if the face is all outside the view frustum + # TODO: Do this test before, it is more efficient + points_outside = 0 + for v in f: + if abs(v[0]) > (1 - EPS) or abs(v[1]) > (1 - EPS) or abs(v[2]) > (1 - EPS): + points_outside += 1 + + if points_outside != len(f): + clippedfaces.append(f) + else: + for nf in newfaces: + for v in nf: + nmesh.verts.append(v) + + nf.mat = f.mat + nf.sel = f.sel + nf.col = [f.col[0]] * len(nf.v) + + clippedfaces.append(nf) + facelist = clippedfaces[:] + + nmesh.faces = facelist + nmesh.update() + + # HSR routines + def __simpleDepthSort(self, mesh): + """Sort faces by the furthest vertex. + + This simple mesthod is known also as the painter algorithm, and it + solves HSR correctly only for convex meshes. 
+ """ + + #global progress + + # The sorting requires circa n*log(n) steps + n = len(mesh.faces) + progress.setActivity("HSR: Painter", n * log(n)) + + by_furthest_z = (lambda f1, f2: progress.update() and + cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2]) + EPS) + ) + + # FIXME: using NMesh to sort faces. We should avoid that! + nmesh = NMesh.GetRaw(mesh.name) + + # remember that _higher_ z values mean further points + nmesh.faces.sort(by_furthest_z) + nmesh.faces.reverse() + + nmesh.update() + + def __newellDepthSort(self, mesh): + """Newell's depth sorting. + + """ + + #global progress + + # Find non planar quads and convert them to triangle + #for f in mesh.faces: + # f.sel = 0 + # if is_nonplanar_quad(f.v): + # print "NON QUAD??" + # f.sel = 1 + + # Now reselect all faces + for f in mesh.faces: + f.sel = 1 + mesh.quadToTriangle() + + # FIXME: using NMesh to sort faces. We should avoid that! + nmesh = NMesh.GetRaw(mesh.name) + + # remember that _higher_ z values mean further points + nmesh.faces.sort(by_furthest_z) + nmesh.faces.reverse() + + # Begin depth sort tests + + # use the smooth flag to set marked faces + for f in nmesh.faces: + f.smooth = 0 + + facelist = nmesh.faces[:] + maplist = [] + + # The steps are _at_least_ equal to len(facelist), we do not count the + # feces coming out from splitting!! + progress.setActivity("HSR: Newell", len(facelist)) + #progress.setQuiet(True) + + while len(facelist): + debug("\n----------------------\n") + debug("len(facelits): %d\n" % len(facelist)) + P = facelist[0] + + pSign = sign(P.normal[2]) + + # We can discard faces parallel to the view vector + #if P.normal[2] == 0: + # facelist.remove(P) + # continue + + split_done = 0 + face_marked = 0 + + for Q in facelist[1:]: + + debug("P.smooth: " + str(P.smooth) + "\n") + debug("Q.smooth: " + str(Q.smooth) + "\n") + debug("\n") + + qSign = sign(Q.normal[2]) + # TODO: check also if Q is parallel?? + + # Test 0: We need to test only those Qs whose furthest vertex + # is closer to the observer than the closest vertex of P. 
+ + zP = [v.co[2] for v in P.v] + zQ = [v.co[2] for v in Q.v] + notZOverlap = min(zP) > max(zQ) + EPS + + if notZOverlap: + debug("\nTest 0\n") + debug("NOT Z OVERLAP!\n") + if Q.smooth == 0: + # If Q is not marked then we can safely print P + break + else: + debug("met a marked face\n") + continue + + # Test 1: X extent overlapping + xP = [v.co[0] for v in P.v] + xQ = [v.co[0] for v in Q.v] + #notXOverlap = (max(xP) <= min(xQ)) or (max(xQ) <= min(xP)) + notXOverlap = min(xQ) >= (max(xP) - EPS) or min(xP) >= (max(xQ) - EPS) + + if notXOverlap: + debug("\nTest 1\n") + debug("NOT X OVERLAP!\n") + continue + + # Test 2: Y extent Overlapping + yP = [v.co[1] for v in P.v] + yQ = [v.co[1] for v in Q.v] + #notYOverlap = max(yP) <= min(yQ) or max(yQ) <= min(yP) + notYOverlap = min(yQ) >= (max(yP) - EPS) or min(yP) >= (max(yQ) - EPS) + + if notYOverlap: + debug("\nTest 2\n") + debug("NOT Y OVERLAP!\n") + continue + + # Test 3: P vertices are all behind the plane of Q + n = 0 + for Pi in P: + d = qSign * HSR.Distance(Vector(Pi), Q) + if d <= EPS: + n += 1 + pVerticesBehindPlaneQ = (n == len(P)) + + if pVerticesBehindPlaneQ: + debug("\nTest 3\n") + debug("P BEHIND Q!\n") + continue + + # Test 4: Q vertices in front of the plane of P + n = 0 + for Qi in Q: + d = pSign * HSR.Distance(Vector(Qi), P) + if d >= -EPS: + n += 1 + qVerticesInFrontPlaneP = (n == len(Q)) + + if qVerticesInFrontPlaneP: + debug("\nTest 4\n") + debug("Q IN FRONT OF P!\n") + continue + + # Test 5: Check if projections of polygons effectively overlap, + # in previous tests we checked only bounding boxes. + + #if not projectionsOverlap(P, Q): + if not (HSR.projectionsOverlap(P, Q) or HSR.projectionsOverlap(Q, P)): + debug("\nTest 5\n") + debug("Projections do not overlap!\n") + continue + + # We still can't say if P obscures Q. + + # But if Q is marked we do a face-split trying to resolve a + # difficulty (maybe a visibility cycle). + if Q.smooth == 1: + # Split P or Q + debug("Possibly a cycle detected!\n") + debug("Split here!!\n") + + facelist = HSR.facesplit(P, Q, facelist, nmesh) + split_done = 1 + break + + # The question now is: Does Q obscure P? + + # Test 3bis: Q vertices are all behind the plane of P + n = 0 + for Qi in Q: + d = pSign * HSR.Distance(Vector(Qi), P) + if d <= EPS: + n += 1 + qVerticesBehindPlaneP = (n == len(Q)) + + if qVerticesBehindPlaneP: + debug("\nTest 3bis\n") + debug("Q BEHIND P!\n") + + # Test 4bis: P vertices in front of the plane of Q + n = 0 + for Pi in P: + d = qSign * HSR.Distance(Vector(Pi), Q) + if d >= -EPS: + n += 1 + pVerticesInFrontPlaneQ = (n == len(P)) + + if pVerticesInFrontPlaneQ: + debug("\nTest 4bis\n") + debug("P IN FRONT OF Q!\n") + + # We don't even know if Q does obscure P, so they should + # intersect each other, split one of them in two parts. + if not qVerticesBehindPlaneP and not pVerticesInFrontPlaneQ: + debug("\nSimple Intersection?\n") + debug("Test 3bis or 4bis failed\n") + debug("Split here!!2\n") + + facelist = HSR.facesplit(P, Q, facelist, nmesh) + split_done = 1 + break + + facelist.remove(Q) + facelist.insert(0, Q) + Q.smooth = 1 + face_marked = 1 + debug("Q marked!\n") + break + + # Write P! 
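+ # If the inner scan ended with no face split and no face marked,
+ # P can safely be painted before all remaining faces: emit it
+ # (painter's order, back to front) and drop it from the work list.
+ # Otherwise the outer while loop restarts on the reordered
+ # facelist, and the smooth flag on marked faces acts as the cycle
+ # detector that triggers the face splits above.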
+ if split_done == 0 and face_marked == 0: + facelist.remove(P) + maplist.append(P) + dumpfaces(maplist, "dump" + str(len(maplist)).zfill(4) + ".svg") + + progress.update() + + if len(facelist) == 870: + dumpfaces([P, Q], "loopdebug.svg") + + #if facelist == None: + # maplist = [P, Q] + # print [v.co for v in P] + # print [v.co for v in Q] + # break + + # end of while len(facelist) + + nmesh.faces = maplist + #for f in nmesh.faces: + # f.sel = 1 + + nmesh.update() + + def _doHiddenSurfaceRemoval(self, mesh): + """Do HSR for the given mesh. + """ + if len(mesh.faces) == 0: + return + + if config.polygons['HSR'] == 'PAINTER': + print "\nUsing the Painter algorithm for HSR." + self.__simpleDepthSort(mesh) + + elif config.polygons['HSR'] == 'NEWELL': + print "\nUsing the Newell's algorithm for HSR." + self.__newellDepthSort(mesh) + + def _doEdgesStyle(self, mesh, edgestyleSelect): + """Process Mesh Edges accroding to a given selection style. + + Examples of algorithms: + + Contours: + given an edge if its adjacent faces have the same normal (that is + they are complanar), than deselect it. + + Silhouettes: + given an edge if one its adjacent faces is frontfacing and the + other is backfacing, than select it, else deselect. + """ + + Mesh.Mode(Mesh.SelectModes['EDGE']) + + edge_cache = MeshUtils.buildEdgeFaceUsersCache(mesh) + + for i, edge_faces in enumerate(edge_cache): + mesh.edges[i].sel = 0 + if edgestyleSelect(edge_faces): + mesh.edges[i].sel = 1 + + """ + for e in mesh.edges: + + e.sel = 0 + if edgestyleSelect(e, mesh): + e.sel = 1 + """ + # + + +# --------------------------------------------------------------------- +# +## GUI Class and Main Program +# +# --------------------------------------------------------------------- + +from Blender import BGL, Draw +from Blender.BGL import * + + +class GUI: + + def _init(): + + # Output Format menu + output_format = config.output['FORMAT'] + default_value = outputWriters.keys().index(output_format) + 1 + GUI.outFormatMenu = Draw.Create(default_value) + GUI.evtOutFormatMenu = 0 + + # Animation toggle button + GUI.animToggle = Draw.Create(config.output['ANIMATION']) + GUI.evtAnimToggle = 1 + + # Join Objects toggle button + GUI.joinObjsToggle = Draw.Create(config.output['JOIN_OBJECTS']) + GUI.evtJoinObjsToggle = 2 + + # Render filled polygons + GUI.polygonsToggle = Draw.Create(config.polygons['SHOW']) + + # Shading Style menu + shading_style = config.polygons['SHADING'] + default_value = shadingStyles.keys().index(shading_style) + 1 + GUI.shadingStyleMenu = Draw.Create(default_value) + GUI.evtShadingStyleMenu = 21 + + GUI.evtPolygonsToggle = 3 + # We hide the config.polygons['EXPANSION_TRICK'], for now + + # Render polygon edges + GUI.showEdgesToggle = Draw.Create(config.edges['SHOW']) + GUI.evtShowEdgesToggle = 4 + + # Render hidden edges + GUI.showHiddenEdgesToggle = Draw.Create(config.edges['SHOW_HIDDEN']) + GUI.evtShowHiddenEdgesToggle = 5 + + # Edge Style menu + edge_style = config.edges['STYLE'] + default_value = edgeStyles.keys().index(edge_style) + 1 + GUI.edgeStyleMenu = Draw.Create(default_value) + GUI.evtEdgeStyleMenu = 6 + + # Edge Width slider + GUI.edgeWidthSlider = Draw.Create(config.edges['WIDTH']) + GUI.evtEdgeWidthSlider = 7 + + # Edge Color Picker + c = config.edges['COLOR'] + GUI.edgeColorPicker = Draw.Create(c[0] / 255.0, c[1] / 255.0, c[2] / 255.0) + GUI.evtEdgeColorPicker = 71 + + # Render Button + GUI.evtRenderButton = 8 + + # Exit Button + GUI.evtExitButton = 9 + + # Save default button + GUI.evtSaveDefaultButton = 99 + + 
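Referring back to _doEdgesStyle() above: the edge styles are simply predicates over the faces that share an edge, looked up through the edgeStyles dictionary. The real implementations are MeshUtils.isMeshEdge and MeshUtils.isSilhouetteEdge, defined earlier in the file and not shown in this hunk; the sketch below only illustrates the idea described in the docstring, using the same edge_faces argument that _doEdgesStyle() passes in, and the face.sel flag that the back-face culling step uses to mark front-facing faces. The function names are made up for illustration.

def sketch_isMeshEdge(edge_faces):
    # Keep an edge if at least one of its adjacent faces is visible
    # (selected by the back-face culling pass).
    return len([f for f in edge_faces if f.sel]) > 0

def sketch_isSilhouetteEdge(edge_faces):
    # A silhouette edge separates a front-facing (selected) face from
    # a back-facing (unselected) one; counting border edges of visible
    # faces as silhouettes too is one possible choice.
    if len(edge_faces) == 2:
        return edge_faces[0].sel != edge_faces[1].sel
    return len(edge_faces) == 1 and edge_faces[0].sel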
def draw(): + + # initialize static members + GUI._init() + + glClear(GL_COLOR_BUFFER_BIT) + glColor3f(0.0, 0.0, 0.0) + glRasterPos2i(10, 380) + Draw.Text("VRM: Vector Rendering Method script. Version %s." % + __version__) + glRasterPos2i(10, 365) + Draw.Text("%s (c) 2006, 2007" % __author__) + + glRasterPos2i(10, 335) + Draw.Text("Press Q or ESC to quit.") + + # Build the output format menu + glRasterPos2i(10, 310) + Draw.Text("Select the output Format:") + outMenuStruct = "Output Format %t" + for t in outputWriters.keys(): + outMenuStruct = outMenuStruct + "|%s" % t + GUI.outFormatMenu = Draw.Menu(outMenuStruct, GUI.evtOutFormatMenu, + 10, 285, 160, 18, GUI.outFormatMenu.val, "Choose the Output Format") + + # Animation toggle + GUI.animToggle = Draw.Toggle("Animation", GUI.evtAnimToggle, + 10, 260, 160, 18, GUI.animToggle.val, + "Toggle rendering of animations") + + # Join Objects toggle + GUI.joinObjsToggle = Draw.Toggle("Join objects", GUI.evtJoinObjsToggle, + 10, 235, 160, 18, GUI.joinObjsToggle.val, + "Join objects in the rendered file") + + # Render Button + Draw.Button("Render", GUI.evtRenderButton, 10, 210 - 25, 75, 25 + 18, + "Start Rendering") + Draw.Button("Exit", GUI.evtExitButton, 95, 210 - 25, 75, 25 + 18, "Exit!") + + Draw.Button("Save settings as default", GUI.evtSaveDefaultButton, 10, 210 - 50, 160, 18, + "Save settings as default") + + # Rendering Styles + glRasterPos2i(200, 310) + Draw.Text("Rendering Style:") + + # Render Polygons + GUI.polygonsToggle = Draw.Toggle("Filled Polygons", GUI.evtPolygonsToggle, + 200, 285, 160, 18, GUI.polygonsToggle.val, + "Render filled polygons") + + if GUI.polygonsToggle.val == 1: + + # Polygon Shading Style + shadingStyleMenuStruct = "Shading Style %t" + for t in shadingStyles.keys(): + shadingStyleMenuStruct = shadingStyleMenuStruct + "|%s" % t.lower() + GUI.shadingStyleMenu = Draw.Menu(shadingStyleMenuStruct, GUI.evtShadingStyleMenu, + 200, 260, 160, 18, GUI.shadingStyleMenu.val, + "Choose the shading style") + + # Render Edges + GUI.showEdgesToggle = Draw.Toggle("Show Edges", GUI.evtShowEdgesToggle, + 200, 235, 160, 18, GUI.showEdgesToggle.val, + "Render polygon edges") + + if GUI.showEdgesToggle.val == 1: + + # Edge Style + edgeStyleMenuStruct = "Edge Style %t" + for t in edgeStyles.keys(): + edgeStyleMenuStruct = edgeStyleMenuStruct + "|%s" % t.lower() + GUI.edgeStyleMenu = Draw.Menu(edgeStyleMenuStruct, GUI.evtEdgeStyleMenu, + 200, 210, 160, 18, GUI.edgeStyleMenu.val, + "Choose the edge style") + + # Edge size + GUI.edgeWidthSlider = Draw.Slider("Width: ", GUI.evtEdgeWidthSlider, + 200, 185, 140, 18, GUI.edgeWidthSlider.val, + 0.0, 10.0, 0, "Change Edge Width") + + # Edge Color + GUI.edgeColorPicker = Draw.ColorPicker(GUI.evtEdgeColorPicker, + 342, 185, 18, 18, GUI.edgeColorPicker.val, "Choose Edge Color") + + # Show Hidden Edges + GUI.showHiddenEdgesToggle = Draw.Toggle("Show Hidden Edges", + GUI.evtShowHiddenEdgesToggle, + 200, 160, 160, 18, GUI.showHiddenEdgesToggle.val, + "Render hidden edges as dashed lines") + + def event(evt, val): + + if evt == Draw.ESCKEY or evt == Draw.QKEY: + Draw.Exit() + else: + return + + Draw.Redraw(1) + + def button_event(evt): + + if evt == GUI.evtExitButton: + Draw.Exit() + + elif evt == GUI.evtOutFormatMenu: + i = GUI.outFormatMenu.val - 1 + config.output['FORMAT'] = outputWriters.keys()[i] + # Set the new output file + global outputfile + outputfile = Blender.sys.splitext(basename)[0] + "." 
+ str(config.output['FORMAT']).lower() + + elif evt == GUI.evtAnimToggle: + config.output['ANIMATION'] = bool(GUI.animToggle.val) + + elif evt == GUI.evtJoinObjsToggle: + config.output['JOIN_OBJECTS'] = bool(GUI.joinObjsToggle.val) + + elif evt == GUI.evtPolygonsToggle: + config.polygons['SHOW'] = bool(GUI.polygonsToggle.val) + + elif evt == GUI.evtShadingStyleMenu: + i = GUI.shadingStyleMenu.val - 1 + config.polygons['SHADING'] = shadingStyles.keys()[i] + + elif evt == GUI.evtShowEdgesToggle: + config.edges['SHOW'] = bool(GUI.showEdgesToggle.val) + + elif evt == GUI.evtShowHiddenEdgesToggle: + config.edges['SHOW_HIDDEN'] = bool(GUI.showHiddenEdgesToggle.val) + + elif evt == GUI.evtEdgeStyleMenu: + i = GUI.edgeStyleMenu.val - 1 + config.edges['STYLE'] = edgeStyles.keys()[i] + + elif evt == GUI.evtEdgeWidthSlider: + config.edges['WIDTH'] = float(GUI.edgeWidthSlider.val) + + elif evt == GUI.evtEdgeColorPicker: + config.edges['COLOR'] = [int(c * 255.0) for c in GUI.edgeColorPicker.val] + + elif evt == GUI.evtRenderButton: + label = "Save %s" % config.output['FORMAT'] + # Show the File Selector + global outputfile + Blender.Window.FileSelector(vectorize, label, outputfile) + + elif evt == GUI.evtSaveDefaultButton: + config.saveToRegistry() + + else: + print "Event: %d not handled!" % evt + + if evt: + Draw.Redraw(1) + #GUI.conf_debug() + + def conf_debug(): + from pprint import pprint + print "\nConfig" + pprint(config.output) + pprint(config.polygons) + pprint(config.edges) + + _init = staticmethod(_init) + draw = staticmethod(draw) + event = staticmethod(event) + button_event = staticmethod(button_event) + conf_debug = staticmethod(conf_debug) + + +# A wrapper function for the vectorizing process +def vectorize(filename): + """The vectorizing process is as follows: + + - Instanciate the writer and the renderer + - Render! + """ + + if filename == "": + print "\nERROR: invalid file name!" + return + + from Blender import Window + editmode = Window.EditMode() + if editmode: + Window.EditMode(0) + + actualWriter = outputWriters[config.output['FORMAT']] + writer = actualWriter(filename) + + renderer = Renderer() + renderer.doRendering(writer, config.output['ANIMATION']) + + if editmode: + Window.EditMode(1) - Blender.Scene.unlink(flatScene) - del flatScene # Here the main if __name__ == "__main__": - # with this trick we can run the script in batch mode - try: - Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg") - except: - vectorize("proba.svg") + global progress + + config.loadFromRegistry() + + # initialize writer setting also here to configure writer specific + # settings on startup + actualWriter = outputWriters[config.output['FORMAT']] + writer = actualWriter("") + + outputfile = "" + basename = Blender.sys.basename(Blender.Get('filename')) + if basename != "": + outputfile = Blender.sys.splitext(basename)[0] + "." + str(config.output['FORMAT']).lower() + + if Blender.mode == 'background': + progress = ConsoleProgressIndicator() + vectorize(outputfile) + else: + progress = GraphicalProgressIndicator() + Draw.Register(GUI.draw, GUI.event, GUI.button_event)
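A usage note on the __main__ block above: since it checks Blender.mode, the script also works without the GUI. From a shell, something like

    blender -b scene.blend -P vrm.py

(with scene.blend standing in for an actual file) runs the 'background' branch and writes an output file named after the .blend (e.g. scene.svg) in the current directory, using whatever settings were last saved to the registry. To pin the settings explicitly for batch runs, the background branch could be adapted along these lines in a local copy of the script; all names below come from the script itself, only the output path is a placeholder:

# Hypothetical batch variant of the 'background' branch, with
# hard-coded settings instead of the registry defaults.
config.polygons['SHOW'] = True
config.polygons['SHADING'] = 'TOON'
config.edges['SHOW'] = True
config.output['FORMAT'] = 'SVG'
config.output['ANIMATION'] = False

progress = ConsoleProgressIndicator()
vectorize("scene.svg")  # placeholder output path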