+ The rendering is done using the active camera for the current scene.
+ """
+
+ def __init__(self):
+ """Make the rendering process only for the current scene by default.
+
+ We will work on a copy of the scene, to be sure that the current scene do
+ not get modified in any way.
+ """
+
+ # The current Scene; this should be a READ-ONLY property
+ self._SCENE = Scene.GetCurrent()
+
+ # Use the aspect ratio of the scene rendering context
+ context = self._SCENE.getRenderingContext()
+
+ aspect_ratio = float(context.imageSizeX())/float(context.imageSizeY())
+ self.canvasRatio = (float(context.aspectRatioX())*aspect_ratio,
+ float(context.aspectRatioY())
+ )
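+ # For illustration: with a hypothetical 800x600 render and square
+ # pixels (aspectRatioX == aspectRatioY, e.g. both 100), aspect_ratio
+ # is 800/600 = 1.333 and canvasRatio becomes (133.3, 100.0), i.e. a
+ # 4:3 canvas.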
+
+ # Render from the currently active camera (also refreshed per frame
+ # in doRendering, but needed here for the default light below)
+ self.cameraObj = self._SCENE.getCurrentCamera()
+
+ # Get the list of lighting sources
+ obj_lst = self._SCENE.getChildren()
+ self.lights = [ o for o in obj_lst if o.getType() == 'Lamp']
+
+ # When there are no lights we use a default lighting source
+ # that has the same position as the camera
+ if len(self.lights) == 0:
+ l = Lamp.New('Lamp')
+ lobj = Object.New('Lamp')
+ lobj.loc = self.cameraObj.loc
+ lobj.link(l)
+ self.lights.append(lobj)
+
+
+ ##
+ # Public Methods
+ #
+
+ def doRendering(self, outputWriter, animation=False):
+ """Render picture or animation and write it out.
+
+ The parameters are:
+ - a Vector writer object that will be used to output the result.
+ - a flag to tell if we want to render an animation or only the
+ current frame.
+ """
+
+ context = self._SCENE.getRenderingContext()
+ origCurrentFrame = context.currentFrame()
+
+ # Handle the animation case
+ if not animation:
+ startFrame = origCurrentFrame
+ endFrame = startFrame
+ outputWriter.open()
+ else:
+ startFrame = context.startFrame()
+ endFrame = context.endFrame()
+ outputWriter.open(startFrame, endFrame)
+
+ # Do the rendering process frame by frame
+ print "Start Rendering of %d frames" % (endFrame-startFrame+1)
+ for f in xrange(startFrame, endFrame+1):
+ print "\n\nFrame: %d" % f
+
+ # FIXME To get the correct camera position we have to use +1 here.
+ # Is there a bug somewhere in the Scene module?
+ context.currentFrame(f+1)
+ self.cameraObj = self._SCENE.getCurrentCamera()
+
+ # Use some temporary workspace: a full copy of the scene
+ inputScene = self._SCENE.copy(2)
+
+ # To get the objects at this frame remove the +1 ...
+ ctx = inputScene.getRenderingContext()
+ ctx.currentFrame(f)
+
+
+ # Get a projector for this camera.
+ # NOTE: the projector wants objects in world coordinates,
+ # so we should remember to apply modelview transformations
+ # _before_ we do projection transformations.
+ self.proj = Projector(self.cameraObj, self.canvasRatio)
+
+ try:
+ renderedScene = self.doRenderScene(inputScene)
+ except:
+ print "There was an error! Aborting."
+ import traceback
+ traceback.print_exc()
+
+ self._SCENE.makeCurrent()
+ Scene.unlink(inputScene)
+ del inputScene
+ return
+
+ outputWriter.printCanvas(renderedScene,
+ doPrintPolygons = config.polygons['SHOW'],
+ doPrintEdges = config.edges['SHOW'],
+ showHiddenEdges = config.edges['SHOW_HIDDEN'])
+
+ # delete the rendered scene
+ self._SCENE.makeCurrent()
+ Scene.unlink(renderedScene)
+ del renderedScene
+
+ outputWriter.close()
+ print "Done!"
+ context.currentFrame(origCurrentFrame)
+
+
+ def doRenderScene(self, workScene):
+ """Control the rendering process.
+
+ Here we control the entire rendering process, invoking the operations
+ needed to transform and project the 3D scene into two dimensions.
+ """
+
+ # global processing of the scene
+
+ self._doSceneClipping(workScene)
+
+ self._doConvertGeometricObjsToMesh(workScene)
+
+ if config.output['JOIN_OBJECTS']:
+ self._joinMeshObjectsInScene(workScene)
+
+ self._doSceneDepthSorting(workScene)
+
+ # Per object activities
+
+ Objects = workScene.getChildren()
+ print "Total Objects: %d" % len(Objects)
+ for i,obj in enumerate(Objects):
+ print "\n\n-------"
+ print "Rendering Object: %d" % i
+
+ if obj.getType() != 'Mesh':
+ print "Only Mesh supported! - Skipping type:", obj.getType()
+ continue
+
+ print "Rendering: ", obj.getName()
+
+ mesh = obj.getData(mesh=1)
+
+ self._doModelingTransformation(mesh, obj.matrix)
+
+ self._doBackFaceCulling(mesh)
+
+
+ # When doing HSR with NEWELL we may want to flip all normals
+ # toward the viewer
+ if config.polygons['HSR'] == "NEWELL":
+ for f in mesh.faces:
+ f.sel = 1-f.sel
+ mesh.flipNormals()
+ for f in mesh.faces:
+ f.sel = 1
+
+ self._doLighting(mesh)
+
+ # Do "projection" now so we perform further processing
+ # in Normalized View Coordinates
+ self._doProjection(mesh, self.proj)
+
+ self._doViewFrustumClipping(mesh)
+
+ self._doHiddenSurfaceRemoval(mesh)
+
+ self._doEdgesStyle(mesh, edgeStyles[config.edges['STYLE']])
+
+ # Update the object data, important! :)
+ mesh.update()
+
+ return workScene
+
+
+ ##
+ # Private Methods
+ #
+
+ # Utility methods
+
+ def _getObjPosition(self, obj):
+ """Return the obj position in World coordinates.
+ """
+ return obj.matrix.translationPart()
+
+ def _cameraViewVector(self):
+ """Get the View Direction form the camera matrix.
+ """
+ return Vector(self.cameraObj.matrix[2]).resize3D()
+
+
+ # Faces methods
+
+ def _isFaceVisible(self, face):
+ """Determine if a face of an object is visible from the current camera.
+
+ The view vector is calculated from the camera location and one of the
+ vertices of the face (expressed in World coordinates, after applying
+ modelview transformations).
+
+ After those transformations we determine if a face is visible by
+ computing the angle between the face normal and the view vector; this
+ angle has to be between -90 and 90 degrees for the face to be visible.
+ This corresponds to the dot product between the two: if it is > 0
+ then the face is visible.
+
+ There is no need to normalize those vectors since we are only interested
+ in the sign of the dot product and not in its value.
+
+ NOTE: here we assume the face vertices are in WorldCoordinates, so
+ please transform the object _before_ doing the test.
+ """
+
+ normal = Vector(face.no)
+ camPos = self._getObjPosition(self.cameraObj)
+ view_vect = None
+
+ # The View Vector in orthographic projections is the view direction
+ # of the camera
+ if self.cameraObj.data.getType() == 1:
+ view_vect = self._cameraViewVector()
+
+ # The View vector in perspective projections can be taken as the
+ # difference between the camera position and one point of the face;
+ # we choose the farthest point from the camera.
+ if self.cameraObj.data.getType() == 0:
+ vv = max( [ ((camPos - Vector(v.co)).length, (camPos - Vector(v.co))) for v in face] )
+ view_vect = vv[1]
+
+
+ # if d > 0 the face is visible from the camera
+ d = view_vect * normal
+
+ if d > 0:
+ return True
+ else:
+ return False
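+ # For illustration: with view_vect = (0, 0, 1), a face whose normal
+ # is (0, 0.6, 0.8) gives d = 0.8 > 0 and is visible, while a face
+ # with normal (0, 0.6, -0.8) gives d = -0.8 and is culled.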
+
+
+ # Scene methods
+
+ def _doSceneClipping(self, scene):
+ """Clip whole objects against the View Frustum.
+
+ For now clip away only objects according to their center position.
+ """
+
+ cam_pos = self._getObjPosition(self.cameraObj)
+ view_vect = self._cameraViewVector()
+
+ near = self.cameraObj.data.clipStart
+ far = self.cameraObj.data.clipEnd
+
+ aspect = float(self.canvasRatio[0])/float(self.canvasRatio[1])
+ fovy = atan(0.5/aspect/(self.cameraObj.data.lens/32))
+ fovy = fovy * 360.0/pi
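+ # The two lines above are equivalent to fovy = 2*atan(16/(lens*aspect))
+ # in degrees: the old Blender camera model assumes a 32mm film width,
+ # so a default 35mm lens on a 4:3 canvas gives roughly
+ # fovy = 2*atan(16/(35*1.333)) ~= 37.9 degrees.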
+
+ Objects = scene.getChildren()
+ for o in Objects:
+ if o.getType() != 'Mesh': continue
+
+ """
+ obj_vect = Vector(cam_pos) - self._getObjPosition(o)
+
+ d = obj_vect*view_vect
+ theta = AngleBetweenVecs(obj_vect, view_vect)
+
+ # if the object is outside the view frustum, clip it away
+ if (d < near) or (d > far) or (theta > fovy):
+ scene.unlink(o)
+ """
+
+ # Use the object bounding box
+ # (whose points are already in World Space coordinates)
+
+ bb = o.getBoundBox()
+
+ points_outside = 0
+ for p in bb:
+ p_vect = Vector(cam_pos) - Vector(p)
+
+ d = p_vect * view_vect
+ theta = AngleBetweenVecs(p_vect, view_vect)
+
+ # Is this point outside the view frustum?
+ if (d < near) or (d > far) or (theta > fovy):
+ points_outside += 1
+
+ # If the bb is all outside the view frustum we clip the whole
+ # object away
+ if points_outside == len(bb):
+ scene.unlink(o)
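+ # NOTE: this test is conservative only point-by-point: a large
+ # object whose eight bb corners all lie outside the frustum can
+ # still cross it, and would be wrongly clipped away here.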
+
+
+
+ def _doConvertGeometricObjsToMesh(self, scene):
+ """Convert all "geometric" objects to mesh ones.
+ """
+ geometricObjTypes = ['Mesh', 'Surf', 'Curve', 'Text']
+ #geometricObjTypes = ['Mesh', 'Surf', 'Curve']
+
+ Objects = scene.getChildren()
+ objList = [ o for o in Objects if o.getType() in geometricObjTypes ]
+ for obj in objList:
+ old_obj = obj
+ obj = self._convertToRawMeshObj(obj)
+ scene.link(obj)
+ scene.unlink(old_obj)
+
+
+ # XXX Workaround for Text and Curve objects, which have some normals
+ # inverted when they are converted to Mesh; REMOVE this when
+ # Blender fixes that!!
+ if old_obj.getType() in ['Curve', 'Text']:
+ me = obj.getData(mesh=1)
+ for f in me.faces: f.sel = 1
+ for v in me.verts: v.sel = 1
+ me.remDoubles(0)
+ me.triangleToQuad()
+ me.recalcNormals()
+ me.update()
+
+
+ def _doSceneDepthSorting(self, scene):
+ """Sort objects in the scene.
+
+ The object sorting is done accordingly to the object centers.
+ """
+
+ c = self._getObjPosition(self.cameraObj)
+
+ by_obj_center_pos = (lambda o1, o2:
+ (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and
+ cmp((self._getObjPosition(o1) - Vector(c)).length,
+ (self._getObjPosition(o2) - Vector(c)).length)
+ )
+
+ # Sort by bounding box: the object whose bb is nearest to the camera
+ # should be drawn last.
+ by_nearest_bbox_point = (lambda o1, o2:
+ (o1.getType() == 'Mesh' and o2.getType() == 'Mesh') and
+ cmp( min( [(Vector(p) - Vector(c)).length for p in o1.getBoundBox()] ),
+ min( [(Vector(p) - Vector(c)).length for p in o2.getBoundBox()] )
+ )
+ )
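+ # Both comparators follow Python 2's cmp protocol (negative, zero or
+ # positive). For non-Mesh pairs the leading `and` short-circuits to
+ # False (i.e. 0), so sort() leaves their relative order unchanged.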
+
+
+ Objects = scene.getChildren()
+ #Objects.sort(by_obj_center_pos)
+ Objects.sort(by_nearest_bbox_point)
+
+ # Update the scene: relink the objects in the sorted order
+ for o in Objects:
+ scene.unlink(o)
+ scene.link(o)
+
+ def _joinMeshObjectsInScene(self, scene):
+ """Merge all the Mesh Objects in a scene into a single Mesh Object.
+ """
+
+ oList = [o for o in scene.getChildren() if o.getType()=='Mesh']
+
+ # FIXME: Object.join() does not work if the list contains only 1 object
+ if len(oList) == 1:
+ return
+
+ mesh = Mesh.New('BigOne')
+ bigObj = Object.New('Mesh', 'BigOne')
+ bigObj.link(mesh)
+
+ scene.link(bigObj)
+
+ try:
+ bigObj.join(oList)
+ except RuntimeError:
+ print "\nWarning! - Can't Join Objects\n"
+ scene.unlink(bigObj)
+ return
+ except TypeError:
+ print "Objects Type error?"
+
+ for o in oList:
+ scene.unlink(o)
+
+ scene.update()
+
+
+ # Per object/mesh methods
+
+ def _convertToRawMeshObj(self, object):
+ """Convert geometry based object to a mesh object.
+ """
+ me = Mesh.New('RawMesh_'+object.name)
+ me.getFromObject(object.name)
+
+ newObject = Object.New('Mesh', 'RawMesh_'+object.name)
+ newObject.link(me)
+
+ # If the object has no materials set a default material
+ if not me.materials:
+ me.materials = [Material.New()]
+ #for f in me.faces: f.mat = 0
+
+ newObject.setMatrix(object.getMatrix())
+
+ return newObject
+
+ def _doModelingTransformation(self, mesh, matrix):
+ """Transform object coordinates to world coordinates.
+
+ This step is done simply by applying the object's transformation
+ matrix and recalculating its normals.
+ """
+ # XXX FIXME: Blender does not transform normals correctly when
+ # there are negative scale values
+ if matrix[0][0] < 0 or matrix[1][1] < 0 or matrix[2][2] < 0:
+ print "WARNING: Negative scales, expect incorrect results!"
+
+ mesh.transform(matrix, True)
+
+ def _doBackFaceCulling(self, mesh):
+ """Simple Backface Culling routine.
+
+ At this level we simply do a visibility test face by face and then
+ select the vertices belonging to visible faces.
+ """
+
+ # Select all vertices, so edges can be displayed even if there are no
+ # faces
+ for v in mesh.verts:
+ v.sel = 1
+
+ Mesh.Mode(Mesh.SelectModes['FACE'])
+ # Loop on faces
+ for f in mesh.faces:
+ f.sel = 0
+ if self._isFaceVisible(f):
+ f.sel = 1
+
+ def _doLighting(self, mesh):
+ """Apply an Illumination and shading model to the object.
+
+ The model used is the Phong one; it may be inefficient, but I'm just
+ learning about rendering and starting from Phong seemed the most
+ natural way.
+ """
+
+ # If the mesh has vertex colors already, use them,
+ # otherwise turn them on and do some calculations
+ if mesh.vertexColors:
+ return
+ mesh.vertexColors = 1
+
+ materials = mesh.materials
+
+ camPos = self._getObjPosition(self.cameraObj)
+
+ # We do per-face color calculation (FLAT Shading), we can easily turn
+ # to a per-vertex calculation if we want to implement some shading
+ # technique. For an example see:
+ # http://www.miralab.unige.ch/papers/368.pdf
+ for f in mesh.faces:
+ if not f.sel:
+ continue
+
+ mat = None
+ if materials:
+ mat = materials[f.mat]
+
+ # A new default material
+ if mat is None:
+ mat = Material.New('defMat')
+
+ # Check if it is a shadeless material
+ elif mat.getMode() & Material.Modes['SHADELESS']:
+ I = mat.getRGBCol()
+ # Convert to a value between 0 and 255
+ tmp_col = [ int(c * 255.0) for c in I]
+
+ for c in f.col:
+ c.r = tmp_col[0]
+ c.g = tmp_col[1]
+ c.b = tmp_col[2]
+ #c.a = tmp_col[3]
+
+ continue
+
+
+ # do vertex color calculation
+
+ TotDiffSpec = Vector([0.0, 0.0, 0.0])
+
+ for l in self.lights:
+ light_obj = l
+ light_pos = self._getObjPosition(l)
+ light = light_obj.getData()
+
+ L = Vector(light_pos).normalize()
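+ # NOTE: L is built from the lamp position relative to the world
+ # origin, so the lamp is effectively treated as a directional
+ # source rather than a point light.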
+
+ V = (Vector(camPos) - Vector(f.cent)).normalize()
+
+ N = Vector(f.no).normalize()
+
+ if config.polygons['SHADING'] == 'TOON':
+ NL = ShadingUtils.toonShading(N*L)
+ else:
+ NL = (N*L)
+
+ # Should we use NL instead of (N*L) here?
+ R = 2 * (N*L) * N - L
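+ # R is L mirrored about the normal: R = 2*(N.L)*N - L. With N and L
+ # both unit vectors, R is a unit vector too.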
+
+ Ip = light.getEnergy()
+
+ # Diffuse coefficient
+ kd = mat.getRef() * Vector(mat.getRGBCol())
+ for i in [0, 1, 2]:
+ kd[i] *= light.col[i]
+
+ Idiff = Ip * kd * max(0, NL)
+
+
+ # Specular component
+ ks = mat.getSpec() * Vector(mat.getSpecCol())
+ ns = mat.getHardness()
+ Ispec = Ip * ks * pow(max(0, (V*R)), ns)
+
+ TotDiffSpec += (Idiff+Ispec)
+
+
+ # Ambient component
+ Iamb = Vector(Blender.World.Get()[0].getAmb())
+ ka = mat.getAmb()
+
+ # Emissive component (convert to a triplet)
+ ki = Vector([mat.getEmit()]*3)
+
+ #I = ki + Iamb + (Idiff + Ispec)
+ I = ki + (ka * Iamb) + TotDiffSpec
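+ # In symbols, the full model assembled here is:
+ # I = ke + ka*Ia + sum_lights( Ip * (kd*max(0, N.L) + ks*max(0, V.R)^ns) )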
+
+
+ # Set Alpha component
+ I = list(I)
+ I.append(mat.getAlpha())
+
+ # Clamp I values between 0 and 1
+ I = [ min(c, 1) for c in I]
+ I = [ max(0, c) for c in I]
+
+ # Convert to a value between 0 and 255
+ tmp_col = [ int(c * 255.0) for c in I]
+
+ for c in f.col:
+ c.r = tmp_col[0]
+ c.g = tmp_col[1]
+ c.b = tmp_col[2]
+ c.a = tmp_col[3]
+
+ def _doProjection(self, mesh, projector):
+ """Apply Viewing and Projection tranformations.
+ """
+
+ for v in mesh.verts:
+ p = projector.doProjection(v.co[:])
+ v.co[0] = p[0]
+ v.co[1] = p[1]
+ v.co[2] = p[2]
+
+ #mesh.recalcNormals()
+ #mesh.update()
+
+ # We could reset the Camera matrix, since now
+ # we are in Normalized Viewing Coordinates,
+ # but doing that would affect World Coordinate
+ # processing for other objects
+
+ #self.cameraObj.data.type = 1
+ #self.cameraObj.data.scale = 2.0
+ #m = Matrix().identity()
+ #self.cameraObj.setMatrix(m)
+
+ def _doViewFrustumClipping(self, mesh):
+ """Clip faces against the View Frustum.
+ """
+
+ # The Canonical View Volume: 8 vertices and 6 faces;
+ # we consider its face normals to point outward
+
+ v1 = NMesh.Vert(1, 1, -1)
+ v2 = NMesh.Vert(1, -1, -1)
+ v3 = NMesh.Vert(-1, -1, -1)
+ v4 = NMesh.Vert(-1, 1, -1)
+ v5 = NMesh.Vert(1, 1, 1)
+ v6 = NMesh.Vert(1, -1, 1)
+ v7 = NMesh.Vert(-1, -1, 1)
+ v8 = NMesh.Vert(-1, 1, 1)
+
+ cvv = []
+ f1 = NMesh.Face([v1, v4, v3, v2])
+ cvv.append(f1)
+ f2 = NMesh.Face([v5, v6, v7, v8])
+ cvv.append(f2)
+ f3 = NMesh.Face([v1, v2, v6, v5])
+ cvv.append(f3)
+ f4 = NMesh.Face([v2, v3, v7, v6])
+ cvv.append(f4)
+ f5 = NMesh.Face([v3, v4, v8, v7])
+ cvv.append(f5)
+ f6 = NMesh.Face([v4, v1, v5, v8])
+ cvv.append(f6)
+
+ nmesh = NMesh.GetRaw(mesh.name)
+ clippedfaces = nmesh.faces[:]
+ facelist = clippedfaces[:]
+
+ for clipface in cvv:
+
+ clippedfaces = []
+
+ for f in facelist:
+
+ newfaces = HSR.splitOn(clipface, f, return_positive_faces=False)
+
+ if not newfaces:
+ # Check if the face is all outside the view frustum
+ # TODO: Do this test before, it is more efficient
+ points_outside = 0
+ for v in f:
+ if abs(v[0]) > 1-EPS or abs(v[1]) > 1-EPS or abs(v[2]) > 1-EPS:
+ points_outside += 1
+
+ if points_outside != len(f):
+ clippedfaces.append(f)
+ else:
+ for nf in newfaces:
+ for v in nf:
+ nmesh.verts.append(v)
+
+ nf.mat = f.mat
+ nf.sel = f.sel
+ nf.col = [f.col[0]] * len(nf.v)
+
+ clippedfaces.append(nf)
+ facelist = clippedfaces[:]
+
+
+ nmesh.faces = facelist
+ nmesh.update()
+
+
+ # HSR routines
+ def __simpleDepthSort(self, mesh):
+ """Sort faces by the furthest vertex.
+
+ This simple mesthod is known also as the painter algorithm, and it
+ solves HSR correctly only for convex meshes.
+ """
+
+ #global progress
+
+ # The sorting requires about n*log(n) steps
+ n = len(mesh.faces)
+ progress.setActivity("HSR: Painter", n*log(n))
+
+ by_furthest_z = (lambda f1, f2: progress.update() and
+ cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2])+EPS)
+ )
+
+ # FIXME: using NMesh to sort faces. We should avoid that!
+ nmesh = NMesh.GetRaw(mesh.name)
+
+ # remember that _higher_ z values mean further points
+ nmesh.faces.sort(by_furthest_z)
+ nmesh.faces.reverse()
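+ # After the reverse the faces are in far-to-near order, which is
+ # exactly the order the painter's algorithm draws them in.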
+
+ nmesh.update()
+
+
+ def __newellDepthSort(self, mesh):
+ """Newell's depth sorting.
+
+ """
+
+ #global progress
+
+ # Find non-planar quads and convert them to triangles
+ #for f in mesh.faces:
+ # f.sel = 0
+ # if is_nonplanar_quad(f.v):
+ # print "NON QUAD??"
+ # f.sel = 1
+
+
+ # Now reselect all faces
+ for f in mesh.faces:
+ f.sel = 1
+ mesh.quadToTriangle()
+
+ # FIXME: using NMesh to sort faces. We should avoid that!
+ nmesh = NMesh.GetRaw(mesh.name)
+
+ # Define the comparator locally: the one in __simpleDepthSort is not
+ # in scope here
+ by_furthest_z = (lambda f1, f2:
+ cmp(max([v.co[2] for v in f1]), max([v.co[2] for v in f2])+EPS)
+ )
+
+ # remember that _higher_ z values mean further points
+ nmesh.faces.sort(by_furthest_z)
+ nmesh.faces.reverse()
+
+ # Begin depth sort tests
+
+ # use the smooth flag to set marked faces
+ for f in nmesh.faces:
+ f.smooth = 0
+
+ facelist = nmesh.faces[:]
+ maplist = []
+
+
+ # The steps are _at_least_ equal to len(facelist); we do not count
+ # the faces coming out from splitting!!
+ progress.setActivity("HSR: Newell", len(facelist))
+ #progress.setQuiet(True)
+
+
+ while len(facelist):
+ debug("\n----------------------\n")
+ debug("len(facelits): %d\n" % len(facelist))
+ P = facelist[0]
+
+ pSign = sign(P.normal[2])
+
+ # We can discard faces parallel to the view vector
+ #if P.normal[2] == 0:
+ # facelist.remove(P)
+ # continue
+
+ split_done = 0
+ face_marked = 0
+
+ for Q in facelist[1:]:
+
+ debug("P.smooth: " + str(P.smooth) + "\n")
+ debug("Q.smooth: " + str(Q.smooth) + "\n")
+ debug("\n")
+
+ qSign = sign(Q.normal[2])
+ # TODO: check also if Q is parallel??
+
+ # Test 0: We need to test only those Qs whose furthest vertex
+ # is closer to the observer than the closest vertex of P.
+
+ zP = [v.co[2] for v in P.v]
+ zQ = [v.co[2] for v in Q.v]
+ notZOverlap = min(zP) > max(zQ) + EPS
+
+ if notZOverlap:
+ debug("\nTest 0\n")
+ debug("NOT Z OVERLAP!\n")
+ if Q.smooth == 0:
+ # If Q is not marked then we can safely print P
+ break
+ else:
+ debug("met a marked face\n")
+ continue
+
+
+ # Test 1: X extent overlapping
+ xP = [v.co[0] for v in P.v]
+ xQ = [v.co[0] for v in Q.v]
+ #notXOverlap = (max(xP) <= min(xQ)) or (max(xQ) <= min(xP))
+ notXOverlap = (min(xQ) >= max(xP)-EPS) or (min(xP) >= max(xQ)-EPS)
+
+ if notXOverlap:
+ debug("\nTest 1\n")
+ debug("NOT X OVERLAP!\n")
+ continue
+
+
+ # Test 2: Y extent Overlapping
+ yP = [v.co[1] for v in P.v]
+ yQ = [v.co[1] for v in Q.v]
+ #notYOverlap = (max(yP) <= min(yQ)) or (max(yQ) <= min(yP))
+ notYOverlap = (min(yQ) >= max(yP)-EPS) or (min(yP) >= max(yQ)-EPS)
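+ # In both tests an overlap smaller than EPS counts as no overlap,
+ # so faces that merely touch along an edge are not compared further.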
+
+ if notYOverlap:
+ debug("\nTest 2\n")
+ debug("NOT Y OVERLAP!\n")
+ continue
+
+
+ # Test 3: P vertices are all behind the plane of Q
+ n = 0
+ for Pi in P:
+ d = qSign * HSR.Distance(Vector(Pi), Q)
+ if d <= EPS:
+ n += 1
+ pVerticesBehindPlaneQ = (n == len(P))
+
+ if pVerticesBehindPlaneQ:
+ debug("\nTest 3\n")
+ debug("P BEHIND Q!\n")
+ continue
+
+
+ # Test 4: Q vertices in front of the plane of P
+ n = 0
+ for Qi in Q:
+ d = pSign * HSR.Distance(Vector(Qi), P)
+ if d >= -EPS:
+ n += 1
+ qVerticesInFrontPlaneP = (n == len(Q))
+
+ if qVerticesInFrontPlaneP:
+ debug("\nTest 4\n")
+ debug("Q IN FRONT OF P!\n")
+ continue
+
+
+ # Test 5: Check if projections of polygons effectively overlap,
+ # in previous tests we checked only bounding boxes.
+
+ #if not projectionsOverlap(P, Q):
+ if not ( HSR.projectionsOverlap(P, Q) or HSR.projectionsOverlap(Q, P)):
+ debug("\nTest 5\n")
+ debug("Projections do not overlap!\n")
+ continue
+
+ # We still can't say if P obscures Q.
+
+ # But if Q is marked we do a face-split trying to resolve a
+ # difficulty (maybe a visibility cycle).
+ if Q.smooth == 1:
+ # Split P or Q
+ debug("Possibly a cycle detected!\n")
+ debug("Split here!!\n")