def _doSceneDepthSorting(self, scene):
    """Sort objects in the scene by distance from the camera.

    The object sorting is done according to the object centers:
    each object's world position is compared to the camera world
    position and the scene children are relinked in that order
    (nearest first, since the sort is ascending).
    """

    # Hoist the camera position out of the sort: it is invariant.
    camPos = Vector(self._cameraWorldPosition())

    Objects = scene.getChildren()

    # Key-based sort (Python 2.4+): each object's distance is computed
    # once, instead of twice per comparison as with the old cmp() form.
    Objects.sort(key=lambda obj:
                 (self._worldPosition(obj) - camPos).length)

    # Update the scene: relink the children so the scene stores them
    # in sorted order.
    for o in Objects:
        scene.unlink(o)
        scene.link(o)
+
def _joinMeshObjectsInScene(self, scene):
    """Merge all the Mesh Objects in a scene into a single Mesh Object.

    A new object named 'BigOne' absorbs every mesh object in the
    scene; the originals are then unlinked from the scene.
    """
    oList = [o for o in scene.getChildren() if o.getType() == 'Mesh']

    # Nothing to merge: avoid linking a pointless empty container
    # object into the scene.
    if not oList:
        return

    bigObj = Object.New('Mesh', 'BigOne')
    bigObj.join(oList)

    # Replace the individual meshes with the merged one.
    scene.link(bigObj)
    for o in oList:
        scene.unlink(o)
+
+
+ # Per object methods
+
def _convertToRawMeshObj(self, object):
    """Bake a geometry-based object down to a plain mesh object.

    The source object's evaluated geometry is captured into a fresh
    mesh named 'RawMesh_<name>'; the returned object carries that
    mesh and inherits the source object's transformation matrix.
    """
    rawName = 'RawMesh_' + object.name

    rawMesh = Mesh.New(rawName)
    rawMesh.getFromObject(object.name)

    meshObj = Object.New('Mesh', rawName)
    meshObj.link(rawMesh)
    meshObj.setMatrix(object.getMatrix())

    return meshObj
+
+ def _doModelToWorldCoordinates(self, mesh, matrix):
+ """Transform object coordinates to world coordinates.
+
+ This step is done simply applying to the object its tranformation
+ matrix and recalculating its normals.
+ """
+ mesh.transform(matrix, True)
+
def _doObjectDepthSorting(self, mesh):
    """Sort faces in an object back to front.

    The faces are ordered by the distance from the camera of their
    farthest vertex (max distance), then the list is reversed so the
    farthest faces come first (painter's algorithm).  Min or average
    vertex distance are possible alternative criteria.
    """
    # Camera position is invariant: hoist it out of the sort.
    camPos = Vector(self._cameraWorldPosition())

    # Key-based sort: the max distance of each face is computed once,
    # instead of recomputing both sides on every cmp() comparison.
    mesh.faces.sort(key=lambda f:
                    max([(Vector(v.co) - camPos).length for v in f]))

    # Ascending sort + reverse => farthest faces first (same final
    # order as the original cmp-based sort followed by reverse()).
    mesh.faces.reverse()
+
def _doBackFaceCulling(self, mesh):
    """Simple Backface Culling routine.

    At this level we simply do a visibility test face by face and then
    select the vertices belonging to visible faces.
    """

    # Select all vertices, so edges without faces can be displayed
    for v in mesh.verts:
        v.sel = 1

    Mesh.Mode(Mesh.SelectModes['FACE'])
    # Loop on faces: a face ends up selected iff it passes the
    # visibility test (see _isFaceVisible, defined elsewhere).
    for f in mesh.faces:
        f.sel = 0
        if self._isFaceVisible(f):
            f.sel = 1

    # Propagate the face selection to the vertices in two passes:
    # first deselect every vertex of each hidden face, then reselect
    # every vertex of each visible face.  The pass order matters -- a
    # vertex shared by a hidden and a visible face ends up selected.
    # Is this the correct way to propagate the face selection info to the
    # vertices belonging to a face ??
    # TODO: Using the Mesh class this should come for free. Right?
    Mesh.Mode(Mesh.SelectModes['VERTEX'])
    for f in mesh.faces:
        if not f.sel:
            for v in f:
                v.sel = 0

    for f in mesh.faces:
        if f.sel:
            for v in f:
                v.sel = 1
+
def _doColorAndLighting(self, mesh):
    """Apply an Illumination model to the object.

    The Illumination model used is the Phong one, it may be inefficient,
    but I'm just learning about rendering and starting from Phong seemed
    the most natural way.
    """

    # If the mesh has vertex colors already, use them,
    # otherwise turn them on and do some calculations
    if mesh.hasVertexColours():
        return
    mesh.hasVertexColours(True)

    materials = mesh.materials

    # TODO: use multiple lighting sources
    light_obj = self.lights[0]
    light_pos = self._worldPosition(light_obj)
    light = light_obj.data

    camPos = self._cameraWorldPosition()

    # We do per-face color calculation (FLAT Shading), we can easily turn
    # to a per-vertex calculation if we want to implement some shading
    # technique. For an example see:
    # http://www.miralab.unige.ch/papers/368.pdf
    for f in mesh.faces:
        # Only shade faces that survived the visibility/selection pass.
        if not f.sel:
            continue

        mat = None
        if materials:
            mat = materials[f.mat]

        # A new default material
        if not mat:
            mat = Material.New('defMat')

        # Light direction.
        # NOTE(review): this normalizes the light *position*, i.e. it
        # treats the lamp as directional (at infinity); for a point
        # light the direction should be (light_pos - surface point).
        L = Vector(light_pos).normalize()

        # View direction: from the face's first vertex to the camera.
        V = (Vector(camPos) - Vector(f.v[0].co)).normalize()

        # Face normal.
        N = Vector(f.no).normalize()

        # Reflection of L about N (here '*' between vectors is the
        # dot product in the legacy Mathutils API).
        R = 2 * (N*L) * N - L

        # TODO: Attenuation factor (not used for now)
        # fd is computed below but never applied to the components yet.
        a0 = 1; a1 = 0.0; a2 = 0.0
        d = (Vector(f.v[0].co) - Vector(light_pos)).length
        fd = min(1, 1.0/(a0 + a1*d + a2*d*d))

        # Ambient component
        Ia = 1.0
        ka = mat.getAmb() * Vector([0.1, 0.1, 0.1])
        Iamb = Ia * ka

        # Diffuse component (add light.col for kd)
        kd = mat.getRef() * Vector(mat.getRGBCol())
        Ip = light.getEnergy()
        # NOTE(review): N*L is not clamped to 0, so light from behind
        # gives a negative diffuse term, only masked by the final clamp.
        Idiff = Ip * kd * (N*L)

        # Specular component
        ks = mat.getSpec() * Vector(mat.getSpecCol())
        ns = mat.getHardness()
        # NOTE(review): V*R can be negative; pow() of a negative base
        # with an integer hardness flips sign instead of being zeroed.
        Ispec = Ip * ks * pow((V * R), ns)

        # Emissive component
        ki = Vector([mat.getEmit()]*3)

        I = ki + Iamb + Idiff + Ispec

        # Clamp I values between 0 and 1
        I = [ min(c, 1) for c in I]
        I = [ max(0, c) for c in I]
        tmp_col = [ int(c * 255.0) for c in I]

        # Flat shading: the same color is assigned to every vertex
        # of the face.
        vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2], 255)
        f.col = []
        for v in f.v:
            f.col.append(vcol)
+
+ def _doEdgesStyle(self, mesh, style):
+ """Process Mesh Edges. (For now copy the edge data, in next version it
+ can be a place where recognize silouhettes and/or contours).
+
+ input: an edge list
+ return: a processed edge list
+ """
+ #print "\tTODO: _doEdgeStyle()"