X-Git-Url: https://git.ao2.it/vrm.git/blobdiff_plain/2bfb6873ab89019125b7e6602eac6ca8fef7940c..f3c77e9118aad35bfc7180996315c56b88b20706:/vrm.py diff --git a/vrm.py b/vrm.py index bbff105..c694b77 100755 --- a/vrm.py +++ b/vrm.py @@ -1,250 +1,817 @@ #!BPY - """ Name: 'VRM' -Blender: 237 +Blender: 241 Group: 'Export' -Tooltip: 'Vector Rendering Method Export Script' +Tooltip: 'Vector Rendering Method Export Script 0.3' """ +# --------------------------------------------------------------------- +# Copyright (c) 2006 Antonio Ospite +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --------------------------------------------------------------------- +# +# NOTE: I do not know who is the original author of 'vrm'. +# The present code is almost entirely rewritten from scratch, +# but if I have to give credits to anyone, please let me know, +# so I can update the copyright. +# +# --------------------------------------------------------------------- +# +# Additional credits: +# Thanks to Emilio Aguirre for S2flender from which I took inspirations :) +# Thanks to Anthony C. D'Agostino for the original backface.py script +# +# --------------------------------------------------------------------- import Blender -from Blender import Scene, Object, Lamp, Camera +from Blender import Scene, Object, Mesh, NMesh, Lamp, Camera +from Blender.Mathutils import * from math import * -from Blender.Window import * -from Blender.Scene import Render - -def init(): - print "Init\n" - renderDir = context.getRenderPath() +# --------------------------------------------------------------------- +# +## Projections classes +# +# --------------------------------------------------------------------- + +class Projector: + """Calculate the projection of an object given the camera. + + A projector is useful to so some per-object transformation to obtain the + projection of an object given the camera. + The main method is #doProjection# see the method description for the + parameter list. + """ + + def __init__(self, cameraObj, canvasRatio): + """Calculate the projection matrix. + + The projection matrix depends, in this case, on the camera settings, + and also on object transformation matrix. + """ + + camera = cameraObj.getData() + + aspect = float(canvasRatio[0])/float(canvasRatio[1]) + near = camera.clipStart + far = camera.clipEnd + + fovy = atan(0.5/aspect/(camera.lens/32)) + fovy = fovy * 360/pi + + # What projection do we want? + if camera.type: + m2 = self._calcOrthoMatrix(fovy, aspect, near, far, 17) #camera.scale) + else: + m2 = self._calcPerspectiveMatrix(fovy, aspect, near, far) + + + # View transformation + cam = Matrix(cameraObj.getInverseMatrix()) + cam.transpose() + + # FIXME: remove the commented part, we used to pass object in local + # coordinates, but this is not very clean, we should apply modelview + # tranformations _before_ (at some other level). 
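+        # In other words the intended chain is (sketch):
+        #
+        #   p_clip = P * V * M * p_object
+        #
+        # where M (the per-object model matrix) is applied earlier by
+        # Renderer._doModelViewTransformations(), V is the inverse camera
+        # matrix computed above, and P is the perspective/orthographic
+        # matrix m2. Here we therefore only compose P * V and expect the
+        # mesh vertices to arrive already in world coordinates.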
+ #m1 = Matrix(obMesh.getMatrix()) + #m1.transpose() + + #mP = cam * m1 + mP = cam + mP = m2 * mP + + self.projectionMatrix = mP + + ## + # Public methods + # + + def doProjection(self, v): + """Project the point on the view plane. + + Given a vertex calculate the projection using the current projection + matrix. + """ + + # Note that we need the vertex expressed using homogeneous coordinates + p = self.projectionMatrix * Vector(v).resize4D() + + if p[3]>0: + p[0] = p[0]/p[3] + p[1] = p[1]/p[3] + + return p -# distance from camera Z' -def Distance(PX,PY,PZ): + ## + # Private methods + # - dist = sqrt(PX*PX+PY*PY+PZ*PZ) - return dist + def _calcPerspectiveMatrix(self, fovy, aspect, near, far): + """Return a perspective projection matrix.""" + + top = near * tan(fovy * pi / 360.0) + bottom = -top + left = bottom*aspect + right= top*aspect + x = (2.0 * near) / (right-left) + y = (2.0 * near) / (top-bottom) + a = (right+left) / (right-left) + b = (top+bottom) / (top - bottom) + c = - ((far+near) / (far-near)) + d = - ((2*far*near)/(far-near)) + + m = Matrix( + [x, 0.0, a, 0.0], + [0.0, y, b, 0.0], + [0.0, 0.0, c, d], + [0.0, 0.0, -1.0, 0.0]) + + return m + + def _calcOrthoMatrix(self, fovy, aspect , near, far, scale): + """Return an orthogonal projection matrix.""" + + top = near * tan(fovy * pi / 360.0) * (scale * 10) + bottom = -top + left = bottom * aspect + right= top * aspect + rl = right-left + tb = top-bottom + fn = near-far + tx = -((right+left)/rl) + ty = -((top+bottom)/tb) + tz = ((far+near)/fn) + + m = Matrix( + [2.0/rl, 0.0, 0.0, tx], + [0.0, 2.0/tb, 0.0, ty], + [0.0, 0.0, 2.0/fn, tz], + [0.0, 0.0, 0.0, 1.0]) + + return m + + +# --------------------------------------------------------------------- +# +## Object representation class +# +# --------------------------------------------------------------------- + +# TODO: a class to represent the needed properties of a 2D vector image +# Just use a NMesh structure? + + +# --------------------------------------------------------------------- +# +## Vector Drawing Classes +# +# --------------------------------------------------------------------- + +## A generic Writer + +class VectorWriter: + """ + A class for printing output in a vectorial format. + + Given a 2D representation of the 3D scene the class is responsible to + write it is a vector format. + + Every subclasses of VectorWriter must have at last the following public + methods: + - printCanvas(mesh) --- where mesh is as specified before. + """ + + def __init__(self, fileName): + """Open the file named #fileName# and set the canvas size.""" + + self.file = open(fileName, "w") + print "Outputting to: ", fileName + + + context = Scene.GetCurrent().getRenderingContext() + self.canvasSize = ( context.imageSizeX(), context.imageSizeY() ) + + + ## + # Public Methods + # + + def printCanvas(mesh): + return + + ## + # Private Methods + # + + def _printHeader(): + return + + def _printFooter(): + return + + +## SVG Writer + +class SVGVectorWriter(VectorWriter): + """A concrete class for writing SVG output. + + The class does not support animations, yet. + Sorry. 
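+
+    Roughly, the emitted document is expected to have the following shape
+    (illustrative sketch only, attribute values omitted):
+
+        <?xml version="1.0"?>
+        <svg width="..." height="...">
+            <g>                                         one group per mesh object
+                <polygon points="..."/>                 from _printPolygon()
+                <line x1="..." y1="..." x2="..." y2="..."/>   from _printEdges()
+            </g>
+        </svg>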
+ """ + + def __init__(self, file): + """Simply call the parent Contructor.""" + VectorWriter.__init__(self, file) + + + ## + # Public Methods + # + + def open(self): + self._printHeader() + + def close(self): + self._printFooter() + + + + def printCanvas(self, scene, doPrintPolygons=True, doPrintEdges=False, showHiddenEdges=False): + """Convert the scene representation to SVG.""" + + Objects = scene.getChildren() + for obj in Objects: + + if(obj.getType() != 'Mesh'): + continue + # + + self.file.write("\n") + + + if doPrintPolygons: + for face in obj.getData().faces: + self._printPolygon(face) + + if doPrintEdges: + self._printEdges(obj.getData(), showHiddenEdges) + + self.file.write("\n") + + + ## + # Private Methods + # + + def _printHeader(self): + """Print SVG header.""" + + self.file.write("\n") + self.file.write("\n") + self.file.write("\n\n" % + self.canvasSize) + + def _printFooter(self): + """Print the SVG footer.""" + + self.file.write("\n\n") + self.file.close() + + def _printEdges(self, mesh, showHiddenEdges=False): + """Print the wireframe using mesh edges... is this the correct way? + """ + + stroke_width=0.5 + stroke_col = [0, 0, 0] + + self.file.write("\n") + + for e in mesh.edges: + + hidden_stroke_style = "" + + # And edge is selected if both vertives are selected + if e.v1.sel == 0 or e.v2.sel == 0: + if showHiddenEdges == False: + continue + else: + hidden_stroke_style = ";\n stroke-dasharray:3, 3" + + p1 = self._calcCanvasCoord(e.v1) + p2 = self._calcCanvasCoord(e.v2) + + self.file.write("\n") + + self.file.write("\n") + + + + def _printPolygon(self, face): + """Print our primitive, finally. + """ + + wireframe = False + + stroke_width=0.5 + + self.file.write("\n") + + def _calcCanvasCoord(self, v): + + pt = Vector([0, 0, 0]) + + mW = self.canvasSize[0]/2 + mH = self.canvasSize[1]/2 + + # rescale to canvas size + pt[0] = round(v[0]*mW)+mW + pt[1] = round(v[1]*mH)+mH + + # For now we want (0,0) in the top-left corner of the canvas + # Mirror and translate along y + pt[1] *= -1 + pt[1] += self.canvasSize[1] + + return pt + -def Dodaj(x,y,z): +# --------------------------------------------------------------------- +# +## Rendering Classes +# +# --------------------------------------------------------------------- + +class Renderer: + """Render a scene viewed from a given camera. - print "" + This class is responsible of the rendering process, hence transformation + and projection of the ojects in the scene are invoked by the renderer. + + The user can optionally provide a specific camera for the rendering, see + the #doRendering# method for more informations. + """ + + def __init__(self): + """Make the rendering process only for the current scene by default. + """ + + # Render the current Scene set as a READ-ONLY property + self._SCENE = Scene.GetCurrent() + + # Use the aspect ratio of the scene rendering context + context = self._SCENE.getRenderingContext() + self.canvasRatio = (context.aspectRatioX(), context.aspectRatioY()) + + # Render from the currently active camera + self.camera = self._SCENE.getCurrentCamera() + + + ## + # Public Methods + # + + def doRendering(self, outputWriter, animation=0): + """Render picture or animation and write it out. + + The parameters are: + - a Vector writer object than will be used to output the result. + - a flag to tell if we want to render an animation or the only + current frame. 
+ """ + + context = self._SCENE.getRenderingContext() + currentFrame = context.currentFrame() + + # Handle the animation case + if animation == 0: + startFrame = currentFrame + endFrame = startFrame + else: + startFrame = context.startFrame() + endFrame = context.endFrame() + + # Do the rendering process frame by frame + print "Start Rendering!" + for f in range(startFrame, endFrame+1): + context.currentFrame(f) + renderedScene = self.doRenderScene(self._SCENE) + outputWriter.printCanvas(renderedScene, + doPrintPolygons=False, doPrintEdges=True, showHiddenEdges=True) + + # clear the rendered scene + self._SCENE.makeCurrent() + Scene.unlink(renderedScene) + del renderedScene + + print "Done!" + context.currentFrame(currentFrame) + + + + def doRenderScene(self, inputScene): + """Control the rendering process. + + Here we control the entire rendering process invoking the operation + needed to transform and project the 3D scene in two dimensions. + """ + + # Use some temporary workspace, a full copy of the scene + workScene = inputScene.copy(2) + + # Get a projector for this scene. + # NOTE: the projector wants object in world coordinates, + # so we should apply modelview transformations _before_ + # projection transformations + proj = Projector(self.camera, self.canvasRatio) + + # global processing of the scene + self._doDepthSorting(workScene) + + # Per object activities + Objects = workScene.getChildren() + + for obj in Objects: + + if (obj.getType() != 'Mesh'): + print "Type:", obj.getType(), "\tSorry, only mesh Object supported!" + continue + # + + self._doModelViewTransformations(obj) + + self._doBackFaceCulling(obj) + + self._doColorAndLighting(obj) + + # 'style' can be a function that determine + # if an edge should be showed? + self._doEdgesStyle(obj, style=None) + + self._doProjection(obj, proj) + + return workScene + + + def oldRenderScene(scene): + + # Per object activities + Objects = workScene.getChildren() + + for obj in Objects: + + if (obj.getType() != 'Mesh'): + print "Type:", obj.getType(), "\tSorry, only mesh Object supported!" 
+ continue + + # Get a projector for this object + proj = Projector(self.camera, obj, self.canvasSize) + + # Let's store the transformed data + transformed_mesh = NMesh.New("flat"+obj.name) + transformed_mesh.hasVertexColours(1) + + # process Edges + self._doProcessEdges(obj) + + for v in obj.getData().verts: + transformed_mesh.verts.append(v) + transformed_mesh.edges = self._processEdges(obj.getData().edges) + #print transformed_mesh.edges + + + # Store the materials + materials = obj.getData().getMaterials() + + meshfaces = obj.getData().faces + + for face in meshfaces: + + # if the face is visible flatten it on the "picture plane" + if self._isFaceVisible(face, obj, cameraObj): + + # Store transformed face + newface = NMesh.Face() + + for vert in face: + + p = proj.doProjection(vert.co) + + tmp_vert = NMesh.Vert(p[0], p[1], p[2]) + + # Add the vert to the mesh + transformed_mesh.verts.append(tmp_vert) + + newface.v.append(tmp_vert) + + + # Per-face color calculation + # code taken mostly from the original vrm script + # TODO: understand the code and rewrite it clearly + ambient = -150 + + fakelight = Object.Get("Lamp").loc + if fakelight == None: + fakelight = [1.0, 1.0, -0.3] + + norm = Vector(face.no) + vektori = (norm[0]*fakelight[0]+norm[1]*fakelight[1]+norm[2]*fakelight[2]) + vduzine = fabs(sqrt(pow(norm[0],2)+pow(norm[1],2)+pow(norm[2],2))*sqrt(pow(fakelight[0],2)+pow(fakelight[1],2)+pow(fakelight[2],2))) + intensity = floor(ambient + 200*acos(vektori/vduzine))/200 + if intensity < 0: + intensity = 0 + + if materials: + tmp_col = materials[face.mat].getRGBCol() + else: + tmp_col = [0.5, 0.5, 0.5] + + tmp_col = [ (c>intensity) and int(round((c-intensity)*10)*25.5) for c in tmp_col ] + + vcol = NMesh.Col(tmp_col[0], tmp_col[1], tmp_col[2]) + newface.col = [vcol, vcol, vcol, 255] + + transformed_mesh.addFace(newface) + + # at the end of the loop on obj + + transformed_obj = Object.New(obj.getType(), "flat"+obj.name) + transformed_obj.link(transformed_mesh) + transformed_obj.loc = obj.loc + newscene.link(transformed_obj) + + + return workScene + + + ## + # Private Methods + # + + # Faces methods + + def _isFaceVisible(self, face, obj, camObj): + """Determine if a face of an object is visible from a given camera. + + The normals need to be transformed, but note that we should apply only the + rotation part of the tranformation matrix, since the normals are + normalized and they can be intended as starting from the origin. + + The view vector is calculated from the camera location and one of the + vertices of the face (expressed in World coordinates, after applying + modelview transformations). + + After those transformations we determine if a face is visible by computing + the angle between the face normal and the view vector, this angle + corresponds somehow to the dot product between the two. If the product + results <= 0 then the angle between the two vectors is less that 90 + degrees and then the face is visible. + + There is no need to normalize those vectors since we are only interested in + the sign of the cross product and not in the product value. 
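+
+        More precisely, since view_vect points from the face towards the
+        camera, a positive dot product means the angle between the normal
+        and the view vector is below 90 degrees and the face is
+        front-facing; the test below therefore discards faces with d <= 0.
+        A worked sketch: n = (0, 0, 1), face vertex at the origin, camera
+        at (0, 0, 10)  ->  view_vect = (0, 0, 10), d = 10 > 0, face kept;
+        the same face seen from (0, 0, -10) gives d = -10 and is culled.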
+ """ + + # The transformation matrix of the object + mObj = Matrix(obj.getMatrix()) + mObj.transpose() + + # The normal after applying the current object rotation + #normal = mObj.rotationPart() * Vector(face.no) + normal = Vector(face.no) + + # View vector in orthographics projections can be considered simply s the + # camera position + #view_vect = Vector(camObj.loc) + + # View vector as in perspective projections + # it is the dofference between the camera position and + # one point of the face, we choose the first point, + # but maybe a better choice may be the farthest point from the camera. + point = Vector(face[0].co) + #point = mObj * point.resize4D() + #point.resize3D() + view_vect = Vector(camObj.loc) - point + + + # if d <= 0 the face is visible from the camera + d = view_vect * normal + + if d <= 0: + return False + else: + return True -def RotatePoint(PX,PY,PZ,AngleX,AngleY,AngleZ): + + # Scene methods + + def _doClipping(): + return + + def _doDepthSorting(self, scene): + + cameraObj = self.camera + Objects = scene.getChildren() + + Objects.sort(lambda obj1, obj2: + cmp(Vector(Vector(cameraObj.loc) - Vector(obj1.loc)).length, + Vector(Vector(cameraObj.loc) - Vector(obj2.loc)).length + ) + ) + + # hackish sorting of faces according to the max z value of a vertex + for o in Objects: + + if (o.getType() != 'Mesh'): + continue + # + + mesh = o.data + mesh.faces.sort( + lambda f1, f2: + # Sort faces according to the min z coordinate in a face + #cmp(min([v[2] for v in f1]), min([v[2] for v in f2]))) + + # Sort faces according to the max z coordinate in a face + cmp(max([v[2] for v in f1]), max([v[2] for v in f2]))) + + # Sort faces according to the avg z coordinate in a face + #cmp(sum([v[2] for v in f1])/len(f1), sum([v[2] for v in f2])/len(f2))) + mesh.faces.reverse() + mesh.update() + + # update the scene + # FIXME: check if it is correct + scene.update() + #for o in scene.getChildren(): + # scene.unlink(o) + #for o in Objects: + # scene.link(o) + + # Per object methods + + def _doModelViewTransformations(self, object): + if(object.getType() != 'Mesh'): + return + + matMV = object.matrix + mesh = object.data + mesh.transform(matMV, True) + mesh.update() + + + def _doBackFaceCulling(self, object): + if(object.getType() != 'Mesh'): + return + + print "doing Backface Culling" + mesh = object.data + + # Select all vertices, so edges without faces can be displayed + for v in mesh.verts: + v.sel = 1 + + Mesh.Mode(Mesh.SelectModes['FACE']) + # Loop on faces + for f in mesh.faces: + f.sel = 0 + if self._isFaceVisible(f, object, self.camera): + f.sel = 1 + + for f in mesh.faces: + if not f.sel: + for v in f: + v.sel = 0 + + for f in mesh.faces: + if f.sel: + for v in f: + v.sel = 1 + + mesh.update() + + + + #Mesh.Mode(Mesh.SelectModes['VERTEX']) + + def _doColorAndLighting(self, object): + return + + def _doEdgesStyle(self, object, style): + """Process Mesh Edges. (For now copy the edge data, in next version it + can be a place where recognize silouhettes and/or contours). 
+ + input: an edge list + return: a processed edge list + """ + return + + def _doProjection(self, object, projector): + + if(object.getType() != 'Mesh'): + return + + mesh = object.data + for v in mesh.verts: + p = projector.doProjection(v.co) + v[0] = p[0] + v[1] = p[1] + v[2] = p[2] + mesh.update() + + + +# --------------------------------------------------------------------- +# +## Main Program +# +# --------------------------------------------------------------------- + + +# FIXME: really hackish code, just to test if the other parts work - NewPoint = [] - # Rotate X - NewY = (PY * cos(AngleX))-(PZ * sin(AngleX)) - NewZ = (PZ * cos(AngleX))+(PY * sin(AngleX)) - # Rotate Y - PZ = NewZ - PY = NewY - NewZ = (PZ * cos(AngleY))-(PX * sin(AngleY)) - NewX = (PX * cos(AngleY))+(PZ * sin(AngleY)) - PX = NewX - PZ = NewZ - # Rotate Z - NewX = (PX * cos(AngleZ))-(PY * sin(AngleZ)) - NewY = (PY * cos(AngleZ))+(PX * sin(AngleZ)) - NewPoint.append(NewX) - NewPoint.append(NewY) - NewPoint.append(NewZ) - return NewPoint - -def flatern(vertx, verty, vertz): - - cam = Camera.get() # Get the cameras in scene - Lens = cam[0].getLens() # The First Blender camera lens - - camTyp = cam[0].getType() - - msize = (context.imageSizeX(), context.imageSizeY()) - xres = msize[0] # X res for output - yres = msize[1] # Y res for output - ratio = xres/yres - - screenxy=[0,0] - x=-vertx - y=verty - z=vertz - - fov = atan(ratio * 16.0 / Lens) # Get fov stuff - dist = xres/2*tan(fov) # Calculate dist from pinhole camera to image plane -#---------------------------- -# calculate x'=dist*x/z & y'=dist*x/z -#---------------------------- - screenxy[0]=int(xres/2+4*x*dist/z) - screenxy[1]=int(yres/2+4*y*dist/z) - return screenxy - -def writesvg(ob): - - for i in range(0, ob[0]+1): - print ob[i], "\n" - print "WriteSVG\n" - -######## -# Main # -######## - -scena = Scene.GetCurrent() -context = scena.getRenderingContext() - -#print dir(context) - -init() - -tacka = [0,0,0] -lice = [3,tacka,tacka,tacka,tacka] - -msize = (context.imageSizeX(), context.imageSizeY()) -print msize - -file=open("proba.svg","w") - -file.write("\n") -#file.write("\n") - -Objects = Blender.Object.Get() -NUMobjects = len(Objects) - -startFrm = context.startFrame() -endFrm = startFrm -#endFrm = context.endFrame() -camera = scena.getCurrentCamera() # Get the current camera - -for f in range(startFrm, endFrm+1): - #scena.currentFrame(f) - Blender.Set('curframe', f) - - DrawProgressBar (f/(endFrm+1-startFrm),"Rendering ..." 
+ str(context.currentFrame())) - - print "Frame: ", f, "\n" - if startFrm <> endFrm: file.write("\n") - for o in range(NUMobjects): - - if Objects[o].getType() == "Mesh": - - obj = Objects[o] # Get the first selected object - objname = obj.name # The object name - - - OBJmesh = obj.getData() # Get the mesh data for the object - numfaces=len(OBJmesh.faces) # The number of faces in the object - numEachVert=len(OBJmesh.faces[0]) # The number of verts in each face - - #------------ - # Get the Material Colors - #------------ -# MATinfo = OBJmesh.getMaterials() -# -# if len(MATinfo) > 0: -# RGB=MATinfo[0].rgbCol -# R=int(RGB[0]*255) -# G=int(RGB[1]*255) -# B=int(RGB[2]*255) -# color=`R`+"."+`G`+"."+`B` -# print color -# else: -# color="100.100.100" - - objekat = [] - - objekat.append(0) - - for face in range(numfaces): - numvert = len(OBJmesh.faces[face]) - objekat.append(numvert) - objekat[0] += 1 - -# backface cutting - a = [] - a.append(OBJmesh.faces[face][0][0]) - a.append(OBJmesh.faces[face][0][1]) - a.append(OBJmesh.faces[face][0][2]) - a = RotatePoint(a[0], a[1], a[2], obj.RotX, obj.RotY, obj.RotZ) - a[0] += obj.LocX - camera.LocX - a[1] += obj.LocY - camera.LocY - a[2] += obj.LocZ - camera.LocZ - b = [] - b.append(OBJmesh.faces[face][1][0]) - b.append(OBJmesh.faces[face][1][1]) - b.append(OBJmesh.faces[face][1][2]) - b = RotatePoint(b[0], b[1], b[2], obj.RotX, obj.RotY, obj.RotZ) - b[0] += obj.LocX - camera.LocX - b[1] += obj.LocY - camera.LocY - b[2] += obj.LocZ - camera.LocZ - c = [] - c.append(OBJmesh.faces[face][numvert-1][0]) - c.append(OBJmesh.faces[face][numvert-1][1]) - c.append(OBJmesh.faces[face][numvert-1][2]) - c = RotatePoint(c[0], c[1], c[2], obj.RotX, obj.RotY, obj.RotZ) - c[0] += obj.LocX - camera.LocX - c[1] += obj.LocY - camera.LocY - c[2] += obj.LocZ - camera.LocZ - - norm = [0,0,0] - norm[0] = (b[1] - a[1])*(c[2] - a[2]) - (c[1] - a[1])*(b[2] - a[2]) - norm[1] = -((b[0] - a[0])*(c[2] - a[2]) - (c[0] - a[0])*(b[2] - a[2])) - norm[2] = (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1]) - - d = norm[0]*a[0] + norm[1]*a[1] + norm[2]*a[2] - - if d < 0: - file.write("\n") - if startFrm <> endFrm: - file.write("\n") - file.write("\n") - file.write("\n") - -#flatern() -#writesvg(objekat) -file.write("") -file.close() -print file -DrawProgressBar (1.0,"Finished.") -print "Finished\n" +def vectorize(filename): + """The vectorizing process is as follows: + + - Open the writer + - Render the scene + - Close the writer + + If you want to render an animation the second pass should be + repeated for any frame, and the frame number should be passed to the + renderer. + """ + writer = SVGVectorWriter(filename) + + writer.open() + + renderer = Renderer() + renderer.doRendering(writer) + + writer.close() + + +# Here the main +if __name__ == "__main__": + # with this trick we can run the script in batch mode + try: + Blender.Window.FileSelector (vectorize, 'Save SVG', "proba.svg") + Blender.Redraw() + except: + from Blender import Window + editmode = Window.EditMode() + if editmode: Window.EditMode(0) + + vectorize("proba.svg") + if editmode: Window.EditMode(1) + + +
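+
+# A usage sketch for GUI-less (batch) runs, which is what the try/except
+# above is meant to support; the .blend file name is just an example:
+#
+#   blender -b scene.blend -P vrm.py
+#
+# When no interactive window is available the FileSelector call is expected
+# to fail, so the except branch runs and the SVG is written to "proba.svg".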