From fa5f1ca735e5dca02162712c565d7ebe1e1a27bb Mon Sep 17 00:00:00 2001 From: OssiLehtinen Date: Thu, 13 Jul 2017 13:46:16 +0300 Subject: [PATCH 01/10] Update protocol_swiftpro.py Add the laser move protocol --- protocol_swiftpro.py | 1 + 1 file changed, 1 insertion(+) diff --git a/protocol_swiftpro.py b/protocol_swiftpro.py index 9f9ab81..600ce39 100644 --- a/protocol_swiftpro.py +++ b/protocol_swiftpro.py @@ -7,6 +7,7 @@ READY = "@1" OK = "OK" SET_POSITION = "G0 X{} Y{} Z{} F{}" +SET_POSITION_LASER = "G1 X{} Y{} Z{} F{}" SET_POSITION_RELATIVE = "G2204 X{} Y{} Z{} F{}" SIMULATION = "M2222 X{} Y{} Z{} P0" GET_FIRMWARE_VERSION = "P2203" From 643705527f3003097e7b240fd55ae60784640a0f Mon Sep 17 00:00:00 2001 From: OssiLehtinen Date: Thu, 13 Jul 2017 13:49:02 +0300 Subject: [PATCH 02/10] Update uArmRobot.py Add the gripper-method --- uArmRobot.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/uArmRobot.py b/uArmRobot.py index a17cb61..0d72387 100644 --- a/uArmRobot.py +++ b/uArmRobot.py @@ -100,6 +100,11 @@ def pump(self, state): cmd = protocol.SET_PUMP.format(int(state)) self.sendcmd(cmd,True) + def gripper(self, state): + self.pumping = state + cmd = protocol.SET_GRIPPER.format(int(state)) + self.sendcmd(cmd,True) + def mode(self, modeid): # 0= Normal # 1= Laser From e7d0548096cd0215b7cf703c4f23eb54125b5ff6 Mon Sep 17 00:00:00 2001 From: OssiLehtinen Date: Thu, 13 Jul 2017 13:49:55 +0300 Subject: [PATCH 03/10] Update uArmRobot.py Add the python version related checks. --- uArmRobot.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/uArmRobot.py b/uArmRobot.py index 0d72387..15eafed 100644 --- a/uArmRobot.py +++ b/uArmRobot.py @@ -9,7 +9,7 @@ import sys import math from math import pi - +import sys class robot: serid = 100 @@ -38,7 +38,7 @@ def connect(self): while (not Ready): line = self.ser.readline() if (self.debug): print (line) - if line.startswith("@5"): + if line.startswith(b"@5"): Ready = True self.connected = True if (self.debug): print ("Connected!") @@ -64,13 +64,20 @@ def sendcmd(self, cmnd, waitresponse): id = self.serid self.serid += 1 cmnd = "#{} {}".format(id,cmnd) - cmndString = bytes(cmnd + "\n") + if sys.version_info >= (3,0): + cmndString = bytes(cmnd + "\n", 'utf8') + else: + cmndString = bytes(cmnd + "\n") if (self.debug): print ("Serial send: {}".format(cmndString)) self.ser.write(cmndString) if (waitresponse): line = self.ser.readline() - while not line.startswith("$" + str(id)): - line = self.ser.readline() + if sys.version_info >= (3,0): + while not line.startswith(bytes("$" + str(id), 'utf8')): + line = self.ser.readline() + else: + while not line.startswith("$" + str(id)): + line = self.ser.readline() if (self.debug): print ("Response {}".format(line)) if (self.moving): self.moving = False From d5d646f96e72bba21b696ecd1d55bedc74f5b6a2 Mon Sep 17 00:00:00 2001 From: OssiLehtinen Date: Thu, 13 Jul 2017 13:52:39 +0300 Subject: [PATCH 04/10] Add files via upload Add the laser/svg/bitmap related extensions --- bird.svg | 24 ++++ bitmap_draw_example.py | 31 +++++ grid.py | 48 ++++++++ laser_draw_example.py | 33 ++++++ uArmLaserRobot.py | 251 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 387 insertions(+) create mode 100644 bird.svg create mode 100644 bitmap_draw_example.py create mode 100644 grid.py create mode 100644 laser_draw_example.py create mode 100644 uArmLaserRobot.py diff --git a/bird.svg b/bird.svg new file mode 100644 index 0000000..17a0fc8 --- /dev/null +++ b/bird.svg @@ -0,0 +1,24 @@ + + + + + + + + + 
+
diff --git a/bitmap_draw_example.py b/bitmap_draw_example.py
new file mode 100644
index 0000000..31dee18
--- /dev/null
+++ b/bitmap_draw_example.py
@@ -0,0 +1,31 @@
+import uArmLaserRobot
+
+mode = 1
+
+#Configure Serial port
+#serialport = "com3" # for windows
+serialport = "/dev/ttyACM0" # for linux like system
+
+# Connect to uArm
+myRobot = uArmLaserRobot.laserRobot(serialport)
+myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
+myRobot.connect()
+myRobot.mode(mode) # Set the mode (0 = Normal, 1 = Laser)
+
+
+
+x_offset = 170
+height = 150
+draw_speed = 6000
+targetWidth = 25
+lineSpacing = .5
+
+
+myRobot.drawBitmap('skull.jpg', targetWidth, lineSpacing, x_offset, height, draw_speed, mode)
+
+
+
+# Dock the arm before exit
+myRobot.goto(225, 0, 150, 6000)
+myRobot.goto(130, 0, 90, 6000)
+myRobot.goto(97, 0, 30, 6000)
diff --git a/grid.py b/grid.py
new file mode 100644
index 0000000..a48c4b3
--- /dev/null
+++ b/grid.py
@@ -0,0 +1,48 @@
+# Script for drawing a grid with the laser for calibration purposes
+
+# Please, don't leave the arm unattended while operating the laser.
+
+import uArmLaserRobot
+
+mode = 1
+
+#Configure Serial port
+#serialport = "com3" # for windows
+serialport = "/dev/ttyACM0" # for linux like system
+
+# Connect to uArm
+myRobot = uArmLaserRobot.laserRobot(serialport)
+myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
+myRobot.connect()
+myRobot.mode(mode) # Set the mode (0 = Normal, 1 = Laser)
+
+
+
+# Larger grid
+#gridSizeX = 120
+#gridSizeY = 200
+#gridOffsetX = 140
+
+# Smaller test grid
+gridSizeX = 40
+gridSizeY = 40
+gridOffsetX = 180
+
+
+workingHeight = 150
+drawSpeed = 1000
+
+# Lines along y at fixed x
+for i in range(int(gridOffsetX/10), int((gridOffsetX+gridSizeX)/10+1)):
+    print(i*10)
+    myRobot.goto(i*10, -gridSizeY/2, workingHeight, 6000)
+    myRobot.goto_laser(i*10, gridSizeY/2, workingHeight, drawSpeed)
+    myRobot.goto(i*10, gridSizeY/2, workingHeight, 6000) # Switch the laser off
+
+# Lines along x at fixed y
+for i in range(int(-gridSizeY/20), int(gridSizeY/20+1)):
+    print(i*10)
+    myRobot.goto(gridOffsetX+gridSizeX, i*10, workingHeight, 6000)
+    myRobot.goto_laser(gridOffsetX, i*10, workingHeight, drawSpeed)
+    myRobot.goto(gridOffsetX, i*10, workingHeight, 6000) # Switch the laser off
+
diff --git a/laser_draw_example.py b/laser_draw_example.py
new file mode 100644
index 0000000..92029a3
--- /dev/null
+++ b/laser_draw_example.py
@@ -0,0 +1,33 @@
+import uArmLaserRobot
+
+mode = 0
+
+steps_per_seg = 10
+x_offset = 170
+height = -11
+draw_speed = 100
+targetWidth = 20
+lineSpacing = 1.0
+
+#Configure Serial port
+#serialport = "com3" # for windows
+serialport = "/dev/ttyACM0" # for linux like system
+
+# Connect to uArm
+myRobot = uArmLaserRobot.laserRobot(serialport)
+myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
+myRobot.connect()
+myRobot.mode(mode) # Set the mode (0 = Normal, 1 = Laser)
+
+coords = myRobot.parseSVG('bird.svg', targetWidth, x_offset, steps_per_seg)
+
+myRobot.set_path_start(coords, height, mode)
+
+myRobot.drawPath(coords, draw_speed, height, mode)
+#myRobot.fillSVG('bird.svg', targetWidth, lineSpacing, x_offset, height, draw_speed, mode) # The bird svg doesn't have fill atm, so this is a bit silly...
+myRobot.loff()
+
+# Dock the arm before exit
+myRobot.goto(225, 0, 150, 6000)
+myRobot.goto(130, 0, 90, 6000)
+myRobot.goto(97, 0, 30, 6000)
diff --git a/uArmLaserRobot.py b/uArmLaserRobot.py
new file mode 100644
index 0000000..ded74ba
--- /dev/null
+++ b/uArmLaserRobot.py
@@ -0,0 +1,251 @@
+# Extensions to the uArm Swift Pro library by: Richard Garsthagen
+#
+# Extensions authored by Ossi Lehtinen.
+# The laser cuts holes in stuff. Use at your own risk and don't leave it unattended.
+#
+# I'm most curious if you create something neat with this, so if you like, don't hesitate to drop me an email: ossi.lehtinen@gmail.com
+#
+# The svgo and convert binaries are external to Python. Meaning, you need to install imagemagick and node.js on your system (e.g., sudo apt-get install imagemagick npm) and the svgo command for node.js (npm install -g svgo)
+
+
+import uArmRobot
+import protocol_swiftpro as protocol
+from svgpathtools import svg2paths2, wsvg
+import numpy as np
+import time
+from PIL import Image
+from subprocess import call
+import tempfile
+import platform
+
+
+class laserRobot(uArmRobot.robot):
+
+    delay_after_move = 0.0
+    move_pen_lift = 3.0
+    temp_folder = tempfile.mkdtemp()
+
+    def goto_laser(self,x,y,z,speed): # a G1 move, which fires the laser while moving (in laser mode)
+        self.moving = True
+        x = str(round(x, 2))
+        y = str(round(y, 2))
+        z = str(round(z, 2))
+        s = str(round(speed, 2))
+        cmd = protocol.SET_POSITION_LASER.format(x,y,z,s)
+        self.sendcmd(cmd, True)
+
+    def loff(self): # a plain G0 move, which switches the laser off
+        self.goto(200,0,150,6000)
+
+    def parseSVG(self, filename, targetWidth, xOffset, steps_per_seg):
+
+        if(platform.system() == "Windows"):
+            call(["node", "C:/Users/oswald/AppData/Roaming/npm/node_modules/svgo/bin/svgo", filename, "-o", self.temp_folder + "/clean.svg"])
+        else:
+            call(["svgo", filename, "-o", self.temp_folder + "/clean.svg"])
+
+        # Parse the path
+        paths, attributes, svg_attrs = svg2paths2(self.temp_folder + "/clean.svg")
+
+        # Find the bounding box
+        xmin = 100000
+        xmax = -10000
+        ymin = 10000
+        ymax = -10000
+
+        for i in range(len(paths)):
+            path = paths[i]
+            attribute = attributes[i]
+            # A crude check for whether a path should be drawn. Does it have a style defined? This caused trouble elsewhere...
+ for seg in path: + for p in range(steps_per_seg+1): + cp = seg.point(float(p)/float(steps_per_seg)) + cx = np.real(cp) + cy = np.imag(cp) + if(cx < xmin): xmin = cx + if(cy < ymin): ymin = cy + if(cx > xmax): xmax = cx + if(cy > ymax): ymax = cy + + + # The scaling factor to reach the targetWidth + scale = targetWidth/(xmax-xmin) + + # Transform the paths to lists of coordinates + coords = [] + + for i in range(len(paths)): + path = paths[i] + attribute = attributes[i] + if('stroke' in attribute or 'class' in attribute): + for seg in path: + segcoords = [] + for p in range(steps_per_seg+1): + cp = seg.point(float(p)/float(steps_per_seg)) + segcoords.append([scale*(np.real(cp)-xmin)+xOffset, scale*(np.imag(cp)-ymin) - scale*((ymax-ymin)/2.0)]) + coords.append(segcoords) + + return coords + + + + def set_path_start(self, coords, height, mode): + + move_lift = 0 + if(mode == 0): + move_lift = self.move_pen_lift + + self.goto(coords[0][0][0], coords[0][0][1], height+move_lift*2, 6000) + + if(mode == 1): + for i in range(0, 5): + self.goto_laser(coords[0][0][0], coords[0][0][1], height+move_lift*2, 6000) + #time.sleep(0.0001) + self.goto(coords[0][0][0], coords[0][0][1], height+move_lift*2, 6000) + time.sleep(1.0) + + + def drawPath(self, coords, draw_speed, height, mode): + + # Lift the pen if using one + move_lift = 0 + if(mode == 0): + move_lift = self.move_pen_lift + + + # The starting point + self.goto(coords[0][0][0], coords[0][0][1], height+move_lift*2, 6000) + + lastCoord = coords[0][0] + + epsilon = 0.1 + #if(abs(seg[0][0] - lastCoord[0]) > epsilon and abs(seg[0][1] - lastCoord[1]) > epsilon): + + for seg in coords: + if(abs(seg[0][0] - lastCoord[0]) > epsilon and abs(seg[0][1] - lastCoord[1]) > epsilon): + self.goto(lastCoord[0], lastCoord[1], height+move_lift, 6000) + self.goto(seg[0][0], seg[0][1], height+move_lift, 6000) + # Not sure if this helps with anything, but the idea is to give the arm a moment after a long transition + time.sleep(0.15) + for p in seg: + self.goto_laser(p[0], p[1], height, draw_speed) + lastCoord = p + + + # Back to the starting point (and turn the laser off) + self.goto(lastCoord[0], lastCoord[1], height+move_lift*2, 6000) + self.goto(coords[0][0][0], coords[0][0][1], height+move_lift*2, 6000) + + def fillSVG(self, filename, targetWidth, lineSpacing, xOffset, height, draw_speed, mode): + + # Convert the svg to bitmap + concmd = "convert" + if(platform.system() == "Windows"): + concmd = "magick" + + call([concmd, "-density", "1000", "-background", "white", "-alpha", "remove", filename, self.temp_folder + "/clean.png"]) + + self.drawBitmap(self.temp_folder + "/clean.png", targetWidth, lineSpacing, xOffset, height, draw_speed, mode) + + def drawBitmap(self, imagepath, printSizeX, lineSpacing, xOffset, height, draw_speed, mode): + + # Lift the pen if using one + move_lift = 0 + if(mode == 0): + move_lift = self.move_pen_lift + + + im = Image.open(imagepath) + + smallImSizeX = printSizeX*2 # Determines the resolution of the dithering + + scale = float(smallImSizeX)/float(im.size[0]) + + im2 = im.resize( (int(scale*im.size[0]), int(scale*im.size[1])) ) + im = im2.convert("1") + + imageSizeX = im.size[0] + imageSizeY = im.size[1] + + scale = float(printSizeX)/float(imageSizeX) + + printSizeY = imageSizeY*scale + yOffset = -printSizeY/2 + + # Y-direction + for j in range(int(yOffset/lineSpacing), -int(yOffset/lineSpacing)+1): + + cY = j*lineSpacing + print(cY) + + cp = 0 + nextp = cp+1 + cpVal = im.getpixel((cp, int((cY-yOffset)/scale))) + self.goto(xOffset, cY, 
height+move_lift, 6000) + + prevX = xOffset + prevY = cY + + while(cp < imageSizeX-1): + + while(im.getpixel((nextp, int((cY-yOffset)/scale)))==cpVal and nextp < imageSizeX-1): + nextp = nextp + 1 + + if(cpVal == 0): + self.goto(prevX, prevY, height, draw_speed) + self.goto_laser(xOffset+nextp*scale, cY, height, draw_speed) + self.goto(xOffset+nextp*scale, cY, height+move_lift, draw_speed) + else: + self.goto(xOffset+nextp*scale, cY, height+move_lift, 6000) + + prevX = xOffset+nextp*scale + prevY = cY + + self.goto(prevX, prevY, height+move_lift, 6000) + cp = nextp + cpVal = im.getpixel((cp, int((cY-yOffset)/scale))) + + + # X-direction + for j in range(int(xOffset/lineSpacing), int((xOffset+printSizeX)/lineSpacing)): + + cX = j*lineSpacing + print(cX) + + cp = 0 + nextp = cp+1 + cpVal = im.getpixel((int((cX-xOffset)/scale), cp)) + self.goto(cX, yOffset, height+move_lift, 6000) + prevX = cX + prevY = yOffset + + while(cp < imageSizeY-1): + + while(im.getpixel((int((cX-xOffset)/scale), nextp))==cpVal and nextp < imageSizeY-1): + nextp = nextp + 1 + + if(cpVal == 0): + self.goto(prevX, prevY, height, draw_speed) + self.goto_laser(cX, yOffset+nextp*scale, height, draw_speed) + self.goto(cX, yOffset+nextp*scale, height+move_lift, draw_speed) + else: + self.goto(cX, yOffset+nextp*scale, height+move_lift, 6000) + + prevX = cX + prevY = yOffset+nextp*scale + + self.goto(prevX, prevY, height+move_lift, 6000) + cp = nextp + cpVal = im.getpixel((int((cX-xOffset)/scale), cp)) + + + + + + self.loff() + + + + + + From 2310669def62485fe15c645c4d93b3c2be98b075 Mon Sep 17 00:00:00 2001 From: OssiLehtinen Date: Thu, 13 Jul 2017 13:56:39 +0300 Subject: [PATCH 05/10] Add files via upload --- uArmLaser_windows_guide.txt | 93 +++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 uArmLaser_windows_guide.txt diff --git a/uArmLaser_windows_guide.txt b/uArmLaser_windows_guide.txt new file mode 100644 index 0000000..69fe622 --- /dev/null +++ b/uArmLaser_windows_guide.txt @@ -0,0 +1,93 @@ + +Download and install Python: + +https://www.python.org/downloads/windows/ + +I picked the 3.6.2 x86-64 version. + +Set your 'path' environment variable to include your python path: + +1. Start menu -> Edit environment variables for your account +2. Double click on Path +3. Add two new lines with: + C:\Users\YOUR_USER_NAME\AppData\Local\Programs\Python\Python36 + C:\Users\YOUR_USER_NAME\AppData\Local\Programs\Python\Python36\Scripts + + Note, change the user name to yours, and also otherwise check that the path is where you have python installed. + + + + + +Install python extensions: + +1. Start command prompt: Start-menu -> Command prompt +2. Run commands: + pip install pyserial + pip install svgpathtools + pip install svgwrite + pip install numpy + pip install pillow + + + + +Download and install imagemagick: + +https://www.imagemagick.org/script/binary-releases.php + +I picked version ImageMagick-7.0.6-0-Q16-x64-dll.exe. Default options. + + + +Download and install node.js: + +https://nodejs.org/en/download/ + +I went for the 64-bit Windows installer. Default options. + + + +Install the svgo extension to node.js: +Launch command prompt again: + npm install -g svgo + + + + +Download the py-scripts from +https://github.com/OssiLehtinen/uArm + +Click 'Clone or Download' -> Download zip +Open the downloaded archive and copy the contents somewhere such as C:/Users/YOUR_USER_NAME/uArm/ + + + + +Make svgo discoverable from python. Not pretty at all, so if someone has better ideas... + +1. 
Open uArmLaserRobot.py in a text editor. The file is in the folder you just copied from GitHub. Right click on the file and choose Edit with IDLE
+2. Locate the lines with
+
+   call(["node", "C:/Users/oswald/AppData/Roaming/npm/node_modules/svgo/bin/svgo", filename, "-o", self.temp_folder + "/clean.svg"])
+
+3. Modify the C:/Users/.../svgo/bin/svgo path to where the svgo binary is located on your system. Probably just changing the username from oswald to yours is enough for most people.
+4. Save the file
+
+
+
+Test run:
+
+1. Open laser_draw_example.py in IDLE
+2. Run menu -> Run Module
+
+If you get error messages about ports or what not, you need to find the correct serial port for your device (the scripts default to "/dev/ttyACM0"; on Windows it needs to be a com-port such as "com3")
+1. Start menu -> devices
+2. Locate Arduino Mega 2560, see which com-port is given on that line and edit the py-script accordingly.
+
+
+To draw other images, change the filename from "bird.svg" to the file you wish to draw and go from there. You can also play around with the variables set in the script, such as x_offset and what not.
+
+
+
+Good luck and have fun ;)
From 575e3f36fb9a47f9d9c2bef41e56d9d4800c537e Mon Sep 17 00:00:00 2001
From: OssiLehtinen
Date: Thu, 13 Jul 2017 13:57:25 +0300
Subject: [PATCH 06/10] Update README.md

---
 README.md | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/README.md b/README.md
index a1f7da8..0792d5e 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,23 @@
+
+# uArmProPython
+uArm Swift Pro robot arm Python library and example code snippets. Based on a fork from Richar Gasthaagen's repository
+
+Note 1: The svgo and convert binaries are external to Python. Meaning, you need to install imagemagick and node.js on your system (e.g., sudo apt-get install imagemagick npm) and the svgo command for node.js (npg install -g svgo). These functionalities were tested on Kubuntu Linux 16.04. Installation instructions for Windows can be found in the file uArmLaser_windows_guide.txt.
+
+Please, be careful with the laser. Don't blind yourself or burn your house down :)
+
+If you create something cool with these functions, please show me too :) Primarily I'm just curious, and also seeing other people's work might give ideas on how to improve the code. So, don't hesitate to drop me an email: ossi.lehtinen@gmail.com
+
+
+
+
+
+
+
+
+
+The original readme:
+
 # uArmProPython
 Python Library for the uArm swift Pro robot arm
From a235ff28e141d18f913909d5d94205a14ff3a9d6 Mon Sep 17 00:00:00 2001
From: OssiLehtinen
Date: Thu, 13 Jul 2017 14:04:52 +0300
Subject: [PATCH 07/10] Update laser_draw_example.py

Corrected some parameters
---
 laser_draw_example.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/laser_draw_example.py b/laser_draw_example.py
index 92029a3..ee9c97b 100644
--- a/laser_draw_example.py
+++ b/laser_draw_example.py
@@ -1,10 +1,10 @@
 import uArmLaserRobot
 
-mode = 0
+mode = 1
 
 steps_per_seg = 10
 x_offset = 170
-height = -11
+height = 150
 draw_speed = 100
 targetWidth = 20
 lineSpacing = 1.0
From 0b5cb7ee1a57b194cf6ca6609ae10859e67c4064 Mon Sep 17 00:00:00 2001
From: OssiLehtinen
Date: Thu, 13 Jul 2017 15:27:30 +0300
Subject: [PATCH 08/10] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 0792d5e..e42e2f7 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 
 # uArmProPython
-uArm Swift Pro robot arm Python library and example code snippets.
Based on a fork from Richar Gasthaagen's repository +uArm Swift Pro robot arm Python library and example code snippets. Based on a fork from Richar Gasthaagen's repository. Note 1: The svgo and convert binaries are external to Python. Meaning, you need to install imagemagick and node.js on your system (e.g., sudo apt-get install imagemagick npm) and the svgo command for node.js (npg install -g svgo). These functionalities were tested on Kubuntu Linux 16.04. Installation instructions for Windows can be found in the file uArmLaser_windows_guide.txt. From 9e30fb7562cc89152a0b3f140b13d5cf483f730f Mon Sep 17 00:00:00 2001 From: OssiLehtinen Date: Thu, 13 Jul 2017 15:34:57 +0300 Subject: [PATCH 09/10] Delete svg_example.py --- svg_example.py | 175 ------------------------------------------------- 1 file changed, 175 deletions(-) delete mode 100644 svg_example.py diff --git a/svg_example.py b/svg_example.py deleted file mode 100644 index d0c343a..0000000 --- a/svg_example.py +++ /dev/null @@ -1,175 +0,0 @@ -# Example made by OssiLehtinen -# - -from svgpathtools import svg2paths, wsvg -import numpy as np - -import uArmRobot -import time - - -#Configure Serial Port -#serialport = "com3" # for windows -serialport = "/dev/ttyACM0" # for linux like system - -# Connect to uArm -myRobot = uArmRobot.robot(serialport) -myRobot.debug = True # Enable / Disable debug output on screen, by default disabled -myRobot.connect() -myRobot.mode(1) # Set mode to Normal - -# Read in the svg -paths, attributes = svg2paths('drawing.svg') - -scale = .25 -steps_per_seg = 3 -coords = [] -x_offset = 200 -height = 90 -draw_speed = 1000 - -# Convert the paths to a list of coordinates -for i in range(len(paths)): - path = paths[i] - attribute = attributes[i] - # A crude check for whether a path should be drawn. Does it have a style defined? - if 'style' in attribute: - for seg in path: - segcoords = [] - for p in range(steps_per_seg+1): - cp = seg.point(float(p)/float(steps_per_seg)) - segcoords.append([-np.real(cp)*scale+x_offset, np.imag(cp)*scale]) - coords.append(segcoords) - - - - -# The starting point -myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000) - - -for seg in coords: - myRobot.goto(seg[0][0], seg[0][1], height, 6000) - time.sleep(0.15) - for p in seg: - myRobot.goto_laser(p[0], p[1], height, draw_speed) - - - -# Back to the starting point (and turn the laser off) -myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000) - -I used Inkscape to produce some test-files and everything seemed to work fine. One thing to do, is convert text to paths in Inkscape before saving. - -Surely not an issue-free solution, but perhaps a starting point for something more advanced. One noticeable thing is that drawing the line segments is a bit stuttery, not sure how to improve this. - -Cheers, -Ossi - @OssiLehtinen - -OssiLehtinen commented 10 minutes ago -The same stuff after some modifications: - -Allow targeting set width of the image. -Allow lifting up the pen between paths, if such a pen is used. 
-from svgpathtools import svg2paths, wsvg -import numpy as np - -import uArmRobot -import time - -mode = 1 - -#Configure Serial port -#serialport = "com3" # for windows -serialport = "/dev/ttyACM0" # for linux like system - -# Connect to uArm -myRobot = uArmRobot.robot(serialport) -myRobot.debug = True # Enable / Disable debug output on screen, by default disabled -myRobot.connect() -myRobot.mode(mode) # Set mode to Normal - - -steps_per_seg = 10 -x_offset = 140 -height = 150 -draw_speed = 2500 - -targetWidth = 160 - - -# Parse the path -paths, attributes = svg2paths('008.svg') - - -# Find the bounding box -xmin = 100000 -xmax = -10000 -ymin = 10000 -ymax = -10000 - -for i in range(len(paths)): - path = paths[i] - attribute = attributes[i] - # A crude check for wether a path should be drawn. Does it have a style defined? This caused trouble elsewhere... - if 'style' in attribute: - for seg in path: - for p in range(steps_per_seg+1): - cp = seg.point(float(p)/float(steps_per_seg)) - cx = np.real(cp) - cy = np.imag(cp) - if(cx < xmin): xmin = cx - if(cy < ymin): ymin = cy - if(cx > xmax): xmax = cx - if(cy > ymax): ymax = cy - - -# The scaling factor to reach the targetWidth -scale = targetWidth/(xmax-xmin) - -# Transform the paths to lists of coordinates -coords = [] - -for i in range(len(paths)): - path = paths[i] - attribute = attributes[i] - # A crude check for wether a path should be drawn. Does it have a style defined? - if 'style' in attribute: - for seg in path: - segcoords = [] - for p in range(steps_per_seg+1): - cp = seg.point(float(p)/float(steps_per_seg)) - segcoords.append([scale*(np.real(cp)-xmin)+x_offset, scale*(np.imag(cp)-ymin) - scale*((ymax-ymin)/2.0)]) - coords.append(segcoords) - - - -# Lift the pen if using one -move_lift = 0 -if(mode == 0): - move_lift = 5 - -# The starting point -myRobot.goto(coords[0][0][0], coords[0][0][1], height+move_lift*2, 6000) - -lastCoord = coords[0][0] - -epsilon = 0.1 - - - if(abs(seg[0][0] - lastCoord[0]) > epsilon and abs(seg[0][1] - lastCoord[1]) > epsilon): - -for seg in coords: - if(abs(seg[0][0] - lastCoord[0]) > epsilon and abs(seg[0][1] - lastCoord[1]) > epsilon): - myRobot.goto(lastCoord[0], lastCoord[1], height+move_lift, 6000) - myRobot.goto(seg[0][0], seg[0][1], height+move_lift, 6000) - # Not sure if this helps with anything, but the idea is to give the arm a moment after a long transition - time.sleep(0.15) - for p in seg: - myRobot.goto_laser(p[0], p[1], height, draw_speed) - lastCoord = p - - -# Back to the starting point (and turn the laser off) -myRobot.goto(coords[0][0][0], coords[0][0][1], height+move_lift*2, 6000) From b1fb604f8f07cb802d934ffae455cdaf8096396a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Frank=20Dr=C3=A4ger?= Date: Tue, 25 Jul 2017 00:00:21 +0200 Subject: [PATCH 10/10] Check for Line elements in parseSVG paths I was wondering if it makes sense to cut a line into 'steps_per_seg' subsegments. This significantly reduces the number of G-Code commands sent to the robot. I came across this as I am working on my Inkscape plugin. But I still have some distance to go. If you are interested: https://github.com/fdraeger/uArmSwiftPro_InkscapePlugin Keep up the good work! 
--- uArmLaserRobot.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/uArmLaserRobot.py b/uArmLaserRobot.py index ded74ba..fe6046d 100644 --- a/uArmLaserRobot.py +++ b/uArmLaserRobot.py @@ -11,6 +11,7 @@ import uArmRobot import protocol_swiftpro as protocol from svgpathtools import svg2paths2, wsvg +from svgpathtools.path import Line import numpy as np import time from PIL import Image @@ -80,9 +81,16 @@ def parseSVG(self, filename, targetWidth, xOffset, steps_per_seg): if('stroke' in attribute or 'class' in attribute): for seg in path: segcoords = [] - for p in range(steps_per_seg+1): - cp = seg.point(float(p)/float(steps_per_seg)) - segcoords.append([scale*(np.real(cp)-xmin)+xOffset, scale*(np.imag(cp)-ymin) - scale*((ymax-ymin)/2.0)]) + # no need to create segments, if we already have a Line. + if isinstance(seg, Line): + cp = seg.start + segcoords.append([scale*(np.real(cp)-xmin)+xOffset, scale*(np.imag(cp)-ymin) - scale*((ymax-ymin)/2.0)]) + cp = seg.end + segcoords.append([scale*(np.real(cp)-xmin)+xOffset, scale*(np.imag(cp)-ymin) - scale*((ymax-ymin)/2.0)]) + else: + for p in range(steps_per_seg+1): + cp = seg.point(float(p)/float(steps_per_seg)) + segcoords.append([scale*(np.real(cp)-xmin)+xOffset, scale*(np.imag(cp)-ymin) - scale*((ymax-ymin)/2.0)]) coords.append(segcoords) return coords
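
A rough sketch of what the Line check in the last patch buys (this snippet is not part of the patches; the segment values and the count_points helper are made up for illustration): with steps_per_seg = 10, a straight Line contributes only its two endpoints to the coordinate list, while any other segment type is still sampled at steps_per_seg+1 points, which is where the reduction in G-code moves comes from.

    from svgpathtools.path import Line, CubicBezier

    steps_per_seg = 10

    def count_points(seg):
        # Mirrors the branch added to parseSVG: a Line is fully described by
        # its start and end points, anything else gets sampled as before.
        if isinstance(seg, Line):
            return 2
        return steps_per_seg + 1

    straight = Line(0 + 0j, 100 + 0j)
    curved = CubicBezier(0 + 0j, 30 + 40j, 70 + 40j, 100 + 0j)

    print(count_points(straight))  # 2 coordinates for the straight segment
    print(count_points(curved))    # 11 coordinates for the curved segment

For an SVG traced mostly with straight edges, that is roughly a five-fold reduction in moves at these settings.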