Original link: https://blog.csdn.net/weixin_51583957/article/details/123958565
The machine-vision line-following processing is based on the official OpenMV example code.
OpenMV official source code: book.openmv.cc/project/follow-lines.html
Following the official video and tutorial, load the source code onto the OpenMV.
Our car follows a red line, so the color threshold has to be changed.
1. Open helloworld.py from the file examples.
2. Open Tools / Machine Vision / Threshold Editor / Frame Buffer.
3. Adjust the sliders until the line we want to track appears completely white and the background completely black; the threshold will differ with the ambient light level. Once tuned, copy the LAB threshold into your own code (the THRESHOLD = () statement) and you are done.
4. For the OpenMV itself the pins are already configured, so once the car and the hardware are assembled it can drive the motors directly. However, we want to drive them with the STM32, so the data has to be sent to the STM32 over the serial port. For the OpenMV low-level driver principles, look up the low-level functions on the official website.
1. The key point is getting the serial communication between the OpenMV and the STM32 working; once this step is done properly, the rest of the debugging becomes easy.
2. During debugging we found that when the car sits on different sides of the line, the output value computed by the OpenMV is positive or negative (for example, positive on the right and negative on the left).
3. Because negative numbers are awkward for the STM32 to handle, a negative value computed by the OpenMV is replaced by its absolute value plus 100; the STM32 then treats any received value greater than 100 as a negative one (see the sketch below).
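On the STM32 side, the reverse mapping could look like the following minimal sketch (the function name decode_output is an assumption, not taken from the original project):

/* Recover the signed error from the value sent by the OpenMV,
 * where negatives were encoded as abs(value) + 100. */
int decode_output(int val)
{
    if (val > 100)
        return -(val - 100);  /* the OpenMV value was negative */
    return val;               /* the OpenMV value was non-negative */
}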
1. Configure the STM32 UART baud rate to match the OpenMV. The UART code is ported from the ALIENTEK (正点原子) examples. Note that the ALIENTEK receive routine only accepts strings ending in 0x0d, 0x0a (they must be terminated by carriage return + line feed), otherwise nothing is received, so the OpenMV has to send a carriage return and line feed after sending OUTPUT.
2. After the STM32 receives the serial data it is stored in the USART_RX_BUF array; then define another array, source, and move the buffer data into it.
3. Converting the string to an integer is plain C; related code is easy to find online, so work through it yourself.
4. Finally, simply feed the returned sum into the duty cycle of the motor speed control to change the speed (see the sketch after this list).
5. Every car differs in motor speed, weight, battery level and so on, so to get the line following right, the PID has to be tuned in the OpenMV code.
6. When tuning the PID, it is enough to focus on the P parameter of rho_pid; the theta PID below it does not need to be changed.
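As a rough illustration of item 4, the parsed value can be folded into the left and right wheel duty cycles. This is only a sketch: BASE_DUTY, Set_Motor_PWM and apply_line_error are assumed names, not part of the original project, and the actual call depends on your motor driver.

/* Assumed motor-driver entry point: writes the two PWM compare values. */
extern void Set_Motor_PWM(int left_duty, int right_duty);

#define BASE_DUTY 500  /* straight-line duty cycle, tune per car (assumption) */

/* err: signed line error recovered from the OpenMV output (see decode_output above) */
void apply_line_error(int err)
{
    Set_Motor_PWM(BASE_DUTY + err, BASE_DUTY - err);  /* steer by differential duty */
}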
The code is as follows (example):
main.py (used only to recognize the red line; to be used together with pid.py)
THRESHOLD = (42, 95, -128, 109, -124, 127)
import sensor, image, time, os, tf, math
from pyb import UART
from pyb import LED
from pid import PID

rho_pid = PID(p=0.4, i=0)
theta_pid = PID(p=0.001, i=0)

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA)
sensor.set_windowing((320, 240))
sensor.skip_frames(time=2000)

threshold_index = 0
thresholds = [(70, 4, 18, 127, -125, 127),
              (30, 100, -64, -8, -32, 32),
              (0, 30, 0, 64, -128, 0)]

uart = UART(3, 115200)
LED(1).on()
LED(2).on()
LED(3).on()

crosstime = 0
clock = time.clock()
roi = (0, 0, 0, 0)
xun = 0
flag = 5
num = 0

def xunxian(xun):
    if (xun == 6):
        img = sensor.snapshot().binary([THRESHOLD])
        line = img.get_regression([(100, 100)], robust=True)
        if (line):
            rho_err = abs(line.rho()) - img.width() / 2
            if line.theta() > 90:
                theta_err = line.theta() - 180
            else:
                theta_err = line.theta()
            img.draw_line(line.line(), color=127)
            if line.magnitude() > 8:
                rho_output = rho_pid.get_pid(rho_err, 1)
                theta_output = theta_pid.get_pid(theta_err, 1)
                output = rho_output + theta_output
                print(output)

while (True):
    clock.tick()
    if (flag == 5):
        sensor.set_pixformat(sensor.RGB565)
        img = sensor.snapshot().lens_corr(1.8)
        found = 0
        for blob in img.find_blobs([thresholds[threshold_index]], pixels_threshold=200, area_threshold=200, merge=True):
            if blob.elongation() > 0.5:
                img.draw_edges(blob.min_corners(), color=(255, 0, 0))
                img.draw_line(blob.major_axis_line(), color=(0, 255, 0))
                img.draw_line(blob.minor_axis_line(), color=(0, 0, 255))
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
            img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)
            found = 1
            if blob.w() < 40:
                xunxian(6)
            else:
                output_str = "[%3.d,%3.d]" % (0, 1000000000000000000)
                uart.write(output_str)
                print(output_str)
        if found == 0:
            output_str = "[%3.d,%3.d]" % (0, 10)
            uart.write(output_str)
            print(output_str)
pid.py
from pyb import millis
from math import pi, isnan

class PID:
    _kp = _ki = _kd = _integrator = _imax = 0
    _last_error = _last_derivative = _last_t = 0
    _RC = 1 / (2 * pi * 20)

    def __init__(self, p=0, i=0, d=0, imax=0):
        self._kp = float(p)
        self._ki = float(i)
        self._kd = float(d)
        self._imax = abs(imax)
        self._last_derivative = float('nan')

    def get_pid(self, error, scaler):
        tnow = millis()
        dt = tnow - self._last_t
        output = 0
        if self._last_t == 0 or dt > 1000:
            dt = 0
            self.reset_I()
        self._last_t = tnow
        delta_time = float(dt) / float(1000)
        output += error * self._kp
        if abs(self._kd) > 0 and dt > 0:
            if isnan(self._last_derivative):
                derivative = 0
                self._last_derivative = 0
            else:
                derivative = (error - self._last_error) / delta_time
            derivative = self._last_derivative + \
                         ((delta_time / (self._RC + delta_time)) * \
                          (derivative - self._last_derivative))
            self._last_error = error
            self._last_derivative = derivative
            output += self._kd * derivative
        output *= scaler
        if abs(self._ki) > 0 and dt > 0:
            self._integrator += (error * self._ki) * scaler * delta_time
            if self._integrator < -self._imax:
                self._integrator = -self._imax
            elif self._integrator > self._imax:
                self._integrator = self._imax
            output += self._integrator
        return output

    def reset_I(self):
        self._integrator = 0
        self._last_derivative = float('nan')
helloworld_1.py
# Edge Impulse - OpenMV Image Classification Example
THRESHOLD = (66, 20, 25, 67, 14, 52)  # Grayscale threshold for dark things...
import sensor, image, time, os, tf, math
from pyb import UART
from pyb import LED
from pid import PID

rho_pid = PID(p=0.4, i=0)
theta_pid = PID(p=0.001, i=0)

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA)     # Set frame size to QVGA (320x240)
sensor.set_windowing((320, 240))        # Set 240x240 window.
sensor.skip_frames(time=2000)           # Let the camera adjust.

threshold_index = 0  # 0 for red, 1 for green, 2 for blue

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green/blue things. You may wish to tune them...
thresholds = [(70, 4, 18, 127, -125, 127),   # generic_red_thresholds
              (30, 100, -64, -8, -32, 32),   # generic_green_thresholds
              (0, 30, 0, 64, -128, 0)]       # generic_blue_thresholds

uart = UART(3, 115200)
LED(1).on()
LED(2).on()
LED(3).on()

crosstime = 0
clock = time.clock()
roi = (0, 0, 0, 0)
xun = 0
flag = 5  # 4: recognize digit, 5: follow line, 6: match digit, 1: keep searching, 0: digit found
num = 0   # first value selects direction: 0 straight, 1 turn left, 2 turn right

def xunxian(xun):
    if (xun == 6):
        img = sensor.snapshot().binary([THRESHOLD])
        line = img.get_regression([(100, 100)], robust=True)
        if (line):
            rho_err = abs(line.rho()) - img.width() / 2
            if line.theta() > 90:
                theta_err = line.theta() - 180
            else:
                theta_err = line.theta()
            img.draw_line(line.line(), color=127)
            # print(rho_err, line.magnitude(), rho_err)
            if line.magnitude() > 8:
                # if -40<b_err<40 and -30<t_err<30:
                rho_output = rho_pid.get_pid(rho_err, 1)
                theta_output = theta_pid.get_pid(theta_err, 1)
                output = rho_output + theta_output
                if (output < 0):
                    output = abs(output) + 100
                OUTPUT = str(round(output))
                uart.write(OUTPUT)
                uart.write('\r\n')
                print(OUTPUT)

while (True):
    clock.tick()
    # size = uart.any();
    # if size != 0:
    #     command = " "
    #     command = uart.read()
    #     flag = len(str(command))
    #     size = 0
    # print(flag)
    if (flag == 5):
        sensor.set_pixformat(sensor.RGB565)
        img = sensor.snapshot().lens_corr(1.8)
        # img = sensor.snapshot().binary([THRESHOLD])
        found = 0
        for blob in img.find_blobs([thresholds[threshold_index]], pixels_threshold=200, area_threshold=200, merge=True):
            # These values depend on the blob not being circular - otherwise they will be shaky.
            if blob.elongation() > 0.5:
                img.draw_edges(blob.min_corners(), color=(255, 0, 0))
                img.draw_line(blob.major_axis_line(), color=(0, 255, 0))
                img.draw_line(blob.minor_axis_line(), color=(0, 0, 255))
            # These values are stable all the time.
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
            # Note - the blob rotation is unique to 0-180 only.
            img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)
            # print(blob.corners())
            found = 1  # red line found
            if blob.w() < 40:
                # if xun=0:
                #     output_str = "[%3.d,%3.d]" % (0, xun)
                # follow the line
                xunxian(6)
            else:
                output_str = 50000  # crossing: switch to searching for the digit
                OUTPUT = str(round(output_str))
                uart.write(OUTPUT)
                uart.write('\r\n')
                print(OUTPUT)
                print('Z')
            # print(blob.cx(), blob.w(), blob.rotation_deg())
        if found == 0:
            output_str = 10000  # crossing: switch to searching for the digit
            OUTPUT = str(round(output_str))
            uart.write(OUTPUT)
            uart.write('\r\n')
            print(OUTPUT)
            print('T')
For the UART reception, ALIENTEK has a tutorial; the data received into the source array has to be converted with a string-to-int function, that is, code along the lines of the sketch below.
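A minimal sketch of that conversion, not the original post's code (which was shown as an image); the function name parse_output and the stop-at-non-digit loop are assumptions:

/* Convert the ASCII digits received from the OpenMV (copied into source)
 * into an integer. Stops at the first non-digit ('\r', '\n' or '\0'). */
int parse_output(const char *source)
{
    int sum = 0;
    while (*source >= '0' && *source <= '9')
    {
        sum = sum * 10 + (*source - '0');
        source++;
    }
    return sum;
}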
Note: code package from the original blog:
Baidu Netdisk link: https://pan.baidu.com/s/1zNNBOC183Lvu9ocPBAdu9w
Extraction code: 5642