After buying the hardware, note that a jumper cap must be fitted on the header behind the network port, otherwise the board will not power on.
The Jetson Nano system is installed on an SD card, so prepare a 32 GB SD card in advance.
For the OS installation steps, follow the official guide:
Getting Started With Jetson Nano Developer Kit | NVIDIA Developer
Note: when flashing finishes, Windows pops up a number of dialogs asking to format the card. Click Cancel every time and do not format, because those prompts come from the partitions the flashing tool has just created for the system.
Command line: opencv_version
Output: 4.1.1
Command line: nvcc -V
Output: bash: nvcc: command not found
Fix: the CUDA toolkit is in fact already installed; it just needs to be added to the environment variables:
sudo vim ~/.bashrc
Alternatively, go to the Home folder on the device, find the .bashrc file there and open it directly, then append the three lines below at the end. (If .bashrc is not visible in the Home folder, press Ctrl + H to show hidden files.)
Add at the end of the file:
export PATH=/usr/local/cuda-10.2/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
export CUDA_HOME=/usr/local/cuda
Once this is set up (and the shell reloaded), nvcc -V prints its version information normally.
- sudo apt-get install git cmake
- sudo apt-get install python3-dev
- sudo apt-get install libhdf5-serial-dev hdf5-tools
- sudo apt-get install libatlas-base-dev gfortran
- sudo apt-get install python3-pip
- sudo -H pip3 install -U jetson-stats
- sudo systemctl restart jetson_stats.service
- reboot
- jtop
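Besides the jtop terminal UI, jetson-stats also installs a small Python API. As a minimal sketch (it assumes only the jtop module that the jetson-stats package provides), the live statistics can be read from a script like this:

# stats_probe.py - read a few samples via jetson-stats' Python API
# (sketch; assumes the jtop module installed by the jetson-stats package)
from jtop import jtop

with jtop() as jetson:
    samples = 0
    while jetson.ok() and samples < 5:
        print(jetson.stats)  # dict with CPU/GPU load, RAM, temperatures, power mode, ...
        samples += 1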
sudo apt-get update
sudo apt-get full-upgrade
sudo add-apt-repository "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ xenial main multiverse restricted universe"
sudo apt update
sudo vim ~/.bashrc
export CUDA_HOME=/usr/local/cuda-10.2
export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH
export PATH=/usr/local/cuda-10.2/bin:$PATH
source ~/.bashrc
# test
nvcc -V
sudo apt-get install build-essential
sudo apt-get install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt-get install python3-dev python3-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev
sudo apt-get install pkg-config
sudo apt-get install python3-pip
Unzip the OpenCV 4.6 source archive together with the matching opencv_contrib archive (the extra modules, e.g. xfeatures2d), then create a build directory inside the OpenCV source tree and run cmake from there:
- build> cmake
- -D CMAKE_INSTALL_PREFIX=/usr/local
- -D CMAKE_BUILD_TYPE=Release
- -D OPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules
- -D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-10.2
- -D CUDA_ARCH_BIN=5.3
- -D CUDA_ARCH_PTX=""
- -D BUILD_PNG=OFF
- -D BUILD_TIFF=OFF
- -D BUILD_TBB=OFF
- -D BUILD_JPEG=OFF
- -D BUILD_JASPER=OFF
- -D BUILD_ZLIB=OFF
- -D BUILD_EXAMPLES=ON
- -D BUILD_opencv_java=OFF
- -D BUILD_opencv_python2=OFF
- -D BUILD_opencv_python3=ON
- -D ENABLE_PRECOMPILED_HEADERS=OFF
- -D WITH_OPENCL=OFF
- -D WITH_OPENMP=OFF
- -D WITH_FFMPEG=ON
- -D WITH_GSTREAMER=OFF
- -D WITH_GSTREAMER_0_10=OFF
- -D WITH_CUDA=ON
- -D WITH_GTK=ON
- -D WITH_VTK=OFF
- -D WITH_TBB=ON
- -D PYTHON_DEFAULT_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
- -D PYTHON3_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
- -D PYTHON3_NUMPY_INCLUDE_DIRS=$(python3 -c "import numpy; print (numpy.get_include())")
- -D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
- -D WITH_1394=OFF
- -D WITH_OPENEXR=OFF
- -D INSTALL_C_EXAMPLES=ON -D INSTALL_TESTS=OFF
- ..
- build> make -j4
- build> sudo make install
- build> sudo python3 setup.py install
if "sudo python3 setup.py install " error :
pip3 install setuptools
pip3 install --upgrade setuptools
pip3 install --upgrade pip
do sudo python3 setup.py install again!
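Once the install has gone through, it is worth checking that Python picks up the new build and that the CUDA modules are actually in it. A minimal check, using only the standard cv2 API, could look like this:

# check_build.py - sanity-check the freshly installed OpenCV (standard cv2 API only)
import cv2

print(cv2.__version__)                       # should report the version just built
print(cv2.cuda.getCudaEnabledDeviceCount())  # > 0 when the CUDA modules can see the Jetson GPU
for line in cv2.getBuildInformation().splitlines():
    if 'CUDA' in line:
        print(line.strip())                  # e.g. the "NVIDIA CUDA: YES (...)" line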
pip3 install Pillow
pip3 install pygame -i https://pypi.tuna.tsinghua.edu.cn/simple/
CameraCall.py

# encoding: utf-8
import cv2 as cv
import subprocess


class CameraCallTool(object):
    cap = None

    def __init__(self, width, height, CamIndex):
        print('Init Camera Tool!')
        if CamIndex == -1:
            self.CallCameraOnBoard(width, height)
        elif CamIndex == 0:
            self.CallCameraOnCSI(width, height)
        else:
            print('no usb camera!')

    def CallCameraOnCSI(self, width, height):
        # Open the camera; index 0 is the default device. If several cameras
        # are attached, use 1, 2, 3, ... to pick another one.
        self.cap = cv.VideoCapture(0)
        self.cap.set(cv.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv.CAP_PROP_FRAME_HEIGHT, height)
        self.cap.set(cv.CAP_PROP_EXPOSURE, 0.1)

    def Test(self):
        print('CameraCallTool Test')

    def CameraStatus(self):
        return self.cap.isOpened()

    def CallCameraOnBoard(self, width, height):
        gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
        if 'nvcamerasrc' in gst_elements:
            # On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
            gst_str = ('nvcamerasrc ! '
                       'video/x-raw(memory:NVMM), '
                       'width=(int)2592, height=(int)1458, '
                       'format=(string)I420, framerate=(fraction)30/1 ! '
                       'nvvidconv ! '
                       'video/x-raw, width=(int){}, height=(int){}, '
                       'format=(string)BGRx ! '
                       'videoconvert ! appsink').format(width, height)
        elif 'nvarguscamerasrc' in gst_elements:
            gst_str = ('nvarguscamerasrc ! '
                       'video/x-raw(memory:NVMM), '
                       'width=(int)1920, height=(int)1080, '
                       'format=(string)NV12, framerate=(fraction)30/1 ! '
                       'nvvidconv flip-method=2 ! '
                       'video/x-raw, width=(int){}, height=(int){}, '
                       'format=(string)BGRx ! '
                       'videoconvert ! appsink').format(width, height)
        else:
            raise RuntimeError('onboard camera source not found!')
        # return cv.VideoCapture(gst_str, cv.CAP_GSTREAMER)
        self.cap = cv.VideoCapture(gst_str, cv.CAP_GSTREAMER)

    def GetOneFrame(self):
        if self.cap.isOpened():
            ret, frame = self.cap.read()
            if ret is True:
                # The camera faces the user, so flip the frame so it is
                # displayed the right way round.
                frame = cv.flip(frame, 1)
                frame = cv.flip(frame, 0)
                return frame
            else:
                return None
        else:
            return None

    def __del__(self):
        if self.cap is not None:
            self.cap.release()
            print('CameraCallTool del')
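As a quick standalone check of the class above, a preview loop like the following can be used (a sketch; camera_test.py is a hypothetical helper, and it only assumes CameraCall.py is importable and a camera is attached):

# camera_test.py - minimal preview loop for CameraCallTool (hypothetical test script)
import cv2 as cv
from CameraCall import CameraCallTool

cam = CameraCallTool(640, 480, 0)  # index 0 takes the plain VideoCapture path in this class
while cam.CameraStatus():
    frame = cam.GetOneFrame()
    if frame is None:
        continue
    cv.imshow('preview', frame)
    if cv.waitKey(1) == ord('q'):  # press q to quit
        break
del cam
cv.destroyAllWindows()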
DnnClass.py

# encoding: utf-8
import numpy as np
import cv2 as cv
import os
import time


class MyDnn(object):
    # Path to the YOLO files, e.g. G:\vs2017Project\yolov2-tiny-voc
    yolo_dir = './yolov3-tiny'
    # Minimum probability used to filter out weak detections
    CONFIDENCE = 0.5
    # Non-maximum suppression threshold
    THRESHOLD = 0.4

    def __init__(self):
        print('import Dnn Tool Class!')
        # YOLO directory
        self.yolo_dir_name = self.yolo_dir
        # Weights file
        self.weightsPath = os.path.join(self.yolo_dir, 'yolov3.weights')
        # Network configuration file
        self.configPath = os.path.join(self.yolo_dir, 'yolov3.cfg')
        # Label names
        self.labelsPath = os.path.join(self.yolo_dir_name, 'coco.names')
        # Load the network and its weights
        self.net = cv.dnn.readNetFromDarknet(self.configPath, self.weightsPath)
        # Intel GPU
        # self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
        # self.net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)
        # CPU
        # self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
        # self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
        # NVIDIA GPU
        self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
        self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
        print("[INFO] loading YOLO from disk...")

    def importTest(self):
        print('dnn import test!')

    def DecodeImage(self, img):
        start = time.time()
        # The network expects blob input, so convert the image with blobFromImage
        # and feed it to the input layer.
        blobImg = cv.dnn.blobFromImage(img, 1.0 / 255.0, (416, 416), None, True, False)
        self.net.setInput(blobImg)
        # YOLOv3 produces output at several scales; outInfo holds the names of
        # those output layers, which net.forward needs.
        outInfo = self.net.getUnconnectedOutLayersNames()
        # Forward pass: layerOutputs holds the detections from every output layer.
        layerOutputs = self.net.forward(outInfo)
        end = time.time()
        print("[INFO] YOLO took {:.6f} seconds".format(end - start))
        # Image dimensions
        (H, W) = img.shape[:2]
        # Filter layerOutputs. Each detection row is:
        # [center_x, center_y, width, height, objectness, N class scores]
        boxes = []        # all bounding boxes (from every output layer)
        confidences = []  # all confidences
        classIDs = []     # all class IDs
        # 1) Drop low-confidence detections
        for out in layerOutputs:      # each output layer
            for detection in out:     # each detection
                # Per-class scores; the class with the highest score is the prediction
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                # Keep only confident detections
                if confidence > self.CONFIDENCE:
                    # Scale the box back to the original image size
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])  # box
                    confidences.append(float(confidence))          # confidence
                    classIDs.append(classID)                       # class ID
        # 2) Apply non-maximum suppression (NMS) to discard overlapping boxes;
        # idxs holds the indices of the boxes that survive.
        idxs = cv.dnn.NMSBoxes(boxes, confidences, self.CONFIDENCE, self.THRESHOLD)
        # Load the label list
        with open(self.labelsPath, 'rt') as f:
            labels = f.read().rstrip('\n').split('\n')
        np.random.seed(42)
        # One display colour per class; each colour is an RGB triple, hence size (len(labels), 3)
        COLORS = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
        if len(idxs) > 0:
            # alarm pic
            for i in idxs.flatten():  # idxs may be 2-D, so flatten it to 1-D
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                color = [int(c) for c in COLORS[classIDs[i]]]
                # Box outline, 2 px thick
                cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
                text = "{}: {:.4f}".format(labels[classIDs[i]], confidences[i])
                # cv.FONT_HERSHEY_SIMPLEX font, scale 0.5, thickness 2 px
                cv.putText(img, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            print('alarm pic!')
            cv.imshow('decode', img)
            return img
        else:
            # normal pic
            print('normal pic!')
            return None
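Before wiring the detector into the camera loop, it can be tried on a single still image. A sketch (dnn_test.py is a hypothetical helper; the image path is a placeholder, and the yolov3-tiny folder with the weights/cfg/names files must already be in place):

# dnn_test.py - run MyDnn once on a single image (hypothetical helper; paths are placeholders)
import cv2 as cv
from DnnClass import MyDnn

dnn = MyDnn()                    # loads ./yolov3-tiny/yolov3.cfg and yolov3.weights
img = cv.imread('test.jpg')      # any test image; the path is a placeholder
result = dnn.DecodeImage(img)    # returns the annotated image, or None if nothing was detected
if result is not None:
    cv.waitKey(0)                # DecodeImage already opened the 'decode' window
cv.destroyAllWindows()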
DnnMain.py

# encoding: utf-8
import numpy as np
import cv2 as cv
from DnnClass import MyDnn
from CameraCall import CameraCallTool
import pygame
from PIL import Image, ImageDraw, ImageFont
import threading
import subprocess


def LoadMusic():
    pygame.mixer.init()
    pygame.mixer.music.load('./audio/succ.wav')
    pygame.mixer.music.set_volume(0.5)


def PlaySuccess():
    pygame.mixer.music.play()


def DrawTextOnImage(img, text1, text2, left, top, textColor=(0, 255, 0), textSize=20):
    if isinstance(img, np.ndarray):
        img = Image.fromarray(cv.cvtColor(img, cv.COLOR_BGR2RGB))
    # Create a drawing context and render the two text lines
    draw = ImageDraw.Draw(img)
    fontstyle = ImageFont.truetype('./font/simsun.ttc', textSize, encoding='utf-8')
    draw.text((left, top), text1, textColor, font=fontstyle)
    draw.text((left, int(top + textSize + 2)), text2, textColor, font=fontstyle)
    return cv.cvtColor(np.asarray(img), cv.COLOR_RGB2BGR)


def CallCameraOnBoard(width, height):
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv.VideoCapture(gst_str, cv.CAP_GSTREAMER)


if __name__ == "__main__":
    print('camera onboard')
    DnnTool = MyDnn()
    cameraCallTool = CameraCallTool(1024, 768, 0)
    t = None
    try:
        while cameraCallTool.CameraStatus():
            CameraFrame = cameraCallTool.GetOneFrame()
            if CameraFrame is not None:
                cv.imshow('Video', CameraFrame)
                # Only start a new detection thread once the previous one has finished
                if t is None or t.is_alive() is False:
                    # image_np = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                    t = threading.Thread(target=DnnTool.DecodeImage, args=(CameraFrame, ))
                    t.start()
                # Quit when the 'q' key is pressed
                c = cv.waitKey(1)
                # On 'q', save a screenshot first, then exit
                if c == ord('q'):
                    saveFile = "Capture.jpg"  # save path; may contain non-ASCII characters
                    cv.imencode(".jpg", CameraFrame)[1].tofile(saveFile)
                    break
    except Exception:
        print('Camera Show Exception!')
    finally:
        del cameraCallTool
        cv.destroyAllWindows()
Not included above: the audio, font, and yolov3-tiny resource folders (succ.wav, simsun.ttc, and the YOLO weights/cfg/names files) have to be supplied separately.