
Real-time detection on an RTSP video stream with OpenCV 4.4 and YOLOv4-tiny on the CPU


Prerequisite: install the latest OpenCV 4.4. Tutorial link: https://blog.csdn.net/qq_34717531/article/details/107763872
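Before running the script below, it is worth confirming that the installed build really is 4.4 or newer, since OpenCV's DNN module only gained support for the YOLOv4 family in version 4.4. A minimal sketch of such a check (it assumes the usual opencv-python pip package, but any 4.4+ build works):

import cv2

# OpenCV's DNN module added YOLOv4(-tiny) support in version 4.4,
# so older builds will fail to parse yolov4-tiny.cfg.
major, minor = (int(v) for v in cv2.__version__.split(".")[:2])
assert (major, minor) >= (4, 4), f"OpenCV {cv2.__version__} is too old for YOLOv4-tiny"
print("OpenCV", cv2.__version__, "OK")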

import cv2
import numpy as np
import time

# Load the YOLOv4-tiny model with OpenCV's DNN module.
net = cv2.dnn.readNet("yolov4-tiny.weights", "yolov4-tiny.cfg")
# Get the names of all layers in the network.
layer_names = net.getLayerNames()
# Extract the names of the output layers by finding their indices in layer_names.
# (OpenCV 4.4 wraps each index in a 1-element array, hence i[0]; newer
# releases return plain ints, where layer_names[i - 1] is needed instead.)
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# Read the class names from "coco.names", one entry per line, stripping whitespace.
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
# Define a font to be used when displaying class names.
FONT = cv2.FONT_HERSHEY_PLAIN
# Assign a random color to represent each class.
colors = np.random.uniform(0, 255, size=(len(classes), 3))
# Define a confidence threshold for detections.
conf_thresh = 0.5
# Open the video source (a local file here; an RTSP URL works the same way).
cap = cv2.VideoCapture("1.mp4")
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
videoWriter = cv2.VideoWriter('11.avi', fourcc, fps, (width, height))
# Initialise a frame counter and record the start time for FPS calculation.
frame_id = 0
time_start = time.time()

while True:
    # Read the next frame; stop when the stream ends.
    ret, frame = cap.read()
    if not ret:
        break
    frame_id += 1
    # Pre-process the frame: apply the same 1/255 scaling used when training
    # the model, resize to the network input size, and swap BGR (used by
    # OpenCV) to RGB (used by the model).
    blob = cv2.dnn.blobFromImage(frame, 1 / 255, (416, 416), swapRB=True)
    # Pass the processed frame through the network to get predictions.
    net.setInput(blob)
    outs = net.forward(output_layers)
    # Lists for the confidence, class ID and coordinates of detected boxes.
    confidences = []
    class_ids = []
    boxes = []
    # Loop through all the detections in each output scale.
    for out in outs:
        for detection in out:
            # Get the class probabilities for this box and keep the best class.
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # Keep the detection only if its score exceeds the threshold.
            if confidence > conf_thresh:
                # Use the frame's shape to scale the normalised box
                # back to input-image coordinates.
                height, width, channels = frame.shape
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Convert center/width/height to the top-left corner,
                # which OpenCV's drawing functions expect.
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                # Store the information for this box.
                confidences.append(float(confidence))
                class_ids.append(class_id)
                boxes.append([x, y, w, h])
    # Apply non-max suppression to get rid of overlapping boxes.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, conf_thresh, 0.4)
    # Draw every box that survived NMS.
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence = confidences[i]
            color = colors[class_ids[i]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            # Display the class label and the confidence inside the box.
            cv2.putText(frame, label + " " + str(round(confidence, 2)),
                        (x, y + 30), FONT, 2, color, 2)
    # Compute the average FPS since the loop started and overlay it top-left.
    elapsed_time = time.time() - time_start
    fps = frame_id / elapsed_time
    cv2.putText(frame, "FPS: " + str(round(fps, 2)), (8, 30), FONT, 2, (0, 0, 0), 2)
    # Save the annotated frame and show it.
    videoWriter.write(frame)
    cv2.imshow("Camera", frame)
    # Wait at least 1 ms for a key event; ESC (27) breaks the loop.
    key = cv2.waitKey(1)
    if key == 27:
        break

# Release the video source and writer, and destroy all windows.
cap.release()
videoWriter.release()
cv2.destroyAllWindows()
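The demo above reads a local file ("1.mp4"), but the RTSP case in the title only requires changing the capture source, since cv2.VideoCapture accepts an RTSP URL directly. A minimal sketch (the address, port, credentials and stream path are placeholders for your own camera):

# Hypothetical RTSP source -- substitute your camera's address and credentials.
cap = cv2.VideoCapture("rtsp://user:password@192.168.1.64:554/stream1")
if not cap.isOpened():
    raise RuntimeError("could not open the RTSP stream")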

Summary: YOLOv4-tiny is slightly slower than YOLOv3-tiny (still real-time; my CPU is an i7-9700K), but its detection quality is much better.
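To reproduce this comparison on your own hardware, OpenCV's built-in profiler reports how long the last forward pass took, independent of the decoding and drawing overhead. A minimal sketch, assuming net has just run net.forward(...) as in the loop above:

# getPerfProfile() returns the total time of the last forward pass in ticks.
t, _ = net.getPerfProfile()
print("inference: %.1f ms per frame" % (t * 1000.0 / cv2.getTickFrequency()))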
