当前位置:   article > 正文

yolov4 yolov4-tiny flask部署web服务_yolov4实战: flask web部署

yolov4实战: flask web部署

前篇参考:https://blog.csdn.net/qq_34717531/article/details/107818606

  1. # -*- coding: utf-8 -*-
  2. from flask import Flask, request, jsonify
  3. import cv2
  4. import numpy as np
  5. import os
  6. import time
  7. import json
  8. '''
  9. pathIn:原始图片的路径
  10. pathOut:结果图片的路径
  11. label_path:类别标签文件的路径
  12. config_path:模型配置文件的路径
  13. weights_path:模型权重文件的路径
  14. confidence_thre:0-1,置信度(概率/打分)阈值,即保留概率大于这个值的边界框,默认为0.5
  15. nms_thre:非极大值抑制的阈值,默认为0.3
  16. '''
  17. def yolo_detect(im=None,
  18. pathIn=None,
  19. label_path='./cfg/coco.names',
  20. config_path='./cfg/yolov4-tiny.cfg',
  21. weights_path='./cfg/yolov4-tiny.weights',
  22. confidence_thre=0.5,
  23. nms_thre=0.3):
  24. #加载类别标签文件
  25. LABELS = open(label_path).read().strip().split("\n")
  26. nclass = len(LABELS)
  27. # 为每个类别的边界框随机匹配相应颜色
  28. np.random.seed(42)
  29. COLORS = np.random.randint(0, 255, size=(nclass, 3), dtype='uint8')
  30. if pathIn == None:
  31. img = im
  32. else:
  33. img = cv2.imread(pathIn)
  34. # print(pathIn)
  35. # 载入图片并获取其维度
  36. filename = pathIn.split('/')[-1]
  37. name = filename.split('.')[0]
  38. (H, W) = img.shape[:2]
  39. # 加载模型配置和权重文件
  40. net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
  41. # 获取YOLO输出层的名字
  42. ln = net.getLayerNames()
  43. ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
  44. # 将图片构建成一个blob,设置图片尺寸,然后执行一次
  45. # YOLO前馈网络计算,最终获取边界框和相应概率
  46. blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)
  47. net.setInput(blob)
  48. start = time.time()
  49. layerOutputs = net.forward(ln)
  50. end = time.time()
  51. # 初始化边界框,置信度(概率)以及类别
  52. boxes = []
  53. confidences = []
  54. classIDs = []
  55. # 迭代每个输出层,总共三个
  56. for output in layerOutputs:
  57. # 迭代每个检测
  58. for detection in output:
  59. # 提取类别ID和置信度
  60. scores = detection[5:]
  61. classID = np.argmax(scores)
  62. confidence = scores[classID]
  63. # 只保留置信度大于某值的边界框
  64. if confidence > confidence_thre:
  65. # 将边界框的坐标还原至与原图片相匹配,记住YOLO返回的是
  66. # 边界框的中心坐标以及边界框的宽度和高度
  67. box = detection[0:4] * np.array([W, H, W, H])
  68. (centerX, centerY, width, height) = box.astype("int")
  69. # 计算边界框的左上角位置
  70. x = int(centerX - (width / 2))
  71. y = int(centerY - (height / 2))
  72. # 更新边界框,置信度(概率)以及类别
  73. boxes.append([x, y, int(width), int(height)])
  74. confidences.append(float(confidence))
  75. classIDs.append(classID)
  76. # 使用非极大值抑制方法抑制弱、重叠边界框
  77. idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_thre, nms_thre)
  78. lab = []
  79. loc = []
  80. data={}
  81. data["filename"]=filename
  82. data["counts"]=len(idxs)
  83. # 确保至少一个边界框
  84. if len(idxs) > 0:
  85. # 迭代每个边界框
  86. for i in idxs.flatten():
  87. # 提取边界框的坐标
  88. (x, y) = (boxes[i][0], boxes[i][1])
  89. (w, h) = (boxes[i][2], boxes[i][3])
  90. # 绘制边界框以及在左上角添加类别标签和置信度
  91. color = [int(c) for c in COLORS[classIDs[i]]]
  92. cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
  93. text = '{}: {:.3f}'.format(LABELS[classIDs[i]], confidences[i])
  94. (text_w, text_h), baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
  95. cv2.rectangle(img, (x, y-text_h-baseline), (x + text_w, y), color, -1)
  96. cv2.putText(img, text, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
  97. text_inf = text + ' ' + '(' + str(x) + ',' + str(y) + ')' + ' ' + '宽:' + str(w) + '高:' + str(h)
  98. info = {"label":LABELS[classIDs[i]],"confidences":confidences[i],"x":str(x),"y":str(y),"w":str(w),"h":str(h)}
  99. data["data"+str(i)]=info
  100. # print(filename,LABELS[classIDs[i]],confidences[i],str(x),str(y),str(w),str(h))
  101. loc.append([x, y, w, h])
  102. lab.append(text_inf)
  103. res = jsonify(data)
  104. return lab, img, loc, res
# Example usage (the function returns FOUR values — lab, img, loc, res):
# if __name__ == '__main__':
#     pathIn = './static/images/test1.jpg'
#     lab, img, loc, res = yolo_detect(pathIn=pathIn)
#     print(lab)

 

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/繁依Fanyi0/article/detail/83253
推荐阅读
相关标签
  

闽ICP备14008679号