当前位置:   article > 正文

使用 YOLOv8 时的一些错误:ModuleNotFoundError: No module named 'ultralytics.nn.modules.conv'

ModuleNotFoundError: No module named 'ultralytics.nn.modules.conv'; 'ultralytics.nn.modules' is not a package

出现这个报错的时候:

AutoInstall will run now for 'ultralytics.nn.modules.conv' but this feature will be removed in the future.
Recommend fixes are to train a new model using the latest 'ultralytics' package or to run a command with an official YOLOv8 model, i.e. 'yolo predict model=yolov8n.pt'
requirements: YOLOv8 requirement "ultralytics.nn.modules.conv" not found, attempting AutoUpdate...
ERROR: Could not find a version that satisfies the requirement ultralytics.nn.modules.conv (from versions: none)
ERROR: No matching distribution found for ultralytics.nn.modules.conv
requirements: ❌ Command 'pip install "ultralytics.nn.modules.conv"  ' returned non-zero exit status 1.
  File "/usr/local/ev_sdk/src/ultralytics/nn/tasks.py", line 351, in torch_safe_load
    return torch.load(file, map_location='cpu'), file  # load
  File "/opt/conda/lib/python3.7/site-packages/torch/serialization.py", line 712, in load
    return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
  File "/opt/conda/lib/python3.7/site-packages/torch/serialization.py", line 1046, in _load
    result = unpickler.load()
  File "/opt/conda/lib/python3.7/site-packages/torch/serialization.py", line 1039, in find_class
    return super().find_class(mod_name, name)
ModuleNotFoundError: No module named 'ultralytics.nn.modules.conv'; 'ultralytics.nn.modules' is not a package

检查 ultralytics包的版本,尽量是最新版本的,我因为原来是8.0.11,所以报错了。

查询问题以后发现,变成最新版本就没问题了

!pip install ultralytics==8.0.144

如果还是报错,要注意你运行的py文件要在yolo文件目录下面

由于yolov8训练出来的模型默认是保存在指定当前目录的runs/detect/train下面

如果要修改模型保存的路径可以在训练的时候加上project,如下所示:

yolo task=detect mode=train model=yolov8s.pt epochs=100 batch=2 data=datasets/helmet.yaml project=/project/train/models/resume

就可以实现训练的时候保存到指定的文件夹下面;

使用resume=True参数,就可以实现训练中断,然后再训练:

yolo task=detect mode=train model=E:/yolov8/ultralytics_ds_converter/ultralytics/please/train2/weights/best.pt epochs=100 batch=2 data=datasets/helmet.yaml project=E:/yolov8/ultralytics_ds_converter/ultralytics/please/  resume=True

但是要注意,使用 resume 时 model 参数要改成你中断训练时保存的模型路径,而且最好选择 best.pt 模型;如果选择 last.pt,它可能在中断时已经损坏、无法正常读取,会报下面的这个错误:

RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory

在使用 C++ 里面的 onnxruntime 来运行 yolov8s.onnx 文件的时候,出现了报错信息:

Ort::Env(OrtLoggingLevel::ORT_LOGGING_LEVEL_ERROR, "Yolov5-Seg");
报错信息如下:
引发了异常: 读取访问权限冲突。
Ort::GetApi(...) 返回 nullptr。

我找到的解决方案如下:

这个一般会是dll冲突问题导致的,win系统特有的问题,原因在于win10的system32下面自带有一个onnxruntime的dll,优先级比环境变量添加的路径高导致的,你可以修改权限删除,或者你可以将onnx的相关dll拷贝到你项目的exe下面去运行看下。

如果出现了下面这个错误:

是路径的问题,把\变成/,检测onnx文件的路径。

我使用了 OpenCV 4.5.2 和 ONNX Runtime 1.4.1。

完整代码如下:

  1. #include <onnxruntime_cxx_api.h>
  2. #include <opencv2/opencv.hpp>
  3. #include <fstream>
  4. using namespace cv;
  5. using namespace std;
  6. std::string labels_txt_file = "classes.txt";
  7. std::vector<std::string> readClassNames();
  8. std::vector<std::string> readClassNames()
  9. {
  10. std::vector<std::string> classNames;
  11. std::ifstream fp(labels_txt_file);
  12. if (!fp.is_open())
  13. {
  14. printf("could not open file...\n");
  15. exit(-1);
  16. }
  17. std::string name;
  18. while (!fp.eof())
  19. {
  20. std::getline(fp, name);
  21. if (name.length())
  22. classNames.push_back(name);
  23. }
  24. fp.close();
  25. return classNames;
  26. }
  27. int main(int argc, char** argv) {
  28. std::vector<std::string> labels = readClassNames();
  29. cv::Mat frame = cv::imread("E:/yolov8/dataset/images/.jpg");
  30. int ih = frame.rows;
  31. int iw = frame.cols;
  32. // 创建InferSession, 查询支持硬件设备
  33. // GPU Mode, 0 - gpu device id
  34. std::string onnxpath = "E:/yolov8/code/predict/predict/yolov8s.onnx";//E:/yolov8/ultralytics_ds_converter/ultralytics/runs/detect/train/weights/best.onnx
  35. std::wstring modelPath = std::wstring(onnxpath.begin(), onnxpath.end());
  36. Ort::SessionOptions session_options;
  37. Ort::Env env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "yolov8-onnx");
  38. session_options.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
  39. std::cout << "onnxruntime inference try to use GPU Device" << std::endl;
  40. OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
  41. Ort::Session session_(env, modelPath.c_str(), session_options);
  42. std::vector<std::string> input_node_names;
  43. std::vector<std::string> output_node_names;
  44. size_t numInputNodes = session_.GetInputCount();
  45. size_t numOutputNodes = session_.GetOutputCount();
  46. Ort::AllocatorWithDefaultOptions allocator;
  47. input_node_names.reserve(numInputNodes);
  48. // 获取输入信息
  49. int input_w = 0;
  50. int input_h = 0;
  51. for (int i = 0; i < numInputNodes; i++) {
  52. auto input_name = session_.GetInputNameAllocated(i, allocator);
  53. input_node_names.push_back(input_name.get());
  54. Ort::TypeInfo input_type_info = session_.GetInputTypeInfo(i);
  55. auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
  56. auto input_dims = input_tensor_info.GetShape();
  57. input_w = input_dims[3];
  58. input_h = input_dims[2];
  59. std::cout << "input format: w = " << input_w << "h:" << input_h << std::endl;
  60. }
  61. // 获取输出信息
  62. int output_h = 0;
  63. int output_w = 0;
  64. Ort::TypeInfo output_type_info = session_.GetOutputTypeInfo(0);
  65. auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
  66. auto output_dims = output_tensor_info.GetShape();
  67. output_h = output_dims[1]; // 84
  68. output_w = output_dims[2]; // 8400
  69. std::cout << "output format : HxW = " << output_dims[1] << "x" << output_dims[2] << std::endl;
  70. for (int i = 0; i < numOutputNodes; i++) {
  71. auto out_name = session_.GetOutputNameAllocated(i, allocator);
  72. output_node_names.push_back(out_name.get());
  73. }
  74. std::cout << "input: " << input_node_names[0] << " output: " << output_node_names[0] << std::endl;
  75. // format frame
  76. int64 start = cv::getTickCount();
  77. int w = frame.cols;
  78. int h = frame.rows;
  79. int _max = std::max(h, w);
  80. cv::Mat image = cv::Mat::zeros(cv::Size(_max, _max), CV_8UC3);
  81. cv::Rect roi(0, 0, w, h);
  82. frame.copyTo(image(roi));
  83. // fix bug, boxes consistence!
  84. float x_factor = image.cols / static_cast<float>(input_w);
  85. float y_factor = image.rows / static_cast<float>(input_h);
  86. cv::Mat blob = cv::dnn::blobFromImage(image, 1 / 255.0, cv::Size(input_w, input_h), cv::Scalar(0, 0, 0), true, false);
  87. size_t tpixels = input_h * input_w * 3;
  88. std::array<int64_t, 4> input_shape_info{ 1, 3, input_h, input_w };
  89. // set input data and inference
  90. auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
  91. Ort::Value input_tensor_ = Ort::Value::CreateTensor<float>(allocator_info, blob.ptr<float>(), tpixels, input_shape_info.data(), input_shape_info.size());
  92. const std::array<const char*, 1> inputNames = { input_node_names[0].c_str() };
  93. const std::array<const char*, 1> outNames = { output_node_names[0].c_str() };
  94. std::vector<Ort::Value> ort_outputs;
  95. try {
  96. ort_outputs = session_.Run(Ort::RunOptions{ nullptr }, inputNames.data(), &input_tensor_, 1, outNames.data(), outNames.size());
  97. }
  98. catch (std::exception e) {
  99. std::cout << e.what() << std::endl;
  100. }
  101. // output data
  102. const float* pdata = ort_outputs[0].GetTensorMutableData<float>();
  103. cv::Mat dout(output_h, output_w, CV_32F, (float*)pdata);
  104. cv::Mat det_output = dout.t(); // 8400x84
  105. // post-process
  106. std::vector<cv::Rect> boxes;
  107. std::vector<int> classIds;
  108. std::vector<float> confidences;
  109. for (int i = 0; i < det_output.rows; i++) {
  110. cv::Mat classes_scores = det_output.row(i).colRange(4, 84);
  111. cv::Point classIdPoint;
  112. double score;
  113. minMaxLoc(classes_scores, 0, &score, 0, &classIdPoint);
  114. // 置信度 0~1之间
  115. if (score > 0.25)
  116. {
  117. float cx = det_output.at<float>(i, 0);
  118. float cy = det_output.at<float>(i, 1);
  119. float ow = det_output.at<float>(i, 2);
  120. float oh = det_output.at<float>(i, 3);
  121. int x = static_cast<int>((cx - 0.5 * ow) * x_factor);
  122. int y = static_cast<int>((cy - 0.5 * oh) * y_factor);
  123. int width = static_cast<int>(ow * x_factor);
  124. int height = static_cast<int>(oh * y_factor);
  125. cv::Rect box;
  126. box.x = x;
  127. box.y = y;
  128. box.width = width;
  129. box.height = height;
  130. boxes.push_back(box);
  131. classIds.push_back(classIdPoint.x);
  132. confidences.push_back(score);
  133. }
  134. }
  135. // NMS
  136. std::vector<int> indexes;
  137. cv::dnn::NMSBoxes(boxes, confidences, 0.25, 0.45, indexes);
  138. for (size_t i = 0; i < indexes.size(); i++) {
  139. int index = indexes[i];
  140. int idx = classIds[index];
  141. cv::rectangle(frame, boxes[index], cv::Scalar(0, 0, 255), 2, 8);
  142. cv::rectangle(frame, cv::Point(boxes[index].tl().x, boxes[index].tl().y - 20),
  143. cv::Point(boxes[index].br().x, boxes[index].tl().y), cv::Scalar(0, 255, 255), -1);
  144. putText(frame, labels[idx], cv::Point(boxes[index].tl().x, boxes[index].tl().y), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 0, 0), 2, 8);
  145. cv::imshow("YOLOv8+ONNXRUNTIME 对象检测演示", frame);
  146. }
  147. // 计算FPS render it
  148. float t = (cv::getTickCount() - start) / static_cast<float>(cv::getTickFrequency());
  149. putText(frame, cv::format("FPS: %.2f", 1.0 / t), cv::Point(20, 40), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 0, 0), 2, 8);
  150. cv::imshow("YOLOv8+ONNXRUNTIME 对象检测演示", frame);
  151. cv::waitKey(0);
  152. session_options.release();
  153. session_.release();
  154. return 0;
  155. }

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/木道寻08/article/detail/738127
推荐阅读
相关标签
  

闽ICP备14008679号