• Windows C++ VS2022 OpenVINO 物体检测 Demo


    目录

    准备工作

    1、下载opencv

    2、下载OpenVino

     新建项目

    main.cpp代码 如下:

    我们直接使用 Release

    设置包含目录和库目录

    包含目录:

    库目录

     添加附加依赖项

    生成

    配置环境变量

    或者复制Opencv和OpenVino的DLL到 C:\yolov8_det_openvino\x64\Release

    复制测试图片和模型

    运行exe

    效果

    代码参考

    下载 


    准备工作

    1、下载opencv

    地址:Releases - OpenCV(https://opencv.org/releases/)

    我下载的是opencv-4.5.5,存放的路径为:

    2、下载OpenVino

    地址:https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.0.1/

    我存放的路径为:

    • C++预处理器所需的头文件:include文件夹
    • C++链接器所需的lib文件:lib文件夹
    • 可执行文件(*.exe)所需的动态链接库文件:bin文件夹
    • OpenVINO runtime第三方依赖库文件:3rdparty文件夹

     新建项目

    main.cpp代码 如下:

    class_names

    // The 80 COCO category labels, indexed by the class id YOLOv8 outputs.
    // FIX: the element type <std::string> was missing, which is not valid C++.
    const std::vector<std::string> class_names = {
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
        "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
        "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
        "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
        "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
        "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
        "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
        "hair drier", "toothbrush" };

    1. #include <iostream>
    2. #include <string>
    3. #include <vector>
    4. #include <openvino/openvino.hpp> //openvino header file
    5. #include <opencv2/opencv.hpp> //opencv header file
    6. #include <direct.h>
    7. #include <stdio.h>
    8. #include <time.h> 
    9. std::vector<cv::Scalar> colors = { cv::Scalar(0, 0, 255) , cv::Scalar(0, 255, 0) , cv::Scalar(255, 0, 0) ,
    10. cv::Scalar(255, 100, 50) , cv::Scalar(50, 100, 255) , cv::Scalar(255, 50, 100) };
    11. const std::vector<std::string> class_names = {
    12. "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
    13. "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    14. "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
    15. "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
    16. "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    17. "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
    18. "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
    19. "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
    20. "hair drier", "toothbrush" };
    21. using namespace cv;
    22. using namespace dnn;
    23. // Keep the ratio before resize
    24. Mat letterbox(const cv::Mat& source)
    25. {
    26. int col = source.cols;
    27. int row = source.rows;
    28. int _max = MAX(col, row);
    29. Mat result = Mat::zeros(_max, _max, CV_8UC3);
    30. source.copyTo(result(Rect(0, 0, col, row)));
    31. return result;
    32. }
    33. int main()
    34. {
    35. clock_t start, end;//定义clock_t变量
    36. std::cout << "共8步" << std::endl;
    37. char buffer[100];
    38. _getcwd(buffer, 100);
    39. std::cout << "当前路径:" << buffer << std::endl;
    40. // -------- Step 1. Initialize OpenVINO Runtime Core --------
    41. std::cout << "1. Initialize OpenVINO Runtime Core" << std::endl;
    42. ov::Core core;
    43. // -------- Step 2. Compile the Model --------
    44. std::cout << "2. Compile the Model" << std::endl;
    45. String model_path = String(buffer) + "\\yolov8s.xml";
    46. std::cout << "model_path:\t" << model_path << std::endl;
    47. ov::CompiledModel compiled_model;
    48. try {
    49. compiled_model = core.compile_model(model_path, "CPU");
    50. }
    51. catch (std::exception& e) {
    52. std::cout << "Compile the Model 异常:" << e.what() << std::endl;
    53. return 0;
    54. }
    55. //auto compiled_model = core.compile_model("C:\\MyPro\\yolov8\\yolov8s.xml", "CPU");
    56. // -------- Step 3. Create an Inference Request --------
    57. std::cout << "3. Create an Inference Request" << std::endl;
    58. ov::InferRequest infer_request = compiled_model.create_infer_request();
    59. // -------- Step 4.Read a picture file and do the preprocess --------
    60. std::cout << "4.Read a picture file and do the preprocess" << std::endl;
    61. String img_path = String(buffer) + "\\test.jpg";
    62. std::cout << "img_path:\t" << img_path << std::endl;
    63. Mat img = cv::imread(img_path);
    64. // Preprocess the image
    65. Mat letterbox_img = letterbox(img);
    66. float scale = letterbox_img.size[0] / 640.0;
    67. Mat blob = blobFromImage(letterbox_img, 1.0 / 255.0, Size(640, 640), Scalar(), true);
    68. // -------- Step 5. Feed the blob into the input node of the Model -------
    69. std::cout << "5. Feed the blob into the input node of the Model" << std::endl;
    70. // Get input port for model with one input
    71. auto input_port = compiled_model.input();
    72. // Create tensor from external memory
    73. ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
    74. // Set input tensor for model with one input
    75. infer_request.set_input_tensor(input_tensor);
    76. start = clock();//开始时间
    77. // -------- Step 6. Start inference --------
    78. std::cout << "6. Start inference" << std::endl;
    79. infer_request.infer();
    80. end = clock();//结束时间
    81. std::cout << "inference time = " << double(end - start) << "ms" << std::endl;
    82. // -------- Step 7. Get the inference result --------
    83. std::cout << "7. Get the inference result" << std::endl;
    84. auto output = infer_request.get_output_tensor(0);
    85. auto output_shape = output.get_shape();
    86. std::cout << "The shape of output tensor:\t" << output_shape << std::endl;
    87. int rows = output_shape[2]; //8400
    88. int dimensions = output_shape[1]; //84: box[cx, cy, w, h]+80 classes scores
    89. std::cout << "8. Postprocess the result " << std::endl;
    90. // -------- Step 8. Postprocess the result --------
    91. float* data = output.data<float>();
    92. Mat output_buffer(output_shape[1], output_shape[2], CV_32F, data);
    93. transpose(output_buffer, output_buffer); //[8400,84]
    94. float score_threshold = 0.25;
    95. float nms_threshold = 0.5;
    96. std::vector<int> class_ids;
    97. std::vector<float> class_scores;
    98. std::vector<Rect> boxes;
    99. // Figure out the bbox, class_id and class_score
    100. for (int i = 0; i < output_buffer.rows; i++) {
    101. Mat classes_scores = output_buffer.row(i).colRange(4, 84);
    102. Point class_id;
    103. double maxClassScore;
    104. minMaxLoc(classes_scores, 0, &maxClassScore, 0, &class_id);
    105. if (maxClassScore > score_threshold) {
    106. class_scores.push_back(maxClassScore);
    107. class_ids.push_back(class_id.x);
    108. float cx = output_buffer.at<float>(i, 0);
    109. float cy = output_buffer.at<float>(i, 1);
    110. float w = output_buffer.at<float>(i, 2);
    111. float h = output_buffer.at<float>(i, 3);
    112. int left = int((cx - 0.5 * w) * scale);
    113. int top = int((cy - 0.5 * h) * scale);
    114. int width = int(w * scale);
    115. int height = int(h * scale);
    116. boxes.push_back(Rect(left, top, width, height));
    117. }
    118. }
    119. //NMS
    120. std::vector<int> indices;
    121. NMSBoxes(boxes, class_scores, score_threshold, nms_threshold, indices);
    122. // -------- Visualize the detection results -----------
    123. for (size_t i = 0; i < indices.size(); i++) {
    124. int index = indices[i];
    125. int class_id = class_ids[index];
    126. rectangle(img, boxes[index], colors[class_id % 6], 2, 8);
    127. std::string label = class_names[class_id] + ":" + std::to_string(class_scores[index]).substr(0, 4);
    128. Size textSize = cv::getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, 0);
    129. Rect textBox(boxes[index].tl().x, boxes[index].tl().y - 15, textSize.width, textSize.height + 5);
    130. cv::rectangle(img, textBox, colors[class_id % 6], FILLED);
    131. putText(img, label, Point(boxes[index].tl().x, boxes[index].tl().y - 5), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255));
    132. }
    133. //namedWindow("YOLOv8 OpenVINO Inference C++ Demo", WINDOW_AUTOSIZE);
    134. //imshow("YOLOv8 OpenVINO Inference C++ Demo", img);
    135. //waitKey(0);
    136. //destroyAllWindows();
    137. cv::imwrite("detection.png", img);
    138. std::cout << "detect success" << std::endl;
    139. system("pause");
    140. return 0;
    141. }

    我们直接使用 Release

    设置包含目录和库目录

    包含目录:

    1. C:\Program Files\opencv-4.5.5\build\include;
    2. C:\Program Files\opencv-4.5.5\build\include\opencv2;
    3. C:\Program Files\openvino_2023.0.1.11005\runtime\include;
    4. C:\Program Files\openvino_2023.0.1.11005\runtime\include\ie;

    库目录

    1. C:\Program Files\opencv-4.5.5\build\x64\vc15\lib;
    2. C:\Program Files\openvino_2023.0.1.11005\runtime\lib\intel64\Release;

     添加附加依赖项

    1. openvino.lib
    2. opencv_world455.lib

    生成

     配置环境变量

    或者复制Opencv和OpenVino的DLL到 C:\yolov8_det_openvino\x64\Release

    复制测试图片和模型

    运行exe

    效果

    代码参考

    GitHub - openvino-book/yolov8_openvino_cpp: YOLOv8 Inference C++ sample code based on OpenVINO C++ API

    下载 

    exe程序下载

    Demo下载

  • 相关阅读:
    【首阳首板之主升三域洗盘域】洞察洗盘突破典型形态,心中不慌
    前端项目:小程序电商管理平台难点整理
    千兆光模块存在哪些局限性
    Mac中LaTex无法编译的问题
    [附源码]计算机毕业设计打印助手平台
    SRE 的工作介绍
    Vue非单文件组件
    JAVA学习——day02
    纳尼?华为首席架构师只用434页笔记,就将网络协议给拿下了
    数字化校园包括哪些内容呢,应该从何入手?_光点科技
  • 原文地址:https://blog.csdn.net/lw112190/article/details/132827809