• MNN in Practice [C++ Version]


    1. Yolov5-lite

    1.1 Install dependencies

    • opencv
    • protobuf
    • cmake

    1.2 Build the libMNN.so shared library

    $ git clone https://github.com/alibaba/MNN.git
    $ cd MNN
    $ mkdir build && cd build
    $ sudo cmake ..
    $ sudo make
    

    1.3 Build this project

    $ cd mnn_demo
    $ mkdir build && cd build
    $ sudo cmake ..
    $ sudo make
    

    1.4 Download the MNN-format model files

    $ mkdir model_zoo && cd model_zoo
    # wget v5lite-s.mnn or v5lite-s-int4.mnn into model_zoo
    

    v5lite-s.mnn: https://drive.google.com/file/d/10dBsY0T19Kyz2sZ4ebfpsb6dnG58pmYq/view?usp=sharing
    v5lite-s-int4.mnn: https://drive.google.com/file/d/1v90z5sWx6rTnrF9jejugZup2YuIuXObR/view?usp=sharing

    1.5 Run the test

    $ ./yolov5
    # Default input image: 1860.jpg; the result is saved as output.jpg
    # Default number of detection classes: 80; the class names are as follows:
    # "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
    # "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    # "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
    # "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
    # "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    # "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
    # "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
    # "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
    # "hair drier", "toothbrush"
    

    [Figure: output.jpg]

    2. A YOLOv5-Based Show-Through Text Detection Model in Practice

    2.1 Install dependencies

    • opencv
    • protobuf
    • cmake

    2.2 Build the libMNN.so shared library

    $ git clone https://github.com/alibaba/MNN.git
    $ cd MNN
    $ mkdir build && cd build
    $ sudo cmake ..
    $ sudo make
    $ cp libMNN.so to/your/project/path
    $ cd ../include
    $ cp -R MNN to/your/project/include/folder
    

    For example, my project is located at /home/tianzx/ai_model/test_ai_interface/mnn/demo/yolov5_cpp:

    $ tree . --filelimit=10 --dirsfirst
    .
    ├── include
    │   ├── MNN [13 entries exceeds filelimit, not opening dir]
    │   ├── util.h
    │   └── Yolo.h
    ├── model_zoo [11 entries exceeds filelimit, not opening dir]
    ├── src
    │   ├── main.cpp
    │   ├── util.cpp
    │   └── Yolo.cpp
    ├── CMakeLists.txt
    ├── libMNN.so
    ├── test_detect.jpg
    ├── test.jpg
    ├── README.md
    └── yolov5
    
    4 directories, 11 files
    

    2.3 Model file conversion

    Convert the yolov5l model file perspec_yolov5l_v0.0.1.pt into perspec_yolov5l_v0.0.1.mnn.

    2.3.1 pt to onnx
    • perspec_yolov5l_v0.0.1.pt was trained with https://github.com/ultralytics/yolov5.
    • pip3 install onnx-simplifier
    • python export.py --weights runs/train/perspec_bestweights/perspec_yolov5l_v0.0.1.pt --include onnx --simplify --train
    2.3.2 onnx to mnn
    $ cd MNN/build
    $ ./MNNConvert -f ONNX --modelFile path/to/perspec_yolov5l_v0.0.1.onnx --MNNModel path/to/perspec_yolov5l_v0.0.1.mnn --bizCode biz
    
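    To sanity-check the conversion, a short sketch like the following (the model path is an assumption; point it at whatever MNNConvert produced) loads the .mnn file and prints the input shape, which for this model should be 1 3 640 640:

    #include <iostream>
    #include <memory>
    #include <MNN/Interpreter.hpp>
    #include <MNN/Tensor.hpp>

    int main() {
        std::shared_ptr<MNN::Interpreter> net(
            MNN::Interpreter::createFromFile("perspec_yolov5l_v0.0.1.mnn"));  // assumed path
        if (!net) { std::cerr << "load failed" << std::endl; return -1; }
        MNN::ScheduleConfig config;
        MNN::Session* session = net->createSession(config);
        MNN::Tensor* input = net->getSessionInput(session, nullptr);
        for (int d : input->shape()) std::cout << d << " ";  // expect: 1 3 640 640
        std::cout << std::endl;
        return 0;
    }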
    2.3.3 Move the mnn file

    $ mv path/to/perspec_yolov5l_v0.0.1.mnn /home/tianzx/ai_model/test_ai_interface/mnn/demo/yolov5_cpp/model_zoo/

    2.4 Modify main.cpp

    • draw_box function
      • Change the class-count definition to int CNUM = 1; and the class-name definition to static const char* class_names[] = { "perspective" };
    • main function
      • Add a model-path parameter model_name, an input-image parameter image_name, and a save-path parameter save_image_name
      • int num_classes = 1; int net_size = 640; int INPUT_SIZE = 640;
      • float threshold = 0.25; float nms_threshold = 0.45;
      • std::vector<YoloLayerData> yolov5s_layers{

    {"619", 32, {{116, 90}, {156, 198}, {373, 326}}},
    {"599", 16, {{30, 61}, {62, 45}, {59, 119}}},
    {"output", 8, {{10, 13}, {16, 30}, {33, 23}}},
    };
    Key point: the definition of yolov5s_layers is tied to the yolov5l model structure in yolov5l.yaml: https://github.com/ultralytics/yolov5/blob/master/models/yolov5l.yaml

    [Figure: yolov5l.yaml]

    Open perspec_yolov5l_v0.0.1.mnn at https://netron.app/, press Ctrl+F to open the FIND sidebar on the right, and drag the scrollbar all the way to the bottom; you should see something like the figure below:
    [Figure: Netron FIND sidebar listing the model's nodes]
    Click 599, 619 and output (the entries marked with a right-pointing arrow) one by one and compare them with the head and anchors sections of yolov5l.yaml; this yields the following table:

    Output node | Conv weight shape | yolov5l.yaml head                               | yolov5l.yaml anchors
    619         | 18x1024x1x1       | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)  | [116,90, 156,198, 373,326] # P5/32
    599         | 18x512x1x1        | [-1, 3, C3, [512, False]],  # 20 (P4/16-medium) | [30,61, 62,45, 59,119] # P4/16
    output      | 18x256x1x1        | [-1, 3, C3, [256, False]],  # 17 (P3/8-small)   | [10,13, 16,30, 33,23] # P3/8
    (In each weight shape, 18 = 3 anchors x (1 class + 5 box/objectness values).)
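    If you would rather confirm the output-node names from code than in Netron, a minimal sketch like the one below (the model path is an assumption; adjust it to your model_zoo) lists every output tensor of the session together with its shape; the three detection heads should show up as 619, 599 and output:

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <MNN/Interpreter.hpp>
    #include <MNN/Tensor.hpp>

    int main() {
        std::shared_ptr<MNN::Interpreter> net(
            MNN::Interpreter::createFromFile("model_zoo/perspec_yolov5l_v0.0.1.mnn"));  // assumed path
        if (!net) return -1;
        MNN::ScheduleConfig config;
        MNN::Session* session = net->createSession(config);
        // getSessionOutputAll maps each output tensor name to its Tensor*.
        for (const auto& kv : net->getSessionOutputAll(session)) {
            std::cout << kv.first << ":";
            for (int d : kv.second->shape()) std::cout << " " << d;
            std::cout << std::endl;
        }
        return 0;
    }

    The full modified main.cpp is listed below: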
    #include <iostream>
    #include <string>
    #include <cstring>
    
    #include <ctime>
    #include <opencv2/opencv.hpp>
    #include <MNN/Interpreter.hpp>
    #include <MNN/Tensor.hpp>
    
    #include "Yolo.h"
    
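    // Debug helper: print the first five dimensions of a tensor shape (assumes shape.size() >= 5).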
    void show_shape(std::vector<int> shape)
    {
        std::cout<<shape[0]<<" "<<shape[1]<<" "<<shape[2]<<" "<<shape[3]<<" "<<shape[4]<<" "<<std::endl;
    
    }
    
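    // Rescale boxes from the (w_from, h_from) network input space to the (w_to, h_to) original image space.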
    void scale_coords(std::vector<BoxInfo> &boxes, int w_from, int h_from, int w_to, int h_to)
    {
        float w_ratio = float(w_to)/float(w_from);
        float h_ratio = float(h_to)/float(h_from);
    
    
        for(auto &box: boxes)
            {
                box.x1 *= w_ratio;
                box.x2 *= w_ratio;
                box.y1 *= h_ratio;
                box.y2 *= h_ratio;
            }
        return ;
    }
    
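    // Draw red detection rectangles and "class score%" labels onto the image.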
    cv::Mat draw_box(cv::Mat & cv_mat, std::vector<BoxInfo> &boxes)
    {
        int CNUM = 1;
        static const char* class_names[] = {
        "perspective"
    };
        cv::RNG rng(0xFFFFFFFF);
        cv::Scalar_<int> randColor[CNUM];
        for (int i = 0; i < CNUM; i++)
            rng.fill(randColor[i], cv::RNG::UNIFORM, 0, 256);
    
        for(auto box : boxes)
            {
                int width = box.x2-box.x1;
                int height = box.y2-box.y1;
                int id = box.id;
                char text[256];
                cv::Point p = cv::Point(box.x1, box.y1-5);
                cv::Rect rect = cv::Rect(box.x1, box.y1, width, height);
                cv::rectangle(cv_mat, rect, cv::Scalar(0, 0, 255));
                sprintf(text, "%s %.1f%%", class_names[box.label], box.score * 100);
                cv::putText(cv_mat, text, p, cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255));
            }
        return cv_mat;
    }
    
    
    void help() {
        std::cout << "Usage:\n";
        std::cout << "./yolov5 <model_name> <image_name> <save_image_name>\n";
        std::cout << "./yolov5 model_zoo/perspec_yolov5l_v0.0.1.mnn M9255760001311204045_-2_crop.jpg M9255760001311204045_-2_crop_detect.jpg\n";
    }
    
    // https://github.com/ppogg/YOLOv5-Lite/tree/master/cpp_demo/mnn
    int main(int argc, char* argv[])
    {
    if (argc != 4)
    {
        help();
        return -1;
    }
    
    // Function type
    std::string model_name = argv[1];
    std::string image_name = argv[2];
    std::string save_image_name = argv[3];
    
    // [IMPORTANT] 
    int num_classes = 1;
    int net_size    = 640;
    std::vector<YoloLayerData> yolov5s_layers{
        {"619",    32, {{116, 90}, {156, 198}, {373, 326}}},
        {"599",    16, {{30,  61}, {62,  45},  {59,  119}}},
        {"output", 8,  {{10,  13}, {16,  30},  {33,  23}}},
        };
    
    std::vector<YoloLayerData> & layers = yolov5s_layers;
    
    std::shared_ptr<MNN::Interpreter> net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_name.c_str()));
    if (nullptr == net) {
    return 0;
    }
    
    MNN::ScheduleConfig config;
    config.numThread = 1;
    config.type      = static_cast<MNNForwardType>(MNN_FORWARD_CPU);
    MNN::BackendConfig backendConfig;
    backendConfig.precision = (MNN::BackendConfig::PrecisionMode)2;  // 2 == MNN::BackendConfig::Precision_Low
    config.backendConfig = &backendConfig;
    MNN::Session *session = net->createSession(config);
    
    
    // [IMPORTANT] load image and preprocessing
    int INPUT_SIZE = 640;
    cv::Mat raw_image      = cv::imread(image_name.c_str());
    cv::Mat image;
    cv::resize(raw_image, image, cv::Size(INPUT_SIZE, INPUT_SIZE));
    image.convertTo(image, CV_32FC3);
    image = image /255.0f;
    
    // wrapping input tensor, convert nhwc to nchw    
    std::vector<int> dims{1, INPUT_SIZE, INPUT_SIZE, 3};
    auto nhwc_Tensor = MNN::Tensor::create<float>(dims, NULL, MNN::Tensor::TENSORFLOW);
    auto nhwc_data   = nhwc_Tensor->host<float>();
    auto nhwc_size   = nhwc_Tensor->size();
    std::memcpy(nhwc_data, image.data, nhwc_size);
    
    auto inputTensor = net->getSessionInput(session, nullptr);
    inputTensor->copyFromHostTensor(nhwc_Tensor);
    
    // run network
    clock_t startTime, endTime;
    startTime = clock();  // start timing
    net->runSession(session);
    endTime = clock();    // stop timing
    // clock() returns CPU ticks; dividing by 1000.0 gives milliseconds when CLOCKS_PER_SEC == 1000000 (as on Linux).
    std::cout << "The forward time is: " << (double)(endTime - startTime) / 1000.0 << "ms" << std::endl;
    
    // get output data
    std::string output_tensor_name0 = layers[2].name ;
    std::string output_tensor_name1 = layers[1].name ;
    std::string output_tensor_name2 = layers[0].name ;
    
    MNN::Tensor *tensor_scores  = net->getSessionOutput(session, output_tensor_name0.c_str());
    MNN::Tensor *tensor_boxes   = net->getSessionOutput(session, output_tensor_name1.c_str());
    MNN::Tensor *tensor_anchors = net->getSessionOutput(session, output_tensor_name2.c_str());
    
    MNN::Tensor tensor_scores_host(tensor_scores, tensor_scores->getDimensionType());
    MNN::Tensor tensor_boxes_host(tensor_boxes, tensor_boxes->getDimensionType());
    MNN::Tensor tensor_anchors_host(tensor_anchors, tensor_anchors->getDimensionType());
    
    tensor_scores->copyToHostTensor(&tensor_scores_host);
    tensor_boxes->copyToHostTensor(&tensor_boxes_host);
    tensor_anchors->copyToHostTensor(&tensor_anchors_host);
    
    std::vector<BoxInfo> result;
    std::vector<BoxInfo> boxes;
    
    yolocv::YoloSize yolosize = yolocv::YoloSize{INPUT_SIZE,INPUT_SIZE};
    
    // [IMPORTANT] 
    float threshold = 0.25;
    float nms_threshold = 0.45;
    
    boxes = decode_infer(tensor_scores_host, layers[2].stride,  yolosize, net_size, num_classes, layers[2].anchors, threshold);
    result.insert(result.begin(), boxes.begin(), boxes.end());
    
    boxes = decode_infer(tensor_boxes_host, layers[1].stride,  yolosize, net_size, num_classes, layers[1].anchors, threshold);
    result.insert(result.begin(), boxes.begin(), boxes.end());
    
    boxes = decode_infer(tensor_anchors_host, layers[0].stride,  yolosize, net_size, num_classes, layers[0].anchors, threshold);
    result.insert(result.begin(), boxes.begin(), boxes.end());
    
    nms(result, nms_threshold);
    
    scale_coords(result, INPUT_SIZE, INPUT_SIZE, raw_image.cols, raw_image.rows);
    cv::Mat frame_show = draw_box(raw_image, result);
    cv::imwrite(save_image_name, frame_show);
    
    return 0;
    }
    
    

    2.5 Build this project

    • Modify the CMakeLists.txt file:
    cmake_minimum_required(VERSION 3.5.1)
    project(yolov5)
    
    
    SET(CMAKE_BUILD_TYPE "Debug")
    SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
    # SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
    
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall")
    set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/)
    
    # Head files
    include_directories(
    /usr/local/opencv4/include/opencv4/opencv
    /usr/local/opencv4/include/opencv4
    ${PROJECT_SOURCE_DIR}/include/
    )
    
    file(GLOB OpenCV4LIBS /usr/local/opencv4/lib/*.so)
    file(GLOB MNNLIBS ${PROJECT_SOURCE_DIR}/libMNN.so)
    file(GLOB SOURCE_FILES src/*.cpp)
    # link_directories(${PROJECT_SOURCE_DIR}/ncnn-20210322-ubuntu-1804-shared/lib/)
    add_executable(${CMAKE_PROJECT_NAME} ${SOURCE_FILES})
    
    target_link_libraries (
    ${CMAKE_PROJECT_NAME}
    ${OpenCV4LIBS}
    ${MNNLIBS}
    pthread
    )
    
    $ cd /home/tianzx/ai_model/test_ai_interface/mnn/demo/yolov5_cpp
    $ mkdir build && cd build
    $ sudo cmake ..
    $ sudo make
    $ cp ./yolov5 ../
    

    2.6 Run

    $ ./yolov5 model_zoo/perspec_yolov5l_v0.0.1.mnn test.jpg test_detect.jpg

    [Figure: test.jpg, input image]

    [Figure: test_detect.jpg, detection result]

    3. References

  • Original article: https://blog.csdn.net/tianzhaixing/article/details/126828810