    [Visual SLAM: Fourteen Lectures, 2nd Edition] [Textbook Example Code] [Lecture 11: Loop Closure] [Installing DBoW3] [Creating a Vocabulary] [Similarity Detection] [Enlarging the Vocabulary]

    0 Preface

    • References:
    1. Notes and exercises for "Visual SLAM: Fourteen Lectures, 2nd Edition" (Lecture 11)
    2. Visual SLAM: Fourteen Lectures, CH11 code analysis and detailed exercise solutions

    1 Installing DBoW3

    1. First obtain the source package. As it turns out, you need the copy bundled with Gao Xiang's first-edition gaoxiang12/slambook repository; download: link: https://pan.baidu.com/s/1CfUpOdoVtaQoaMhcoNOt1w extraction code: 7gvi
    2. Build and install (a small verification sketch follows the commands below):
    cd DBow3/
    mkdir build
    cd build/
    
    cmake ..
    make -j4
    sudo make install
    
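    Before moving on, it can be worth checking that the headers and the static library really landed under /usr/local, since the CMake files later in this post assume those paths. The following is a minimal sketch of my own (not part of the original post); it only constructs an empty vocabulary and prints its info, and needs to be compiled and linked against libDBoW3.a plus OpenCV (DBoW3 depends on OpenCV).

    // check_dbow3.cpp -- minimal sketch to verify the DBoW3 installation
    #include "DBoW3/DBoW3.h"
    #include <iostream>

    int main() {
        DBoW3::Vocabulary vocab;   // empty vocabulary, default parameters k=10, L=5
        std::cout << "DBoW3 vocabulary: " << vocab << std::endl;
        std::cout << "empty? " << (vocab.empty() ? "yes" : "no") << std::endl;
        return 0;
    }

    If this compiles, links and prints the (empty) vocabulary info, the installation is usable by the projects below.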

    2 Creating a Vocabulary

    1. This project needs a dataset of ten images; download: link: https://pan.baidu.com/s/19I1holzTCyWhzdI1RpOBeg extraction code: khln
    2. The complete project code: link: https://pan.baidu.com/s/1NV4LZwt7FV-25ef0wNOHIg extraction code: oh59

    2.1 feature_training.cpp

    #include "DBoW3/DBoW3.h"//词袋支持头文件
    #include //opencv核心模块
    #include //gui模块
    #include //特征点头文件
    #include 
    #include 
    #include 
    
    using namespace cv;
    using namespace std;
    
    /***************************************************
     * 本节演示了如何根据data/目录下的十张图训练字典
     * ************************************************/
    
    int main( int argc, char** argv )
    {
        // read the image
        cout<<"reading images... "<<endl;//输出reading images...
        vector<Mat> images; //图像
        for ( int i=0; i<10; i++ )//遍历读取十张图像
        {
            string path = "./data/"+to_string(i+1)+".png";
            images.push_back( imread(path) );
        }
        // detect ORB features
        cout<<"detecting ORB features ... "<<endl;//输出detecting ORB features(正在检测ORB特征) ...
        Ptr< Feature2D > detector = ORB::create();
        vector<Mat> descriptors;//描述子
        for ( Mat& image:images )
        {
            vector<KeyPoint> keypoints; //关键点
            Mat descriptor;//描述子
            detector->detectAndCompute( image, Mat(), keypoints, descriptor );//检测和计算
            descriptors.push_back( descriptor );
        }
    
        // create vocabulary (创建字典)
        cout<<"creating vocabulary ... "<<endl;//输出(creating vocabulary ...)创建字典
        DBoW3::Vocabulary vocab;//默认构造函数 k=10,d=5
        vocab.create( descriptors );
        cout<<"vocabulary info: "<<vocab<<endl;//字典信息
        vocab.save( "vocabulary.yml.gz" );//保存字典压缩包
        cout<<"done"<<endl;//输出done
    
        return 0;
    }
    
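    The comment on the default constructor above notes k=10 and d=5 (branching factor 10, depth L=5), which caps the vocabulary at k^L = 10^5 = 100,000 words. If you want a differently shaped tree, DBoW3's Vocabulary constructor also accepts the branching factor, depth, weighting and scoring types explicitly. The sketch below assumes the usual Vocabulary(k, L, weighting, scoring) signature; check Vocabulary.h of your DBoW3 copy if it differs, and note the parameter values are purely illustrative.

    // Sketch: train a smaller vocabulary with explicit parameters (assumed signature).
    #include "DBoW3/DBoW3.h"
    #include <opencv2/core/core.hpp>
    #include <vector>

    void train_small_vocab(const std::vector<cv::Mat> &descriptors)
    {
        // k = 9 branches, depth L = 3  ->  at most 9^3 = 729 words
        DBoW3::Vocabulary small_vocab(9, 3, DBoW3::TF_IDF, DBoW3::L1_NORM);
        small_vocab.create(descriptors);
        small_vocab.save("vocab_small.yml.gz");
    }

    A smaller tree trains faster but distinguishes fewer visual words, so loop-closure scores become less discriminative; the k=10, L=5 default is what the book uses throughout.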

    2.2 CMakeLists.txt

    cmake_minimum_required( VERSION 2.8 )
    project( loop_closure )
    
    set( CMAKE_BUILD_TYPE "Release" )
    set( CMAKE_CXX_FLAGS "-std=c++14 -O3" )
    
    # opencv
    find_package( OpenCV 3.1 REQUIRED )
    include_directories( ${OpenCV_INCLUDE_DIRS} )
    
    # dbow3
    # dbow3 is a simple lib so I assume you installed it in default directory
    set( DBoW3_INCLUDE_DIRS "/usr/local/include" )
    set( DBoW3_LIBS "/usr/local/lib/libDBoW3.a" )
    
    add_executable( feature_training src/feature_training.cpp )
    target_link_libraries( feature_training ${OpenCV_LIBS} ${DBoW3_LIBS} )
    
    

    2.3 Output

    1. This generates a vocabulary.yml.gz file, which is needed later; download: link: https://pan.baidu.com/s/1i4zAnHM2BeycBi_GwF2fnA extraction code: sbsf
    2. Run command:
    ./feature_training ../data/
    /home/bupo/my_study/slam14/slam14_my/cap11/feature_training/cmake-build-debug/feature_training ./data
    reading images... 
    detecting ORB features ... 
    [ INFO:0] Initialize OpenCL runtime...
    creating vocabulary ... 
    vocabulary info: Vocabulary: k = 10, L = 5, Weighting = tf-idf, Scoring = L1-norm, Number of words = 4970
    done
    
    Process finished with exit code 0
    
    • As the output shows, the branching factor k is 10, the depth L is 5, and the vocabulary contains 4970 words, well below the maximum capacity of k^L = 10^5 = 100,000. Weighting is the word-weighting scheme (TF-IDF here) and Scoring is the similarity metric (L1-norm here).

    3 Computing Similarity

    1. This project needs the same ten-image dataset; download: link: https://pan.baidu.com/s/19I1holzTCyWhzdI1RpOBeg extraction code: khln
    2. The previous project produced a vocabulary.yml.gz file, which is used here; download: link: https://pan.baidu.com/s/1i4zAnHM2BeycBi_GwF2fnA extraction code: sbsf
    3. The project itself: link: https://pan.baidu.com/s/12E6fvy2DV9-BqOuXtFigYw extraction code: hmp2

    3.1 loop_closure.cpp

    #include "DBoW3/DBoW3.h"//词袋支持头文件
    #include //opencv核心模块
    #include //gui模块
    #include //特征点头文件
    #include 
    #include 
    #include 
    
    using namespace cv;
    using namespace std;
    
    /***************************************************
     * 本节演示了如何根据前面训练的字典计算相似性评分
     * ************************************************/
    int main(int argc, char **argv) {
        if (argc != 2) {
            cout << "Usage: 需要字典" << endl;
            return 1;
        }
        string zidian_file = argv[1];
        DBoW3::Vocabulary vocab(zidian_file);
    
        // read the images and database(读取图像和数据库)
        cout << "reading database" << endl;//输出reading database(读取数据)
        //DBoW3::Vocabulary vocab("../src/vocabulary.yml.gz");//vocabulary.yml.gz路径
        //DBoW3::Vocabulary vocab("../src/vocab_larger.yml.gz");  // use large vocab if you want:
        if (vocab.empty()) {
            cerr << "Vocabulary does not exist." << endl;//输出Vocabulary does not exist
            return 1;
        }
        cout << "reading images... " << endl;//输出reading images...
        vector<Mat> images;
        for (int i = 0; i < 10; i++) {
            string path = "../data/" + to_string(i + 1) + ".png";//图像读取路径
            images.push_back(imread(path));
        }
    
        // NOTE: in this case we are comparing images with a vocabulary generated by themselves, this may lead to overfit. 这里我们用它们生成的字典比较它们本身的相似性,这可能会产生过拟合
        // detect ORB features
        cout << "detecting ORB features ... " << endl;//输出detecting ORB features ...(正在检测ORB特征)
        Ptr<Feature2D> detector = ORB::create();//默认图像500个特征点
        vector<Mat> descriptors;//描述子  将10张图像提取ORB特征并存放在vector容器里
        for (Mat &image:images) {
            vector<KeyPoint> keypoints;//关键点
            Mat descriptor;//描述子
            detector->detectAndCompute(image, Mat(), keypoints, descriptor);//检测和计算
            descriptors.push_back(descriptor);
        }
    
        // we can compare the images directly or we can compare one image to a database
        // images :
        cout << "comparing images with images " << endl;//输出comparing images with images
        for (int i = 0; i < images.size(); i++)
        {
            DBoW3::BowVector v1;
            //descriptors[i]表示图像i中所有的ORB描述子集合,函数transform()计算出用先前字典来描述的单词向量,每个向量中元素的值要么是0,表示图像i中没有这个单词;要么是该单词的权重
            //BoW描述向量中含有每个单词的ID和权重,两者构成了整个稀疏的向量
            //当比较两个向量时,DBoW3会为我们计算一个分数
            vocab.transform(descriptors[i], v1);
            for (int j = i; j < images.size(); j++)
            {
                DBoW3::BowVector v2;
                vocab.transform(descriptors[j], v2);
                double score = vocab.score(v1, v2);//p296式(11.9)
                cout << "image " << i << " vs image " << j << " : " << score << endl;//输出一幅图像与另外一幅图像之间的相似度评分
            }
            cout << endl;
        }
    
        // or with database
        //在进行数据库查询时,DBoW对上面的分数进行排序,给出最相似的结果
        cout << "comparing images with database " << endl;
        DBoW3::Database db(vocab, false, 0);
        for (int i = 0; i < descriptors.size(); i++)
            db.add(descriptors[i]);
        cout << "database info: " << db << endl;//输出database info(数据库信息)为
        for (int i = 0; i < descriptors.size(); i++)
        {
            DBoW3::QueryResults ret;
            db.query(descriptors[i], ret, 4);      // max result=4
            cout << "searching for image " << i << " returns " << ret << endl << endl;
        }
        cout << "done." << endl;
    }
    
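    The comment above describes the BowVector as a sparse list of (word ID, weight) pairs. Below is a minimal sketch of how one could inspect it; it assumes, as in DBoW2/DBoW3, that BowVector behaves like a map from word ID to weight (check BowVector.h of your DBoW3 version), and the function name is mine.

    // Sketch: print the words present in one image and their weights.
    #include "DBoW3/DBoW3.h"
    #include <opencv2/core/core.hpp>
    #include <iostream>

    void print_bow_vector(DBoW3::Vocabulary &vocab, const cv::Mat &descriptors_of_one_image)
    {
        DBoW3::BowVector v;
        vocab.transform(descriptors_of_one_image, v);    // descriptors -> sparse word vector
        std::cout << "image uses " << v.size() << " distinct words" << std::endl;
        for (const auto &word : v)                       // word.first = word ID, word.second = weight
            std::cout << "  word " << word.first << " weight " << word.second << std::endl;
    }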

    3.2 CMakeLists.txt

    cmake_minimum_required( VERSION 2.8 )
    project( loop_closure )
    
    set( CMAKE_BUILD_TYPE "Release" )
    set( CMAKE_CXX_FLAGS "-std=c++14 -O3" )
    
    # opencv
    find_package( OpenCV 3.1 REQUIRED )
    include_directories( ${OpenCV_INCLUDE_DIRS} )
    
    # dbow3
    # dbow3 is a simple lib so I assume you installed it in default directory
    set( DBoW3_INCLUDE_DIRS "/usr/local/include" )
    set( DBoW3_LIBS "/usr/local/lib/libDBoW3.a" )
    
    add_executable( loop_closure src/loop_closure.cpp )
    target_link_libraries( loop_closure ${OpenCV_LIBS} ${DBoW3_LIBS} )
    
    

    3.3 Output

    1. Run command, passing the path to the vocabulary file as the only argument, for example:
    ./loop_closure ../src/vocabulary.yml.gz
    /home/bupo/my_study/slam14/slam14_my/cap11/loop_closure/cmake-build-debug/loop_closure
    reading database
    reading images... 
    detecting ORB features ... 
    [ INFO:0] Initialize OpenCL runtime...
    comparing images with images 
    image 0 vs image 0 : 1
    image 0 vs image 1 : 0.0367529
    image 0 vs image 2 : 0.0277822
    image 0 vs image 3 : 0.0281337
    image 0 vs image 4 : 0.0335461
    image 0 vs image 5 : 0.0427682
    image 0 vs image 6 : 0.038458
    image 0 vs image 7 : 0.0305787
    image 0 vs image 8 : 0.0295247
    image 0 vs image 9 : 0.0621202
    
    image 1 vs image 1 : 1
    image 1 vs image 2 : 0.0371357
    image 1 vs image 3 : 0.0300564
    image 1 vs image 4 : 0.0339359
    image 1 vs image 5 : 0.0412664
    image 1 vs image 6 : 0.0226204
    image 1 vs image 7 : 0.0304568
    image 1 vs image 8 : 0.0426928
    image 1 vs image 9 : 0.033033
    
    image 2 vs image 2 : 1
    image 2 vs image 3 : 0.0318231
    image 2 vs image 4 : 0.0271078
    image 2 vs image 5 : 0.0269704
    image 2 vs image 6 : 0.0233219
    image 2 vs image 7 : 0.0497462
    image 2 vs image 8 : 0.0374033
    image 2 vs image 9 : 0.0292551
    
    image 3 vs image 3 : 1
    image 3 vs image 4 : 0.0348777
    image 3 vs image 5 : 0.0366132
    image 3 vs image 6 : 0.0424612
    image 3 vs image 7 : 0.0172669
    image 3 vs image 8 : 0.032024
    image 3 vs image 9 : 0.042369
    
    image 4 vs image 4 : 1
    image 4 vs image 5 : 0.0627885
    image 4 vs image 6 : 0.041152
    image 4 vs image 7 : 0.0233412
    image 4 vs image 8 : 0.0198614
    image 4 vs image 9 : 0.0288873
    
    image 5 vs image 5 : 1
    image 5 vs image 6 : 0.0319993
    image 5 vs image 7 : 0.0236407
    image 5 vs image 8 : 0.0263738
    image 5 vs image 9 : 0.0306995
    
    image 6 vs image 6 : 1
    image 6 vs image 7 : 0.0345444
    image 6 vs image 8 : 0.0376772
    image 6 vs image 9 : 0.0297798
    
    image 7 vs image 7 : 1
    image 7 vs image 8 : 0.0315193
    image 7 vs image 9 : 0.0284877
    
    image 8 vs image 8 : 1
    image 8 vs image 9 : 0.040886
    
    image 9 vs image 9 : 1
    
    comparing images with database 
    database info: Database: Entries = 10, Using direct index = no. Vocabulary: k = 10, L = 5, Weighting = tf-idf, Scoring = L1-norm, Number of words = 4983
    searching for image 0 returns 4 results:
    <EntryId: 0, Score: 1>
    <EntryId: 9, Score: 0.0621202>
    <EntryId: 5, Score: 0.0427682>
    <EntryId: 6, Score: 0.038458>
    
    searching for image 1 returns 4 results:
    <EntryId: 1, Score: 1>
    <EntryId: 8, Score: 0.0426928>
    <EntryId: 5, Score: 0.0412664>
    <EntryId: 2, Score: 0.0371357>
    
    searching for image 2 returns 4 results:
    <EntryId: 2, Score: 1>
    <EntryId: 7, Score: 0.0497462>
    <EntryId: 8, Score: 0.0374033>
    <EntryId: 1, Score: 0.0371357>
    
    searching for image 3 returns 4 results:
    <EntryId: 3, Score: 1>
    <EntryId: 6, Score: 0.0424612>
    <EntryId: 9, Score: 0.042369>
    <EntryId: 5, Score: 0.0366132>
    
    searching for image 4 returns 4 results:
    <EntryId: 4, Score: 1>
    <EntryId: 5, Score: 0.0627885>
    <EntryId: 6, Score: 0.041152>
    <EntryId: 3, Score: 0.0348777>
    
    searching for image 5 returns 4 results:
    <EntryId: 5, Score: 1>
    <EntryId: 4, Score: 0.0627885>
    <EntryId: 0, Score: 0.0427682>
    <EntryId: 1, Score: 0.0412664>
    
    searching for image 6 returns 4 results:
    <EntryId: 6, Score: 1>
    <EntryId: 3, Score: 0.0424612>
    <EntryId: 4, Score: 0.041152>
    <EntryId: 0, Score: 0.038458>
    
    searching for image 7 returns 4 results:
    <EntryId: 7, Score: 1>
    <EntryId: 2, Score: 0.0497462>
    <EntryId: 6, Score: 0.0345444>
    <EntryId: 8, Score: 0.0315193>
    
    searching for image 8 returns 4 results:
    <EntryId: 8, Score: 1>
    <EntryId: 1, Score: 0.0426928>
    <EntryId: 9, Score: 0.040886>
    <EntryId: 6, Score: 0.0376772>
    
    searching for image 9 returns 4 results:
    <EntryId: 9, Score: 1>
    <EntryId: 0, Score: 0.0621202>
    <EntryId: 3, Score: 0.042369>
    <EntryId: 8, Score: 0.040886>
    
    done.
    
    Process finished with exit code 0
    
    
    • This shows how much the scores differ between similar and dissimilar images. The clearly similar image 1 and image 10 (indices 0 and 9 in the C++ code) score about 0.062 in the run above, while the other pairs score only around 0.02 to 0.04.
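    Because these raw scores vary with the vocabulary and the scene, the book suggests comparing them to a prior similarity (the score between the current frame and a recent previous frame) rather than thresholding them directly. The sketch below only illustrates that idea; the function, the keyframe container and the 3.0 threshold are my assumptions, not code from the original project.

    // Sketch: relative similarity test for loop-closure candidates.
    // Each candidate score s(v_t, v_j) is divided by the prior score s(v_t, v_{t-dt}),
    // which makes the decision less sensitive to the vocabulary and the environment.
    #include "DBoW3/DBoW3.h"
    #include <vector>

    std::vector<int> loop_candidates(DBoW3::Vocabulary &vocab,
                                     const std::vector<DBoW3::BowVector> &keyframes,
                                     const DBoW3::BowVector &current,
                                     const DBoW3::BowVector &previous,   // frame at t - dt
                                     double threshold = 3.0)             // illustrative value
    {
        std::vector<int> candidates;
        const double prior = vocab.score(current, previous);  // prior similarity
        if (prior <= 0) return candidates;                     // no reliable prior
        for (int j = 0; j < static_cast<int>(keyframes.size()); ++j) {
            const double s = vocab.score(current, keyframes[j]);
            if (s / prior > threshold)                          // relative score test
                candidates.push_back(j);
        }
        return candidates;
    }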

    4 Enlarging the Vocabulary

    1. This requires the corresponding dataset, which can be downloaded from the TUM website: https://vision.in.tum.de/data/datasets/rgbd-dataset/download#, or from the network drive: link: https://pan.baidu.com/s/1bg6L8u9RF0YtpkzYX38MAw extraction code: 6dj4
    2. The complete project, excluding the dataset: link: https://pan.baidu.com/s/1LIhMVq8FlUsOA0Sk1IlD5g extraction code: kv9b

    4.1 gen_vocab_large.cpp

    #include "DBoW3/DBoW3.h"//词袋支持头文件
    #include //opencv核心模块
    #include //gui模块
    #include //特征点头文件
    #include 
    #include 
    #include 
    
    using namespace cv;
    using namespace std;
    
    
    int main( int argc, char** argv )
    {
        String directoryPath = "/home/bupo/my_study/slam14/slam14_my/cap11/gen_vocab_large/rgbd_dataset_freiburg1_desk2/rgb";//图像路径
        vector<String> imagesPath;
        glob(directoryPath, imagesPath);
        // string dataset_dir = argv[1];
        // ifstream fin ( dataset_dir+"/home/liqiang/slambook2/ch11/rgbd_dataset_freiburg1_desk2/rgb" );
        // if ( !fin )
        // {
        //     cout<<"please generate the associate file called associate.txt!"<
        //     return 1;
        // }
    
        // vector rgb_files, depth_files;
        // vector rgb_times, depth_times;
        // while ( !fin.eof() )
        // {
        //     string rgb_time, rgb_file, depth_time, depth_file;
        //     fin>>rgb_time>>rgb_file>>depth_time>>depth_file;
        //     rgb_times.push_back ( atof ( rgb_time.c_str() ) );
        //     depth_times.push_back ( atof ( depth_time.c_str() ) );
        //     rgb_files.push_back ( dataset_dir+"/"+rgb_file );
        //     depth_files.push_back ( dataset_dir+"/"+depth_file );
    
        //     if ( fin.good() == false )
        //         break;
        // }
        // fin.close();
    
        cout<<"generating features ... "<<endl;//输出generating features (正在检测ORB特征)...
        vector<Mat> descriptors;//描述子
        Ptr< Feature2D > detector = ORB::create();
        int index = 1;
        for ( String path : imagesPath )
        {
            Mat image = imread(path);
            vector<KeyPoint> keypoints; //关键点
            Mat descriptor;//描述子
            detector->detectAndCompute( image, Mat(), keypoints, descriptor );
            descriptors.push_back( descriptor );
            cout<<"extracting features from image " << index++ <<endl;//输出extracting features from image(从图像中提取特征)
        }
        cout<<"extract total "<<descriptors.size()*500<<" features."<<endl;
    
        // create vocabulary
        cout<<"creating vocabulary, please wait ... "<<endl;//输出creating vocabulary, please wait (创建词典,请稍等)...
        DBoW3::Vocabulary vocab;
        vocab.create( descriptors );
        cout<<"vocabulary info: "<<vocab<<endl;
        vocab.save( "vocab_larger.yml.gz" );//保存词典
        cout<<"done"<<endl;
    
        return 0;
    }
    
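    Note that the `descriptors.size()*500` line in the code above is only an upper bound: ORB::create() asks for up to 500 keypoints per image, but fewer may be detected in some frames. A small sketch of counting the descriptors actually extracted (my addition, not in the original program):

    // Sketch: count the descriptors that were actually extracted instead of assuming 500 per image.
    #include <opencv2/core/core.hpp>
    #include <vector>
    #include <cstddef>

    std::size_t count_features(const std::vector<cv::Mat> &descriptors)
    {
        std::size_t total = 0;
        for (const cv::Mat &d : descriptors)
            total += d.rows;   // OpenCV stores one descriptor per row
        return total;
    }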

    4.2 CMakeLists.txt

    cmake_minimum_required( VERSION 2.8 )
    project( loop_closure )
    
    set( CMAKE_BUILD_TYPE "Release" )
    set( CMAKE_CXX_FLAGS "-std=c++14 -O3" )
    
    # opencv
    find_package( OpenCV 3.1 REQUIRED )
    include_directories( ${OpenCV_INCLUDE_DIRS} )
    
    # dbow3
    # dbow3 is a simple lib so I assume you installed it in default directory
    set( DBoW3_INCLUDE_DIRS "/usr/local/include" )
    set( DBoW3_LIBS "/usr/local/lib/libDBoW3.a" )
    
    add_executable( gen_vocab src/gen_vocab_large.cpp )
    target_link_libraries( gen_vocab ${OpenCV_LIBS} ${DBoW3_LIBS} )
    

    4.3 Output

    /home/bupo/my_study/slam14/slam14_my/cap11/gen_vocab_large/cmake-build-debug/gen_vocab
    generating features ... 
    [ INFO:0] Initialize OpenCL runtime...
    extracting features from image 1
    extracting features from image 2
    
    extracting features from image 639
    extracting features from image 640
    extract total 320000 features.
    creating vocabulary, please wait ... 
    vocabulary info: Vocabulary: k = 10, L = 5, Weighting = tf-idf, Scoring = L1-norm, Number of words = 89849
    done
    
    Process finished with exit code 0
    
    
    • This also produces the file vocab_larger.yml.gz, a vocabulary trained on the 640 images, which can be used in loop_closure. If you do not want to train it yourself, you can download my trained result directly: link: https://pan.baidu.com/s/1-L7uYoCKmzrLRqmTxwxxMg extraction code: 66kc

    4.4 Loop Closure with the Larger Vocabulary

    • Point loop_closure at the larger vocabulary (e.g. pass vocab_larger.yml.gz as the first argument, or switch to the commented-out vocab_larger line) and run the loop-closure comparison again:
    /home/bupo/my_study/slam14/slam14_my/cap11/loop_closure/cmake-build-debug/loop_closure
    reading database
    reading images... 
    detecting ORB features ... 
    [ INFO:0] Initialize OpenCL runtime...
    comparing images with images 
    image 0 vs image 0 : 1
    image 0 vs image 1 : 0.0264319
    image 0 vs image 2 : 0.0192686
    image 0 vs image 3 : 0.0174829
    image 0 vs image 4 : 0.00919418
    image 0 vs image 5 : 0.0196796
    image 0 vs image 6 : 0.0195631
    image 0 vs image 7 : 0.0156169
    image 0 vs image 8 : 0.0225417
    image 0 vs image 9 : 0.0543705
    
    image 1 vs image 1 : 1
    image 1 vs image 2 : 0.0305082
    image 1 vs image 3 : 0.0226657
    image 1 vs image 4 : 0.0116976
    image 1 vs image 5 : 0.0178306
    image 1 vs image 6 : 0.0277621
    image 1 vs image 7 : 0.0198653
    image 1 vs image 8 : 0.0267385
    image 1 vs image 9 : 0.0262276
    
    image 2 vs image 2 : 1
    image 2 vs image 3 : 0.0210716
    image 2 vs image 4 : 0.0280933
    image 2 vs image 5 : 0.0342187
    image 2 vs image 6 : 0.0247903
    image 2 vs image 7 : 0.0233946
    image 2 vs image 8 : 0.0221119
    image 2 vs image 9 : 0.0238046
    
    image 3 vs image 3 : 1
    image 3 vs image 4 : 0.0189549
    image 3 vs image 5 : 0.0257821
    image 3 vs image 6 : 0.0244571
    image 3 vs image 7 : 0.0196989
    image 3 vs image 8 : 0.0260922
    image 3 vs image 9 : 0.0240474
    
    image 4 vs image 4 : 1
    image 4 vs image 5 : 0.0398126
    image 4 vs image 6 : 0.0276248
    image 4 vs image 7 : 0.014187
    image 4 vs image 8 : 0.014109
    image 4 vs image 9 : 0.0167629
    
    image 5 vs image 5 : 1
    image 5 vs image 6 : 0.0176884
    image 5 vs image 7 : 0.0227182
    image 5 vs image 8 : 0.0249955
    image 5 vs image 9 : 0.0248106
    
    image 6 vs image 6 : 1
    image 6 vs image 7 : 0.0155511
    image 6 vs image 8 : 0.0323909
    image 6 vs image 9 : 0.0206828
    
    image 7 vs image 7 : 1
    image 7 vs image 8 : 0.0309826
    image 7 vs image 9 : 0.0322799
    
    image 8 vs image 8 : 1
    image 8 vs image 9 : 0.0163077
    
    image 9 vs image 9 : 1
    
    comparing images with database 
    database info: Database: Entries = 10, Using direct index = no. Vocabulary: k = 10, L = 5, Weighting = tf-idf, Scoring = L1-norm, Number of words = 89849
    searching for image 0 returns 4 results:
    <EntryId: 0, Score: 1>
    <EntryId: 9, Score: 0.0543705>
    <EntryId: 1, Score: 0.0264319>
    <EntryId: 8, Score: 0.0225417>
    
    searching for image 1 returns 4 results:
    <EntryId: 1, Score: 1>
    <EntryId: 2, Score: 0.0305082>
    <EntryId: 6, Score: 0.0277621>
    <EntryId: 8, Score: 0.0267385>
    
    searching for image 2 returns 4 results:
    <EntryId: 2, Score: 1>
    <EntryId: 5, Score: 0.0342187>
    <EntryId: 1, Score: 0.0305082>
    <EntryId: 4, Score: 0.0280933>
    
    searching for image 3 returns 4 results:
    <EntryId: 3, Score: 1>
    <EntryId: 8, Score: 0.0260922>
    <EntryId: 5, Score: 0.0257821>
    <EntryId: 6, Score: 0.0244571>
    
    searching for image 4 returns 4 results:
    <EntryId: 4, Score: 1>
    <EntryId: 5, Score: 0.0398126>
    <EntryId: 2, Score: 0.0280933>
    <EntryId: 6, Score: 0.0276248>
    
    searching for image 5 returns 4 results:
    <EntryId: 5, Score: 1>
    <EntryId: 4, Score: 0.0398126>
    <EntryId: 2, Score: 0.0342187>
    <EntryId: 3, Score: 0.0257821>
    
    searching for image 6 returns 4 results:
    <EntryId: 6, Score: 1>
    <EntryId: 8, Score: 0.0323909>
    <EntryId: 1, Score: 0.0277621>
    <EntryId: 4, Score: 0.0276248>
    
    searching for image 7 returns 4 results:
    <EntryId: 7, Score: 1>
    <EntryId: 9, Score: 0.0322799>
    <EntryId: 8, Score: 0.0309826>
    <EntryId: 2, Score: 0.0233946>
    
    searching for image 8 returns 4 results:
    <EntryId: 8, Score: 1>
    <EntryId: 6, Score: 0.0323909>
    <EntryId: 7, Score: 0.0309826>
    <EntryId: 1, Score: 0.0267385>
    
    searching for image 9 returns 4 results:
    <EntryId: 9, Score: 1>
    <EntryId: 0, Score: 0.0543705>
    <EntryId: 7, Score: 0.0322799>
    <EntryId: 1, Score: 0.0262276>
    done.
    Process finished with exit code 0
    
    • Original article: https://blog.csdn.net/qq_45954434/article/details/126055800