TensorRT classification model construction and inference

Sample code for TensorRT classification model construction and inference, classifier.cpp. The program builds and serializes a TensorRT engine from an ONNX model if no engine file exists yet, then runs single-image classification with it.

// tensorRT include
// header file for compilation
#include <NvInfer.h>

// header file for onnx parser
#include <NvOnnxParser.h>

// Runtime headers for inference
#include <NvInferRuntime.h>

// cuda include
#include <cuda_runtime.h>

// system include
#include <stdio.h>
#include <math.h>

#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <functional>
#include <unistd.h>
#include <chrono>
#include <algorithm> // std::max_element

#include <opencv2/opencv.hpp>

using namespace std;

#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)

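// Wraps a CUDA runtime call: on failure, prints the failing expression, file,
// line, error name, and error message, then returns false.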
bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line)
{
    if(code != cudaSuccess)
    {
        const char* err_name = cudaGetErrorName(code);
        const char* err_message = cudaGetErrorString(code);
        printf("runtime error %s:%d %s failed. \\
 code = %s, message = %s\\
", file, line, op, err_name, err_message);
        return false;
    }
    return true;
}


class TRTLogger : public nvinfer1::ILogger
{
public:
    virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
    {
        if(severity <= Severity::kINFO)
        {
            // Print colored text; the format is:
            // printf("\033[47;33m printed text\033[0m");
            // where \033[ starts the escape sequence,
            // 47 is the background color,
            // ; is the delimiter,
            // 33 is the text color,
            // and m ends the sequence.
            // \033[0m resets the formatting.
            // The background color or the text color may be omitted.
            // Some color codes: https://blog.csdn.net/ericbar/article/details/79652086
            if(severity == Severity::kWARNING)
            {
                printf("\033[33m%s: %s\033[0m\n", severity_string(severity), msg);
            }
            else if(severity <= Severity::kERROR)
            {
                printf("\033[31m%s: %s\033[0m\n", severity_string(severity), msg);
            }
            else
            {
                printf("%s: %s\n", severity_string(severity), msg);
            }
        }
    }

    inline const char* severity_string(nvinfer1::ILogger::Severity t)
    {
        switch(t)
        {
            case nvinfer1::ILogger::Severity::kINTERNAL_ERROR: return "internal_error";
            case nvinfer1::ILogger::Severity::kERROR: return "error";
            case nvinfer1::ILogger::Severity::kWARNING: return "warning";
            case nvinfer1::ILogger::Severity::kINFO: return "info";
            case nvinfer1::ILogger::Severity::kVERBOSE: return "verbose";
            default: return "unknown";
        }
    }
};

// Manage the pointer parameters returned by nv through smart pointers
// The memory is automatically released to avoid leaks
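// Usage: auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));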
template<typename _T>
shared_ptr<_T> make_nvshared(_T* ptr)
{
    return shared_ptr<_T>(ptr, [](_T* p){p->destroy();});
}

bool exists(const string & path)
{
    return access(path.c_str(), R_OK) == 0;
}


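// Build flow: builder -> network (populated by the ONNX parser) -> config +
// optimization profile (dynamic batch) -> engine -> serialized engine file.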
bool build_model(const std::string &onnx_model_file, const std::string &engine_file, int max_batch_size = 10)
{
    if(not exists(onnx_model_file))
    {
        printf("%s not has exists.\\
", onnx_model_file.c_str());
        return false;
    }

    TRTLogger logger;

    // This is the basic required component
    auto builder = make_nvshared(nvinfer1::createInferBuilder(logger));
    auto config = make_nvshared(builder->createBuilderConfig());
    auto network = make_nvshared(builder->createNetworkV2(1)); // 1 == kEXPLICIT_BATCH flag

    // The onnx parser fills the network with layers, much like adding them by hand via addConvolution etc.
    auto parser = make_nvshared(nvonnxparser::createParser(*network, logger));
    if(!parser->parseFromFile(onnx_model_file.c_str(), 1))
    {
        printf("Failed to parse %s\\
", onnx_model_file.c_str());
        return false;
    }
    
    
    printf("Workspace Size = %.2f MB\\
", (1 << 28) / 1024.0f / 1024.0f);
    config->setMaxWorkspaceSize(1 << 28);

    // The optimization profile must cover every dynamic input; a model with multiple inputs needs each one configured here
    auto profile = builder->createOptimizationProfile();
    auto input_tensor = network->getInput(0);
    auto input_dims = input_tensor->getDimensions();
    
    // Configure the minimum, optimal, and maximum ranges
    input_dims.d[0] = 1;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
    input_dims.d[0] = max_batch_size;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    config->addOptimizationProfile(profile);

    auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
    if(engine == nullptr)
    {
        printf("Build engine failed.\\
");
        return false;
    }

    // Serialize the model and save it as a file
    auto model_data = make_nvshared(engine->serialize());
    FILE* f = fopen(engine_file.c_str(), "wb");
    fwrite(model_data->data(), 1, model_data->size(), f);
    fclose(f);

    // Destruction happens in the reverse order of construction,
    // handled automatically by the shared_ptr deleters
    printf("Done.\n");
    return true;
}

///

vector<unsigned char> load_file(const string & file)
{
    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
        return {};

    in.seekg(0, ios::end);
    size_t length = in.tellg();

    std::vector<uint8_t> data;
    if (length > 0)
    {
        in.seekg(0, ios::beg);
        data.resize(length);

        in.read((char*)&data[0], length);
    }
    in.close();
    return data;
}

vector<string> load_labels(const char* file)
{
    vector<string> lines;

    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
    {
        printf("open %d failed.\\
", file);
        return lines;
    }
    
    string line;
    while(getline(in, line))
    {
        lines.push_back(line);
    }
    in.close();
    return lines;
}

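// Inference flow: load the serialized engine -> deserialize -> create an
// execution context -> preprocess the image on the host -> copy to device ->
// enqueue -> copy the output back -> argmax over the class scores.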
void inference(const std::string &engine_file)
{

    TRTLogger logger;
    auto engine_data = load_file(engine_file);
    auto runtime = make_nvshared(nvinfer1::createInferRuntime(logger));
    auto engine = make_nvshared(runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
    if(engine == nullptr)
    {
        printf("Deserialize cuda engine failed.\n");
        // runtime is released automatically by its shared_ptr deleter
        return;
    }

    cudaStream_t stream = nullptr;
    checkRuntime(cudaStreamCreate(&stream));
    auto execution_context = make_nvshared(engine->createExecutionContext());

    int input_batch = 1;
    int input_channel = 3;
    int input_height = 224;
    int input_width = 224;
    int input_numel = input_batch * input_channel * input_height * input_width;
    float* input_data_host = nullptr;
    float* input_data_device = nullptr;
    checkRuntime(cudaMallocHost(&input_data_host, input_numel * sizeof(float)));
    checkRuntime(cudaMalloc(&input_data_device, input_numel * sizeof(float)));

    ///
    // image to float
    auto image = cv::imread("./images/0.jpg");
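    // ImageNet mean/std, written in BGR order to match OpenCV's BGR pixel layout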
    float mean[] = {0.406, 0.456, 0.485};
    float std[] = {0.225, 0.224, 0.229};

    // Mirrors the PyTorch preprocessing: resize, scale to [0,1], normalize with mean/std, HWC BGR -> CHW RGB
    cv::resize(image, image, cv::Size(input_width, input_height));
    int image_area = image.cols * image.rows;
    unsigned char* pimage = image.data;
    float* phost_b = input_data_host + image_area * 0;
    float* phost_g = input_data_host + image_area * 1;
    float* phost_r = input_data_host + image_area * 2;
    for(int i = 0; i < image_area; ++i, pimage += 3){
        // Note: the channel order is swapped here (BGR -> RGB)
        *phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
        *phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
        *phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
    }
    ///
    checkRuntime(cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream));

    // Allocate the output buffer; num_classes must match the model's output dimension
    const int num_classes = 512;
    float output_data_host[num_classes];
    float* output_data_device = nullptr;
    checkRuntime(cudaMalloc(&output_data_device, sizeof(output_data_host)));

    // The batch dimension is dynamic, so set the actual input shape for this inference
    auto input_dims = execution_context->getBindingDimensions(0);
    input_dims.d[0] = input_batch;
    execution_context->setBindingDimensions(0, input_dims);
    float* bindings[] = {input_data_device, output_data_device};
    bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
    if(!success) printf("Enqueue failed.\n");
    checkRuntime(cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream));
    checkRuntime(cudaStreamSynchronize(stream));
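    // cudaStreamSynchronize blocks until the async copy has finished,
    // so output_data_host is safe to read from here on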

    float* prob = output_data_host;
    int predict_label = std::max_element(prob, prob + num_classes) - prob; // index of the predicted class
    auto labels = load_labels("labels.imagenet.txt");
    auto predict_name = labels[predict_label];
    float confidence = prob[predict_label]; // Get the confidence of the predicted value
    printf("Predict: %s, confidence = %f, label = %d\\
", predict_name.c_str(), confidence, predict_label);

    checkRuntime(cudaStreamDestroy(stream));
    checkRuntime(cudaFreeHost(input_data_host));
    checkRuntime(cudaFree(input_data_device));
    checkRuntime(cudaFree(output_data_device));
}

int main()
{
    std::string onnx_model_file = "./models/pplcnet.onnx";
    std::string engine_file = "./models/pplcnet_test.engine";

    if (not exists(engine_file))
    {
        if(!build_model(onnx_model_file, engine_file))
        {
            return -1;
        }
    }

    inference(engine_file);
    return 0;
}
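A note on the confidence value: prob holds whatever the ONNX graph outputs. If the exported model ends with a Softmax layer, these values are already probabilities; if it outputs raw logits, the printed "confidence" is a logit. In the latter case the output can be normalized on the host first. A minimal sketch of such a helper (hypothetical, not part of the original sample):

#include <vector>
#include <cmath>
#include <algorithm>

// Numerically stable softmax: converts n raw logits into probabilities.
std::vector<float> softmax(const float* logits, int n)
{
    // Subtract the max logit before exponentiating to avoid overflow
    float max_logit = *std::max_element(logits, logits + n);
    std::vector<float> probs(n);
    float sum = 0.0f;
    for(int i = 0; i < n; ++i)
    {
        probs[i] = std::exp(logits[i] - max_logit);
        sum += probs[i];
    }
    for(int i = 0; i < n; ++i)
        probs[i] /= sum;
    return probs;
}

// Usage inside inference(), after the stream synchronize:
// auto probs = softmax(output_data_host, num_classes);
// float confidence = probs[predict_label];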

CMakeLists.txt

cmake_minimum_required(VERSION 3.10)
project(pro VERSION 1.0.0 LANGUAGES C CXX CUDA)

option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/workspace/bin)

set(CUDA_GEN_CODE "-gencode=arch=compute_86,code=sm_86")
set(OpenCV_DIR "/opt/opencv4.7.0/lib/cmake/opencv4/")
set(CUDA_DIR "/usr/local/cuda-11.8/")
set(CUDNN_DIR "/usr/local/cuda-11.8/")
set(TENSORRT_DIR "/opt/TensorRT-8.6.1.6")

find_package(CUDA REQUIRED)
find_package(OpenCV)

include_directories(
    ${CUDA_DIR}/include
    ${CUDNN_DIR}/include
    ${TENSORRT_DIR}/include
)

link_directories(
    ${CUDA_DIR}/lib64
    ${CUDNN_DIR}/lib64
    ${TENSORRT_DIR}/lib
)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -O0 -Wfatal-errors -pthread -w -g")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11 -O0 -Xcompiler -fPIC -g -w ${CUDA_GEN_CODE}")

set(CUDA_LIBS
    cuda
    cublas
    cudart
    cudnn
)

set(TRT_LIBS
    nvinfer
    nvinfer_plugin
    nvonnxparser
)

set(srcs
    ${PROJECT_SOURCE_DIR}/src/classifier.cpp
)

add_executable(pro ${srcs})

target_link_libraries(pro ${TRT_LIBS} ${CUDA_LIBS} pthread stdc++ dl)
target_link_libraries(pro ${OpenCV_LIBS})
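
Note that CUDA_GEN_CODE pins the build to compute capability 8.6 (Ampere, e.g. RTX 30-series GPUs). For a different target GPU, adjust the architecture to match; for example, for a Turing card (compute capability 7.5):

set(CUDA_GEN_CODE "-gencode=arch=compute_75,code=sm_75")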