1 Star 0 Fork 0

chenyuming/yolov5_sort_cpp

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
yolov5_helmet.cpp 24.21 KB
一键复制 编辑 原始数据 按行查看 历史
chenyuming 提交于 2021-09-12 02:32 . init project
// NOTE(review): Every line in this file is commented out — it is dead code kept
// for reference (a tensorrtx-style YOLOv5 TensorRT build/inference driver for a
// helmet-detection model). The annotations added throughout describe the
// disabled code as written; re-enabling it would require the project headers
// (cuda_utils.h / logging.h / common.hpp / utils.h / calibrator.h) and TensorRT.
// #include <iostream>
// #include <chrono>
// #include <cmath>
// #include "cuda_utils.h"
// #include "logging.h"
// #include "common.hpp"
// #include "utils.h"
// #include "calibrator.h"
// #include <string>
// using namespace std;
// // using namespace cv;
// #define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32
// #define DEVICE 0 // GPU id
// #define NMS_THRESH 0.4
// #define CONF_THRESH 0.5
// #define BATCH_SIZE 1
// // stuff we know about the network and the input/output blobs
// static const int INPUT_H = Yolo::INPUT_H;
// static const int INPUT_W = Yolo::INPUT_W;
// static const int CLASS_NUM = Yolo::CLASS_NUM;
// static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1; // we assume the yololayer outputs no more than MAX_OUTPUT_BBOX_COUNT boxes that conf >= 0.1
// const char* INPUT_BLOB_NAME = "data";
// const char* OUTPUT_BLOB_NAME = "prob";
// static Logger gLogger;
// Scales a base channel count `x` by the width multiplier `gw`, rounding the
// result UP to a multiple of `divisor` (default 8) — standard YOLOv5 width scaling.
// static int get_width(int x, float gw, int divisor = 8) {
// return int(ceil((x * gw) / divisor)) * divisor;
// }
// Scales a base block-repeat count `x` by the depth multiplier `gd`.
// C's round() rounds halves away from zero; the extra branch decrements the
// result when the fractional part is exactly .5 and the integer part is even,
// reproducing Python's round-half-to-even used by the original YOLOv5 code.
// Result is clamped to at least 1.
// static int get_depth(int x, float gd) {
// if (x == 1) return 1;
// int r = round(x * gd);
// if (x * gd - int(x * gd) == 0.5 && (int(x * gd) % 2) == 0) {
// --r;
// }
// return std::max<int>(r, 1);
// }
// Builds a TensorRT engine for the standard (P5, 3 detect heads) YOLOv5 network
// using the layer API. `gd`/`gw` are depth/width multipliers selecting the model
// size; `wts_name` is the .wts weight file loaded into `weightMap`. Returns the
// built ICudaEngine (caller owns it); host-side weight buffers are freed here.
// NOTE(review): setMaxBatchSize/setMaxWorkspaceSize are the TensorRT-7-era
// implicit-batch APIs — confirm the target TRT version if this is re-enabled.
// ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
// INetworkDefinition* network = builder->createNetworkV2(0U);
// // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
// ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
// assert(data);
// std::map<std::string, Weights> weightMap = loadWeights(wts_name);
// /* ------ yolov5 backbone------ */
// auto focus0 = focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
// auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
// auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
// auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
// auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
// auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
// auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
// auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
// auto spp8 = SPP(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, 9, 13, "model.8");
// /* ------ yolov5 head ------ */
// auto bottleneck_csp9 = C3(network, weightMap, *spp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.9");
// auto conv10 = convBlock(network, weightMap, *bottleneck_csp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");
// // Nearest-neighbor upsample to match the stride-16 feature map, then concat (FPN top-down path).
// auto upsample11 = network->addResize(*conv10->getOutput(0));
// assert(upsample11);
// upsample11->setResizeMode(ResizeMode::kNEAREST);
// upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
// ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
// auto cat12 = network->addConcatenation(inputTensors12, 2);
// auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
// auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");
// auto upsample15 = network->addResize(*conv14->getOutput(0));
// assert(upsample15);
// upsample15->setResizeMode(ResizeMode::kNEAREST);
// upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
// ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
// auto cat16 = network->addConcatenation(inputTensors16, 2);
// auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");
// /* ------ detect ------ */
// // Three 1x1 conv detect heads (model.24.m.{0,1,2}) feeding the custom YOLO layer plugin.
// IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
// auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
// ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
// auto cat19 = network->addConcatenation(inputTensors19, 2);
// auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
// IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
// auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
// ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
// auto cat22 = network->addConcatenation(inputTensors22, 2);
// auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
// IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);
// auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2});
// yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
// network->markOutput(*yolo->getOutput(0));
// // Build engine
// builder->setMaxBatchSize(maxBatchSize);
// config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
// #if defined(USE_FP16)
// config->setFlag(BuilderFlag::kFP16);
// #elif defined(USE_INT8)
// std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
// assert(builder->platformHasFastInt8());
// config->setFlag(BuilderFlag::kINT8);
// // NOTE(review): this calibrator is new'd and never freed — leaks in the disabled code.
// Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
// config->setInt8Calibrator(calibrator);
// #endif
// std::cout << "Building engine, please wait for a while..." << std::endl;
// ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
// std::cout << "Build engine successfully!" << std::endl;
// // Don't need the network any more
// network->destroy();
// // Release host memory
// for (auto& mem : weightMap)
// {
// free((void*)(mem.second.values));
// }
// return engine;
// }
// Builds a TensorRT engine for the YOLOv5-P6 variant: a deeper backbone
// (extra 768-channel stage, SPP with kernels 3/5/7) and FOUR detect heads
// (model.33.m.{0..3}) instead of three. Otherwise mirrors build_engine():
// caller owns the returned ICudaEngine; host-side weights are freed here.
// ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
// INetworkDefinition* network = builder->createNetworkV2(0U);
// // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
// ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
// assert(data);
// std::map<std::string, Weights> weightMap = loadWeights(wts_name);
// /* ------ yolov5 backbone------ */
// auto focus0 = focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
// auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
// auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
// auto conv3 = convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
// auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
// auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
// auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
// auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7");
// auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
// auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9");
// auto spp10 = SPP(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), 3, 5, 7, "model.10");
// auto c3_11 = C3(network, weightMap, *spp10->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.11");
// /* ------ yolov5 head ------ */
// // Top-down FPN path: three upsample+concat+C3 stages down to the stride-8 map.
// auto conv12 = convBlock(network, weightMap, *c3_11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12");
// auto upsample13 = network->addResize(*conv12->getOutput(0));
// assert(upsample13);
// upsample13->setResizeMode(ResizeMode::kNEAREST);
// upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions());
// ITensor* inputTensors14[] = { upsample13->getOutput(0), c3_8->getOutput(0) };
// auto cat14 = network->addConcatenation(inputTensors14, 2);
// auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15");
// auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16");
// auto upsample17 = network->addResize(*conv16->getOutput(0));
// assert(upsample17);
// upsample17->setResizeMode(ResizeMode::kNEAREST);
// upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions());
// ITensor* inputTensors18[] = { upsample17->getOutput(0), c3_6->getOutput(0) };
// auto cat18 = network->addConcatenation(inputTensors18, 2);
// auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19");
// auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20");
// auto upsample21 = network->addResize(*conv20->getOutput(0));
// assert(upsample21);
// upsample21->setResizeMode(ResizeMode::kNEAREST);
// upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions());
// ITensor* inputTensors21[] = { upsample21->getOutput(0), c3_4->getOutput(0) };
// auto cat22 = network->addConcatenation(inputTensors21, 2);
// auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
// // Bottom-up PAN path back to the stride-64 map.
// auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24");
// ITensor* inputTensors25[] = { conv24->getOutput(0), conv20->getOutput(0) };
// auto cat25 = network->addConcatenation(inputTensors25, 2);
// auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26");
// auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27");
// ITensor* inputTensors28[] = { conv27->getOutput(0), conv16->getOutput(0) };
// auto cat28 = network->addConcatenation(inputTensors28, 2);
// auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29");
// auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30");
// ITensor* inputTensors31[] = { conv30->getOutput(0), conv12->getOutput(0) };
// auto cat31 = network->addConcatenation(inputTensors31, 2);
// auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32");
// /* ------ detect ------ */
// IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]);
// IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]);
// IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]);
// IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]);
// auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector<IConvolutionLayer*>{det0, det1, det2, det3});
// yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
// network->markOutput(*yolo->getOutput(0));
// // Build engine
// builder->setMaxBatchSize(maxBatchSize);
// config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
// #if defined(USE_FP16)
// config->setFlag(BuilderFlag::kFP16);
// #elif defined(USE_INT8)
// std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
// assert(builder->platformHasFastInt8());
// config->setFlag(BuilderFlag::kINT8);
// // NOTE(review): calibrator is new'd and never freed — same leak as build_engine().
// Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
// config->setInt8Calibrator(calibrator);
// #endif
// std::cout << "Building engine, please wait for a while..." << std::endl;
// ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
// std::cout << "Build engine successfully!" << std::endl;
// // Don't need the network any more
// network->destroy();
// // Release host memory
// for (auto& mem : weightMap)
// {
// free((void*)(mem.second.values));
// }
// return engine;
// }
// Builds the P5 or P6 engine (selected by is_p6), serializes it into
// *modelStream (caller owns the IHostMemory), then destroys the engine,
// builder, and config. Asserts — rather than reports — build failure.
// void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bool& is_p6, float& gd, float& gw, std::string& wts_name) {
// // Create builder
// IBuilder* builder = createInferBuilder(gLogger);
// IBuilderConfig* config = builder->createBuilderConfig();
// // Create model to populate the network, then set the outputs and create an engine
// ICudaEngine *engine = nullptr;
// if (is_p6) {
// engine = build_engine_p6(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
// } else {
// engine = build_engine(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
// }
// assert(engine != nullptr);
// // Serialize the engine
// (*modelStream) = engine->serialize();
// // Close everything down
// engine->destroy();
// builder->destroy();
// config->destroy();
// }
// One synchronous inference pass: async H2D copy of `input`, implicit-batch
// enqueue, async D2H copy into `output`, then a blocking stream sync.
// NOTE(review): cudaStreamSynchronize's return code is not checked (no CUDA_CHECK).
// void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* input, float* output, int batchSize) {
// // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
// CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
// context.enqueue(batchSize, buffers, stream, nullptr);
// CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
// cudaStreamSynchronize(stream);
// }
// Parses the CLI: expects exactly `-d <engine-path> <usb-camera-index>`.
// On success fills `engine` and `usb` and returns true; otherwise false.
// (The argc<4 pre-check is redundant with the argc==4 test below.)
// bool parse_args(int argc, char** argv, std::string& engine, int& usb) {
// if(argc<4)
// return false;
// if (std::string(argv[1]) == "-d" && argc == 4) {
// engine = std::string(argv[2]);
// usb = atoi(argv[3]);
// } else {
// return false;
// }
// return true;
// }
// Draws "FPS: <value>" (2 decimal places) in the top-left corner of `img`
// and returns it. NOTE(review): text_s is only 10 bytes — sprintf("%.2f")
// would overflow it for fps values with more than 6 integer digits.
// cv::Mat show_fps(cv::Mat img, float fps){
// // string fps_s = to_string(fps).substr(0, 5);
// char text_s[10];
// sprintf(text_s, "%.2f", fps);
// string fps_s = text_s;
// fps_s = "FPS: " + fps_s;
// putText(img, fps_s, cv::Point(20,20), cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(0, 255, 255), 1);
// return img;
// }
// Returns the fixed class-name list for this helmet model:
// index 0 = person, 1 = head (no helmet), 2 = helmet.
// vector<string> get_categories(){
// vector<string> categories = {"person", "head", "helmet"};
// return categories;
// }
// vector<cv::Scalar> gen_colors(int num_colors){
// vector<int> bgrs;
// if(num_colors<=27)
// bgrs = {0, 125, 255};
// else if(num_colors<=125)
// bgrs= {0, 64, 128, 178, 255};
// else
// cout<<"num_classes > 125"<<endl;
// assert(num_colors<=125);
// vector<cv::Scalar> all_colors;
// cv::Scalar color;
// for(int b=0; b<bgrs.size(); b++)
// for(int g=0; g<bgrs.size(); g++)
// for(int r=0; r<bgrs.size(); r++){
// color = cv::Scalar(bgrs.at(b), bgrs.at(g), bgrs.at(r));
// all_colors.push_back(color);
// }
// // std::srand(unsigned(time(0)));
// // default setting seed
// random_shuffle(all_colors.begin(), all_colors.end());
// vector<cv::Scalar> colors;
// for(int i=0; i<num_colors; i++)
// colors.push_back(all_colors[i]);
// return colors;
// }
// // ./yolov5_cym -d /home/cym/CYM/models/yolov5_helmet/yolov5m-4.0.engine usb
// Entry point: deserializes a prebuilt .engine file, opens a USB camera, and
// runs a capture -> letterbox preprocess -> TensorRT inference -> NMS ->
// draw-boxes -> imshow loop until the stream ends or ESC (key 27) is pressed.
// int main(int argc, char** argv) {
// cudaSetDevice(DEVICE);
// std::string wts_name = "";
// std::string engine_name = "";
// int usb = 0;
// if (!parse_args(argc, argv, engine_name, usb)) {
// std::cerr << "arguments not right!" << std::endl;
// std::cerr << "./yolov5 -d [string(engine)] [int(usb)] // deserialize plan file and run inference" << std::endl;
// return -1;
// }
// // deserialize the .engine and run inference
// std::ifstream file(engine_name, std::ios::binary);
// if (!file.good()) {
// std::cerr << "read " << engine_name << " error!" << std::endl;
// return -1;
// }
// // Read the whole serialized engine into a heap buffer.
// char *trtModelStream = nullptr;
// size_t size = 0;
// file.seekg(0, file.end);
// size = file.tellg();
// file.seekg(0, file.beg);
// trtModelStream = new char[size];
// assert(trtModelStream);
// file.read(trtModelStream, size);
// file.close();
// // prepare input data ---------------------------
// // Static host-side staging buffers for one batch of input pixels / output detections.
// static float data[BATCH_SIZE * 3 * INPUT_H * INPUT_W];
// //for (int i = 0; i < 3 * INPUT_H * INPUT_W; i++)
// // data[i] = 1.0;
// static float prob[BATCH_SIZE * OUTPUT_SIZE];
// IRuntime* runtime = createInferRuntime(gLogger);
// assert(runtime != nullptr);
// ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
// assert(engine != nullptr);
// IExecutionContext* context = engine->createExecutionContext();
// assert(context != nullptr);
// delete[] trtModelStream;
// assert(engine->getNbBindings() == 2);
// void* buffers[2];
// // In order to bind the buffers, we need to know the names of the input and output tensors.
// // Note that indices are guaranteed to be less than IEngine::getNbBindings()
// const int inputIndex = engine->getBindingIndex(INPUT_BLOB_NAME);
// const int outputIndex = engine->getBindingIndex(OUTPUT_BLOB_NAME);
// assert(inputIndex == 0);
// assert(outputIndex == 1);
// // Create GPU buffers on device
// CUDA_CHECK(cudaMalloc(&buffers[inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof(float)));
// CUDA_CHECK(cudaMalloc(&buffers[outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof(float)));
// // Create stream
// cudaStream_t stream;
// CUDA_CHECK(cudaStreamCreate(&stream));
// vector<string> categories = get_categories();
// vector<cv::Scalar>colors = gen_colors(categories.size());
// // Only "head" (1) and "helmet" (2) detections are drawn; "person" is skipped.
// std::vector<int> target_cls_id = {1, 2};
// int frame_count = 0;
// cv::VideoCapture capture;
// capture.open(usb);
// if(!capture.isOpened()){
// printf("could not load video data...\n");
// return -1;
// }
// float fps = 0.0;
// auto fps_start = std::chrono::system_clock::now();
// while(true) {
// frame_count++;
// cv::Mat img;
// capture >> img;
// if (img.empty()){ printf("Could not capture images\n"); break; }
// cv::Mat pr_img = preprocess_img(img, INPUT_W, INPUT_H); // letterbox BGR to RGB
// // Repack HWC BGR uchar pixels into CHW RGB float planes normalized to [0,1].
// int i = 0;
// for (int row = 0; row < INPUT_H; ++row) {
// uchar* uc_pixel = pr_img.data + row * pr_img.step;
// for (int col = 0; col < INPUT_W; ++col) {
// data[i] = (float)uc_pixel[2] / 255.0;
// data[i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
// data[i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
// uc_pixel += 3; ++i;
// }
// }
// // Run inference
// auto start = std::chrono::system_clock::now();
// doInference(*context, stream, buffers, data, prob, BATCH_SIZE);
// auto end = std::chrono::system_clock::now();
// // NOTE(review): "%d" for duration_cast<...>.count() is wrong — count() returns
// // a long long rep, so this printf has undefined behavior ("%lld" needed).
// printf("Frame: %d. The inference time is: %dms. FPS:%.3f.\n", frame_count, std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(), fps);
// // std::vector<Yolo::Detection> batch_res;
// // nms(batch_res, &prob[0], CONF_THRESH, NMS_THRESH);
// // Classes 1 and 2 (head/helmet) are NMS'd against each other so overlapping
// // head/helmet boxes suppress one another.
// std::vector<float> merge_cls_id = {1,2};
// std::vector<Yolo::Detection> batch_res;
// nms_merge_cls_id(batch_res, &prob[0], merge_cls_id, CONF_THRESH, NMS_THRESH);
// // // merge_cls_id: head and helmet共同做nms. (i.e. head and helmet share one NMS pass)
// // std::vector<Yolo::Detection> res;
// // nms(res, &prob[0], CONF_THRESH, NMS_THRESH);
// // std::vector<Yolo::Detection> batch_res;
// // std::vector<float> merge_cls_id = {1,2};
// // nms_merge(batch_res, res, merge_cls_id, CONF_THRESH, NMS_THRESH);
// // draw bounding box
// for (size_t j = 0; j < batch_res.size(); j++) {
// cv::Rect r = get_rect(img, batch_res[j].bbox);
// int class_id = (int)batch_res[j].class_id;
// if(std::find(target_cls_id.begin(), target_cls_id.end(), class_id) != target_cls_id.end()){
// cv::rectangle(img, r, colors[class_id], 2);
// char text[10]; sprintf(text, "%.2f", batch_res[j].conf);
// string text_s = text; text_s = categories[class_id] + ": "+text_s;
// cv::putText(img, text_s, cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2);
// }
// }
// // Exponential moving average of whole-loop FPS (95% old / 5% new).
// auto fps_end = std::chrono::system_clock::now();
// float all_time = std::chrono::duration_cast<std::chrono::milliseconds>(fps_end - fps_start).count();
// float curr_fps = 1000.0/all_time;
// if(fps==0.0) fps = curr_fps; else fps = fps*0.95 + curr_fps*0.05;
// fps_start = fps_end;
// img = show_fps(img, fps);
// cv::imshow("video", img);
// int key = cv::waitKey(1);
// if(key==27)
// break;
// }
// // Release stream and buffers
// cudaStreamDestroy(stream);
// CUDA_CHECK(cudaFree(buffers[inputIndex]));
// CUDA_CHECK(cudaFree(buffers[outputIndex]));
// // Destroy the engine
// context->destroy();
// engine->destroy();
// runtime->destroy();
// return 0;
// }
Loading...
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/orzchenyuming/yolov5_sort_cpp.git
[email protected]:orzchenyuming/yolov5_sort_cpp.git
orzchenyuming
yolov5_sort_cpp
yolov5_sort_cpp
master

搜索帮助