// (removed: stray web-page artifact text that was not part of the source)
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include "common.hpp"
#include "logging.h"
//#include "yololayer.h"
#include "cuda_utils.h"
#include "utils.h"
#include "inference_utils.hpp"
/**
 * Parse command-line arguments of the form:
 *   <prog> -d <engine_file> <usb_camera_index> <side> <true|false>
 * Exactly 6 arguments (including the program name) are required.
 * On success fills engine/usb/side/is_save and returns true; returns false
 * on any malformed input. Echoes argc/argv to stdout for debugging.
 */
bool parse_args(int argc, char** argv, std::string& engine, int& usb, int& side, bool& is_save) {
    std::cout << "argc: " << argc << std::endl << "argv: [";
    // int index: argc is signed, so avoid the old signed/unsigned comparison.
    for (int i = 0; i < argc; i++)
        std::cout << argv[i] << ",";
    std::cout << "]" << std::endl;
    // Require exactly "-d" mode with exactly 6 arguments (same acceptance set
    // as the original argc<6 / argc==6 pair of checks).
    if (argc != 6 || std::string(argv[1]) != "-d")
        return false;
    engine = std::string(argv[2]);
    usb = atoi(argv[3]);
    side = atoi(argv[4]);
    const std::string save_flag(argv[5]);
    if (save_flag == "true")
        is_save = true;
    else if (save_flag == "false")
        is_save = false;
    else
        return false;  // anything other than the literal true/false is rejected
    return true;
}
// Return the current local time formatted as "YYYY-MM-DD-HH-MM-SS"
// (useful for timestamped file names).
std::string get_str_time(){
    const std::time_t now = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
    std::ostringstream oss;
    // NOTE(review): std::localtime is not thread-safe — fine for the current
    // single-threaded callers.
    oss << std::put_time(std::localtime(&now), "%Y-%m-%d-%H-%M-%S");
    return oss.str();
}
// Scale a channel count x by width multiplier gw, rounding UP to the nearest
// multiple of `divisor` (default 8), matching yolov5's make_divisible.
static int get_width(int x, float gw, int divisor = 8) {
    const float scaled = x * gw;
    const int groups = static_cast<int>(ceil(scaled / divisor));
    return groups * divisor;
}
// Scale a block repeat-count x by depth multiplier gd. Uses round-half-down
// when the integer part is even (mirrors yolov5's rounding), and never
// returns less than 1.
static int get_depth(int x, float gd) {
    if (x == 1)
        return 1;
    const float scaled = x * gd;
    int depth = round(scaled);
    // Exact .5 fraction with an even integer part rounds down instead of up.
    if (scaled - int(scaled) == 0.5 && int(scaled) % 2 == 0)
        depth--;
    return std::max<int>(depth, 1);
}
// Return the 80 COCO class names, in training-label order (index == class id).
std::vector<std::string> get_categories(){
    static const std::vector<std::string> kCocoNames = {
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
        "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
        "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
        "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
        "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
        "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
        "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
        "hair drier", "toothbrush"
    };
    return kCocoNames;
}
std::vector<cv::Scalar> gen_colors(int num_colors){
std::vector<int> bgrs;
if(num_colors<=27)
bgrs = {0, 125, 255};
else if(num_colors<=125)
bgrs= {0, 64, 128, 178, 255};
else
std::cout<<"ERROR: num_classes > 125"<<std::endl;
assert(num_colors<=125);
std::vector<cv::Scalar> all_colors;
cv::Scalar color;
for(int b=0; b<bgrs.size(); b++)
for(int g=0; g<bgrs.size(); g++)
for(int r=0; r<bgrs.size(); r++){
color = cv::Scalar(bgrs.at(b), bgrs.at(g), bgrs.at(r));
all_colors.push_back(color);
}
// std::srand(unsigned(time(0)));
// default setting seed
random_shuffle(all_colors.begin(), all_colors.end());
std::vector<cv::Scalar> colors;
for(int i=0; i<num_colors; i++)
colors.push_back(all_colors[i]);
return colors;
}
// Draw a rectangle and a "<category>: <confidence>" label on `img` for every
// detection whose class id appears in target_cls_id.
// colors is indexed with class_id + 1, matching the tracking overload below.
void draw_bbox(cv::Mat& img, std::vector<Yolo::Detection> batch_res, std::vector<int> target_cls_id, std::vector<std::string> categories, std::vector<cv::Scalar> colors){
    for (auto& det : batch_res) {
        cv::Rect box = get_rect(img, det.bbox);  // map network coords back onto the frame
        const int class_id = (int)det.class_id;
        const bool is_target = std::find(target_cls_id.begin(), target_cls_id.end(), class_id) != target_cls_id.end();
        if (!is_target)
            continue;
        cv::rectangle(img, box, colors[class_id + 1], 2);
        char conf_buf[10];
        sprintf(conf_buf, "%.2f", det.conf);
        const std::string label = categories[class_id] + ": " + std::string(conf_buf);
        cv::putText(img, label, cv::Point(box.x, box.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2);
    }
}
// Draw a rectangle and a "<category>_id_<track id>: <confidence>" label on
// `img` for every tracked box whose class id appears in target_cls_id.
void draw_bbox(cv::Mat& img, std::vector<TrackingBox> detTrackers, std::vector<int> target_cls_id, std::vector<std::string> categories, std::vector<cv::Scalar> colors){
    for (auto& tracker : detTrackers) {
        const int class_id = (int)tracker.class_id;
        if (std::find(target_cls_id.begin(), target_cls_id.end(), class_id) == target_cls_id.end())
            continue;
        cv::Rect box = tracker.box;
        cv::rectangle(img, box, colors[class_id + 1], 2);
        char conf_buf[10];
        sprintf(conf_buf, "%.2f", tracker.conf);
        const std::string label = categories[class_id] + "_id_" + std::to_string(tracker.id) + ": " + std::string(conf_buf);
        cv::putText(img, label, cv::Point(box.x, box.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2);
    }
}
// Render "FPS: xx.xx" in the top-left corner of the frame.
void show_fps(cv::Mat& img, float fps){
    char buf[32];
    // snprintf is bounded — the old 10-byte sprintf buffer could overflow for
    // very large fps values.
    snprintf(buf, sizeof(buf), "FPS: %.2f", fps);
    putText(img, std::string(buf), cv::Point(20, 20), cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(0, 255, 255), 1);
}
// Build a YOLOv5 P5 (three detection heads) TensorRT engine from a .wts
// weights file.
//   maxBatchSize : max batch the engine accepts (implicit-batch mode)
//   gd / gw      : depth / width multipliers selecting the model size (s/m/l/x)
//   wts_name     : path to the weights file consumed by loadWeights()
// Returns an ICudaEngine the caller must destroy; host weight memory is freed
// before returning.
ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
// Implicit-batch network (flag 0U): dimensions below exclude the batch axis.
INetworkDefinition* network = builder->createNetworkV2(0U);
// Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
assert(data);
std::map<std::string, Weights> weightMap = loadWeights(wts_name);
/* ------ yolov5 backbone------ */
// Layer names ("model.N") must match the keys stored in the .wts weight map.
auto focus0 = focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
auto spp8 = SPP(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, 9, 13, "model.8");
/* ------ yolov5 head ------ */
auto bottleneck_csp9 = C3(network, weightMap, *spp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.9");
auto conv10 = convBlock(network, weightMap, *bottleneck_csp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");
// Top-down path: nearest-neighbour upsample, then concatenate with the
// matching backbone feature map.
auto upsample11 = network->addResize(*conv10->getOutput(0));
assert(upsample11);
upsample11->setResizeMode(ResizeMode::kNEAREST);
upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
auto cat12 = network->addConcatenation(inputTensors12, 2);
auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");
auto upsample15 = network->addResize(*conv14->getOutput(0));
assert(upsample15);
upsample15->setResizeMode(ResizeMode::kNEAREST);
upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
auto cat16 = network->addConcatenation(inputTensors16, 2);
auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");
/* ------ detect ------ */
// Three 1x1 convolution detection heads, one per scale; each outputs
// 3 * (CLASS_NUM + 5) channels (3 anchors x [box, obj, classes]).
IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
auto cat19 = network->addConcatenation(inputTensors19, 2);
auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
auto cat22 = network->addConcatenation(inputTensors22, 2);
auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);
// Custom YOLO decode plugin combines the three heads into one output blob.
auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2});
yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
network->markOutput(*yolo->getOutput(0));
// Build engine
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
// Precision is a compile-time switch: FP16, INT8 (with entropy calibration
// over images in ./coco_calib/), or default FP32.
#if defined(USE_FP16)
config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
assert(builder->platformHasFastInt8());
config->setFlag(BuilderFlag::kINT8);
Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
config->setInt8Calibrator(calibrator);
#endif
std::cout << "Building engine, please wait for a while..." << std::endl;
ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
std::cout << "Build engine successfully!" << std::endl;
// Don't need the network any more
network->destroy();
// Release host memory
for (auto& mem : weightMap)
{
free((void*)(mem.second.values));
}
return engine;
}
// Build a YOLOv5 P6 variant (extra P6 scale, four detection heads) TensorRT
// engine from a .wts weights file. Parameters and ownership semantics are the
// same as build_engine(); the detect layers here live under "model.33".
ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
// Implicit-batch network (flag 0U): dimensions below exclude the batch axis.
INetworkDefinition* network = builder->createNetworkV2(0U);
// Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
assert(data);
std::map<std::string, Weights> weightMap = loadWeights(wts_name);
/* ------ yolov5 backbone------ */
// Layer names ("model.N") must match the keys stored in the .wts weight map.
auto focus0 = focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
auto conv3 = convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7");
auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9");
auto spp10 = SPP(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), 3, 5, 7, "model.10");
auto c3_11 = C3(network, weightMap, *spp10->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.11");
/* ------ yolov5 head ------ */
// Top-down path: three upsample + concat stages (P6 -> P5 -> P4 -> P3).
auto conv12 = convBlock(network, weightMap, *c3_11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12");
auto upsample13 = network->addResize(*conv12->getOutput(0));
assert(upsample13);
upsample13->setResizeMode(ResizeMode::kNEAREST);
upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions());
ITensor* inputTensors14[] = { upsample13->getOutput(0), c3_8->getOutput(0) };
auto cat14 = network->addConcatenation(inputTensors14, 2);
auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15");
auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16");
auto upsample17 = network->addResize(*conv16->getOutput(0));
assert(upsample17);
upsample17->setResizeMode(ResizeMode::kNEAREST);
upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions());
ITensor* inputTensors18[] = { upsample17->getOutput(0), c3_6->getOutput(0) };
auto cat18 = network->addConcatenation(inputTensors18, 2);
auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19");
auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20");
auto upsample21 = network->addResize(*conv20->getOutput(0));
assert(upsample21);
upsample21->setResizeMode(ResizeMode::kNEAREST);
upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions());
// NOTE: array is named ...21 but feeds cat22 — naming quirk only.
ITensor* inputTensors21[] = { upsample21->getOutput(0), c3_4->getOutput(0) };
auto cat22 = network->addConcatenation(inputTensors21, 2);
auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
// Bottom-up (PAN) path back through the scales.
auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24");
ITensor* inputTensors25[] = { conv24->getOutput(0), conv20->getOutput(0) };
auto cat25 = network->addConcatenation(inputTensors25, 2);
auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26");
auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27");
ITensor* inputTensors28[] = { conv27->getOutput(0), conv16->getOutput(0) };
auto cat28 = network->addConcatenation(inputTensors28, 2);
auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29");
auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30");
ITensor* inputTensors31[] = { conv30->getOutput(0), conv12->getOutput(0) };
auto cat31 = network->addConcatenation(inputTensors31, 2);
auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32");
/* ------ detect ------ */
// Four 1x1 convolution detection heads, one per scale; each outputs
// 3 * (CLASS_NUM + 5) channels (3 anchors x [box, obj, classes]).
IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]);
IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]);
IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]);
IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]);
// Custom YOLO decode plugin combines the four heads into one output blob.
auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector<IConvolutionLayer*>{det0, det1, det2, det3});
yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
network->markOutput(*yolo->getOutput(0));
// Build engine
builder->setMaxBatchSize(maxBatchSize);
config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
// Precision is a compile-time switch: FP16, INT8 (with entropy calibration
// over images in ./coco_calib/), or default FP32.
#if defined(USE_FP16)
config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
assert(builder->platformHasFastInt8());
config->setFlag(BuilderFlag::kINT8);
Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
config->setInt8Calibrator(calibrator);
#endif
std::cout << "Building engine, please wait for a while..." << std::endl;
ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
std::cout << "Build engine successfully!" << std::endl;
// Don't need the network any more
network->destroy();
// Release host memory
for (auto& mem : weightMap)
{
free((void*)(mem.second.values));
}
return engine;
}
// Convert a .wts weights file into a serialized TensorRT engine.
//   modelStream : receives the serialized engine (caller owns and destroys it)
//   is_p6       : selects the P6 (4-head) network variant
//   gd / gw     : depth / width multipliers forwarded to the builder
void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bool& is_p6, float& gd, float& gw, std::string& wts_name) {
    // Create builder and its build configuration.
    IBuilder* builder = createInferBuilder(gLogger);
    IBuilderConfig* config = builder->createBuilderConfig();
    // Create model to populate the network, then set the outputs and create an engine.
    ICudaEngine* engine = nullptr;
    if (is_p6) {
        engine = build_engine_p6(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
    } else {
        engine = build_engine(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
    }
    assert(engine != nullptr);
    // Serialize the engine into host memory owned by the caller.
    (*modelStream) = engine->serialize();
    // Destroy in reverse order of creation: engine, then config, then builder.
    // (The original destroyed the builder before the config it had created.)
    engine->destroy();
    config->destroy();
    builder->destroy();
}
// Run one inference pass: copy `input` host data to the device, execute the
// engine for `batchSize` images, copy the raw output tensor back to `output`,
// and block until the stream has finished.
//   buffers[0] / buffers[1] : pre-allocated device buffers bound to the
//                             engine's input / output bindings.
void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* input, float* output, int batchSize) {
// DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
context.enqueue(batchSize, buffers, stream, nullptr);
CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
// Synchronize so `output` is valid when this function returns.
cudaStreamSynchronize(stream);
}
// Deserialize a serialized TensorRT engine (.engine file) from disk.
// Returns nullptr when the file cannot be opened or read; asserts on
// runtime/engine creation failure. Caller owns the returned engine.
ICudaEngine* get_engine(Logger gLogger, std::string engine_name){
    std::ifstream file(engine_name, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << engine_name << " error!" << std::endl;
        return nullptr;
    }
    file.seekg(0, file.end);
    const size_t size = file.tellg();
    file.seekg(0, file.beg);
    // std::vector gives RAII ownership — the old raw new[]/delete[] could leak
    // on early-exit paths and needed a manual delete.
    std::vector<char> trtModelStream(size);
    file.read(trtModelStream.data(), size);
    if (!file) {
        std::cerr << "failed to read " << engine_name << std::endl;
        return nullptr;
    }
    file.close();
    IRuntime* runtime = createInferRuntime(gLogger);
    assert(runtime != nullptr);
    ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream.data(), size);
    assert(engine != nullptr);
    runtime->destroy();
    return engine;
}
// Convert a frame into the network's CHW float layout in [0, 1], writing into
// the caller-provided buffer `data` (size 3 * INPUT_H * INPUT_W).
// Channel order is reversed while splitting into planes (pixel byte 2 goes to
// plane 0) — presumably BGR -> RGB; confirm against preprocess_img.
void prepare_input_data(cv::Mat& img, float* data){
    cv::Mat pr_img = preprocess_img(img, INPUT_W, INPUT_H);  // letterbox resize
    const int plane = INPUT_H * INPUT_W;
    int idx = 0;
    for (int row = 0; row < INPUT_H; ++row) {
        uchar* px = pr_img.data + row * pr_img.step;  // start of this image row
        for (int col = 0; col < INPUT_W; ++col, ++idx, px += 3) {
            data[idx] = (float)px[2] / 255.0;
            data[idx + plane] = (float)px[1] / 255.0;
            data[idx + 2 * plane] = (float)px[0] / 255.0;
        }
    }
}
// Run one inference pass and apply NMS to the raw output in `prob`.
// Classes listed in merge_cls_id are suppressed against each other
// (e.g. head and helmet share one NMS pool); other classes are handled
// per class by nms_merge_cls_id.
std::vector<Yolo::Detection> inference_and_nms(
IExecutionContext& context, cudaStream_t& stream, void **buffers, float* data, float* prob,
int batchSize, std::vector<float> merge_cls_id){
    doInference(context, stream, buffers, data, prob, batchSize);
    std::vector<Yolo::Detection> detections;
    nms_merge_cls_id(detections, &prob[0], merge_cls_id, CONF_THRESH, NMS_THRESH);
    return detections;
}
// Cross-product style 2x2 determinant: v1*v3 - v2*v4.
// NOTE: the argument order differs from the textbook |a b; c d| layout —
// callers pass operands to match this exact formula.
double determinant(double v1, double v2, double v3, double v4){
    const double lhs = v1 * v3;
    const double rhs = v2 * v4;
    return lhs - rhs;
}
// Segment-segment intersection test between segment (aa, bb) and (cc, dd)
// using the parametric form. A near-zero denominator (parallel or degenerate
// segments) counts as no intersection.
bool is_intersect(const cv::Point& aa, const cv::Point& bb, const cv::Point& cc, const cv::Point& dd){
    const double denom = determinant(bb.x - aa.x, dd.x - cc.x, dd.y - cc.y, bb.y - aa.y);
    if (denom <= 1e-6 && denom >= -(1e-6))  // |denom| within tolerance
        return false;
    // Parameter along (aa, bb).
    const double t = determinant(dd.x - cc.x, aa.x - cc.x, aa.y - cc.y, dd.y - cc.y) / denom;
    if (t < 0 || t > 1)
        return false;
    // Parameter along (cc, dd).
    const double u = determinant(bb.x - aa.x, aa.x - cc.x, aa.y - cc.y, bb.y - aa.y) / denom;
    return u >= 0 && u <= 1;
}
void init_statistic_info(std::map<std::string, std::map<int, int>>& statistic_info,
std::vector<std::vector<cv::Point>>& centers_list, const vector<int>& target_id){
statistic_info.clear();
centers_list.clear();
// std::map<"sum", std::map<cls_id, int>>& statistic_info
vector<std::string> statistic_obj = {"sum", "left", "right"};
for(auto & s_obj : statistic_obj){
std::map<int, int> tmp;
for(auto cls_id: target_id){
tmp.insert(pair<int, int>(cls_id, 0));
}
statistic_info.insert(pair<std::string, std::map<int, int>>(s_obj, tmp));
}
for(size_t i=0; i<9999; i++){
std::vector<cv::Point> center;
centers_list.push_back(center);
}
}
// Update line-crossing statistics from one frame of tracked boxes.
// For each tracked box: append its center to that id's trajectory, and if the
// last trajectory segment crosses `line` (two points), increment the "sum"
// counter for its class plus "left" or "right" depending on direction.
//   side : true  -> direction judged on x (p2.x > p1.x counts as "right")
//          false -> direction judged on y (p2.y > p1.y counts as "left")
void statistic(vector<TrackingBox>& detTrackerRes, std::map<std::string, std::map<int, int>>& statistic_info,
std::vector<std::vector<cv::Point>>& centers, std::vector<cv::Point>& line, bool side){
    for (const TrackingBox& tb : detTrackerRes) {  // const ref: old code copied every box
        const cv::Rect rect = tb.box;
        const int target_id = tb.id;
        const int class_id = int(tb.class_id);
        // Integer midpoint of the box.
        const cv::Point center_p((rect.x * 2 + rect.width) / 2, (rect.y * 2 + rect.height) / 2);
        // NOTE(review): target_id indexes `centers` unchecked — ids beyond the
        // table built by init_statistic_info would be out of bounds; confirm
        // the tracker's id range.
        centers[target_id].push_back(center_p);
        const size_t n = centers[target_id].size();
        if (n < 2)
            continue;  // need two points to form a trajectory segment
        const cv::Point p1 = centers[target_id][n - 2];
        const cv::Point p2 = centers[target_id][n - 1];
        if (!is_intersect(p1, p2, line[0], line[1]))
            continue;
        statistic_info["sum"][class_id] += 1;
        if (side) {
            if (p2.x > p1.x)
                statistic_info["right"][class_id] += 1;
            else
                statistic_info["left"][class_id] += 1;
        } else {
            if (p2.y > p1.y)
                statistic_info["left"][class_id] += 1;
            else
                statistic_info["right"][class_id] += 1;
        }
    }
}
// Draw the counting line plus three columns of counters (sum / left / right):
// each column shows its total on top and one row per target class below it.
void draw_statistic_info(cv::Mat& img, std::map<std::string, std::map<int, int>>& statistic_info, std::vector<cv::Point>& line,
const std::vector<int>& target_cls_id, std::vector<std::string> categories){
    // Layout constants (pixels). The old unused thickness/lineType locals and
    // the "widht_gap" typo are gone.
    const int start_width = 20, width_gap = 150, start_height = 50, height_gap = 15;
    cv::line(img, line[0], line[1], cv::Scalar(0, 255, 0), 3, 4);
    const std::vector<std::string> statistic_obj = {"sum", "left", "right"};
    for (size_t k = 0; k < statistic_obj.size(); k++) {
        const std::string& s_obj = statistic_obj[k];
        int counts = 0;
        for (size_t i = 0; i < target_cls_id.size(); i++) {
            const int cls_id = target_cls_id[i];
            const std::string& cls_name = categories[cls_id];
            char statistic_text[50];
            // snprintf is bounded — a long class name could overflow the old
            // 50-byte sprintf buffer.
            snprintf(statistic_text, sizeof(statistic_text), "%s: %d", cls_name.c_str(), statistic_info[s_obj][cls_id]);
            cv::putText(img, std::string(statistic_text), cv::Point(start_width + width_gap * int(k), start_height + height_gap * int(i + 1)),
                cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(50 * i, 255, 50 * i), 1);
            counts += statistic_info[s_obj][cls_id];
        }
        // Column header: "<sum|left|right>: <total over all target classes>".
        char sum_text[50];
        snprintf(sum_text, sizeof(sum_text), "%s: %d", s_obj.c_str(), counts);
        cv::putText(img, std::string(sum_text), cv::Point(start_width + width_gap * int(k), start_height),
            cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 255, 0), 1);
    }
}
// (removed: non-source web platform notice text accidentally captured with the file)