Replace the document detection model

2024-08-27 14:42:45 +08:00
parent aea6f19951
commit 1514e09c40
2072 changed files with 254336 additions and 4967 deletions

View File

@@ -0,0 +1,142 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <map>
#include <string>
#include <vector>
#include "yaml-cpp/yaml.h"
#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
namespace PaddleDetection {
// Inference model configuration parser
class ConfigPaser {
public:
ConfigPaser() {}
~ConfigPaser() {}
bool load_config(const std::string& model_dir,
const std::string& cfg = "infer_cfg.yml") {
// Load as a YAML::Node
YAML::Node config;
config = YAML::LoadFile(model_dir + OS_PATH_SEP + cfg);
// Get runtime mode : paddle, trt_fp16, trt_fp32
if (config["mode"].IsDefined()) {
mode_ = config["mode"].as<std::string>();
} else {
std::cerr << "Please set mode, "
<< "support value : paddle/trt_fp16/trt_fp32." << std::endl;
return false;
}
// Get model arch : YOLO, SSD, RetinaNet, RCNN, Face
if (config["arch"].IsDefined()) {
arch_ = config["arch"].as<std::string>();
} else {
std::cerr << "Please set model arch,"
<< "support value : YOLO, SSD, RetinaNet, RCNN, Face."
<< std::endl;
return false;
}
// Get min_subgraph_size for tensorrt
if (config["min_subgraph_size"].IsDefined()) {
min_subgraph_size_ = config["min_subgraph_size"].as<int>();
} else {
std::cerr << "Please set min_subgraph_size." << std::endl;
return false;
}
// Get draw_threshold for visualization
if (config["draw_threshold"].IsDefined()) {
draw_threshold_ = config["draw_threshold"].as<float>();
} else {
std::cerr << "Please set draw_threshold." << std::endl;
return false;
}
// Get Preprocess for preprocessing
if (config["Preprocess"].IsDefined()) {
preprocess_info_ = config["Preprocess"];
} else {
std::cerr << "Please set Preprocess." << std::endl;
return false;
}
// Get label_list for visualization
if (config["label_list"].IsDefined()) {
label_list_ = config["label_list"].as<std::vector<std::string>>();
} else {
std::cerr << "Please set label_list." << std::endl;
return false;
}
// Get use_dynamic_shape for TensorRT
if (config["use_dynamic_shape"].IsDefined()) {
use_dynamic_shape_ = config["use_dynamic_shape"].as<bool>();
} else {
std::cerr << "Please set use_dynamic_shape." << std::endl;
return false;
}
// Get conf_thresh for tracker
if (config["tracker"].IsDefined()) {
if (config["tracker"]["conf_thres"].IsDefined()) {
conf_thresh_ = config["tracker"]["conf_thres"].as<float>();
} else {
std::cerr << "Please set conf_thres in tracker." << std::endl;
return false;
}
}
// Get NMS for postprocess
if (config["NMS"].IsDefined()) {
nms_info_ = config["NMS"];
}
// Get fpn_stride in PicoDet
if (config["fpn_stride"].IsDefined()) {
fpn_stride_.clear();
for (auto item : config["fpn_stride"]) {
fpn_stride_.emplace_back(item.as<int>());
}
}
if (config["mask"].IsDefined()) {
mask_ = config["mask"].as<bool>();
}
return true;
}
std::string mode_;
float draw_threshold_;
std::string arch_;
int min_subgraph_size_;
YAML::Node preprocess_info_;
YAML::Node nms_info_;
std::vector<std::string> label_list_;
std::vector<int> fpn_stride_;
bool use_dynamic_shape_;
float conf_thresh_;
bool mask_ = false;
};
} // namespace PaddleDetection
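
A minimal sketch of how this parser is meant to be driven: load_config() reads <model_dir>/infer_cfg.yml and fails fast when any required key (mode, arch, min_subgraph_size, draw_threshold, Preprocess, label_list, use_dynamic_shape) is missing. The model directory below is a placeholder assumption.

#include "include/config_parser.h"

int main() {
  PaddleDetection::ConfigPaser config;
  // "./picodet_model" is a placeholder; it must contain an exported infer_cfg.yml.
  if (!config.load_config("./picodet_model")) {
    return 1;
  }
  std::cout << "arch: " << config.arch_ << ", mode: " << config.mode_ << std::endl;
  return 0;
}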

View File

@@ -0,0 +1,134 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "paddle_inference_api.h" // NOLINT
#include "include/config_parser.h"
#include "include/preprocess_op.h"
#include "include/tracker.h"
using namespace paddle_infer;
namespace PaddleDetection {
// JDE Detection Result
struct MOT_Rect {
float left;
float top;
float right;
float bottom;
};
struct MOT_Track {
int ids;
float score;
MOT_Rect rects;
};
typedef std::vector<MOT_Track> MOT_Result;
// Generate visualization color
cv::Scalar GetColor(int idx);
// Visualization of the tracking result
cv::Mat VisualizeTrackResult(const cv::Mat& img,
const MOT_Result& results,
const float fps,
const int frame_id);
class JDEDetector {
public:
explicit JDEDetector(const std::string& model_dir,
const std::string& device = "CPU",
bool use_mkldnn = false,
int cpu_threads = 1,
const std::string& run_mode = "paddle",
const int batch_size = 1,
const int gpu_id = 0,
const int trt_min_shape = 1,
const int trt_max_shape = 1280,
const int trt_opt_shape = 640,
bool trt_calib_mode = false,
const int min_box_area = 200) {
this->device_ = device;
this->gpu_id_ = gpu_id;
this->cpu_math_library_num_threads_ = cpu_threads;
this->use_mkldnn_ = use_mkldnn;
this->trt_min_shape_ = trt_min_shape;
this->trt_max_shape_ = trt_max_shape;
this->trt_opt_shape_ = trt_opt_shape;
this->trt_calib_mode_ = trt_calib_mode;
config_.load_config(model_dir);
this->use_dynamic_shape_ = config_.use_dynamic_shape_;
this->min_subgraph_size_ = config_.min_subgraph_size_;
threshold_ = config_.draw_threshold_;
preprocessor_.Init(config_.preprocess_info_);
LoadModel(model_dir, batch_size, run_mode);
this->min_box_area_ = min_box_area;
this->conf_thresh_ = config_.conf_thresh_;
}
// Load Paddle inference model
void LoadModel(const std::string& model_dir,
const int batch_size = 1,
const std::string& run_mode = "paddle");
// Run predictor
void Predict(const std::vector<cv::Mat> imgs,
const double threshold = 0.5,
const int warmup = 0,
const int repeats = 1,
MOT_Result* result = nullptr,
std::vector<double>* times = nullptr);
private:
std::string device_ = "CPU";
int gpu_id_ = 0;
int cpu_math_library_num_threads_ = 1;
bool use_mkldnn_ = false;
int min_subgraph_size_ = 3;
bool use_dynamic_shape_ = false;
int trt_min_shape_ = 1;
int trt_max_shape_ = 1280;
int trt_opt_shape_ = 640;
bool trt_calib_mode_ = false;
// Preprocess image and copy data to input buffer
void Preprocess(const cv::Mat& image_mat);
// Postprocess result
void Postprocess(const cv::Mat dets, const cv::Mat emb, MOT_Result* result);
std::shared_ptr<Predictor> predictor_;
Preprocessor preprocessor_;
ImageBlob inputs_;
std::vector<float> bbox_data_;
std::vector<float> emb_data_;
float threshold_;
ConfigPaser config_;
float min_box_area_;
float conf_thresh_;
};
} // namespace PaddleDetection
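
A hedged usage sketch of the tracker API above; the header install path, model directory, device string and input image are assumptions for illustration, while the Predict() and VisualizeTrackResult() signatures follow the declarations in this file.

#include <opencv2/imgcodecs.hpp>
#include "include/jde_detector.h"  // assumed install path of the header above

int main() {
  // Placeholder model directory exported with infer_cfg.yml; "GPU" selects the device.
  PaddleDetection::JDEDetector tracker("./jde_model", "GPU");
  cv::Mat frame = cv::imread("frame_0.jpg");
  PaddleDetection::MOT_Result result;
  std::vector<double> times(3, 0.0);
  tracker.Predict({frame}, 0.5, 0, 1, &result, &times);
  cv::Mat vis = PaddleDetection::VisualizeTrackResult(frame, result, 25.0f, 0);
  cv::imwrite("frame_0_track.jpg", vis);
  return 0;
}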

View File

@@ -0,0 +1,126 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "paddle_inference_api.h" // NOLINT
#include "include/config_parser.h"
#include "include/keypoint_postprocess.h"
#include "include/preprocess_op.h"
using namespace paddle_infer;
namespace PaddleDetection {
// Visualization of the keypoint result
cv::Mat VisualizeKptsResult(const cv::Mat& img,
const std::vector<KeyPointResult>& results,
const std::vector<int>& colormap);
class KeyPointDetector {
public:
explicit KeyPointDetector(const std::string& model_dir,
const std::string& device = "CPU",
bool use_mkldnn = false,
int cpu_threads = 1,
const std::string& run_mode = "paddle",
const int batch_size = 1,
const int gpu_id = 0,
const int trt_min_shape = 1,
const int trt_max_shape = 1280,
const int trt_opt_shape = 640,
bool trt_calib_mode = false,
bool use_dark = true) {
this->device_ = device;
this->gpu_id_ = gpu_id;
this->cpu_math_library_num_threads_ = cpu_threads;
this->use_mkldnn_ = use_mkldnn;
this->use_dark = use_dark;
this->trt_min_shape_ = trt_min_shape;
this->trt_max_shape_ = trt_max_shape;
this->trt_opt_shape_ = trt_opt_shape;
this->trt_calib_mode_ = trt_calib_mode;
config_.load_config(model_dir);
this->use_dynamic_shape_ = config_.use_dynamic_shape_;
this->min_subgraph_size_ = config_.min_subgraph_size_;
threshold_ = config_.draw_threshold_;
preprocessor_.Init(config_.preprocess_info_);
LoadModel(model_dir, batch_size, run_mode);
}
// Load Paddle inference model
void LoadModel(const std::string& model_dir,
const int batch_size = 1,
const std::string& run_mode = "paddle");
// Run predictor
void Predict(const std::vector<cv::Mat> imgs,
std::vector<std::vector<float>>& center,
std::vector<std::vector<float>>& scale,
const double threshold = 0.5,
const int warmup = 0,
const int repeats = 1,
std::vector<KeyPointResult>* result = nullptr,
std::vector<double>* times = nullptr);
// Get Model Label list
const std::vector<std::string>& GetLabelList() const {
return config_.label_list_;
}
private:
std::string device_ = "CPU";
int gpu_id_ = 0;
int cpu_math_library_num_threads_ = 1;
bool use_dark = true;
bool use_mkldnn_ = false;
int min_subgraph_size_ = 3;
bool use_dynamic_shape_ = false;
int trt_min_shape_ = 1;
int trt_max_shape_ = 1280;
int trt_opt_shape_ = 640;
bool trt_calib_mode_ = false;
// Preprocess image and copy data to input buffer
void Preprocess(const cv::Mat& image_mat);
// Postprocess result
void Postprocess(std::vector<float>& output,
std::vector<int> output_shape,
std::vector<int64_t>& idxout,
std::vector<int> idx_shape,
std::vector<KeyPointResult>* result,
std::vector<std::vector<float>>& center,
std::vector<std::vector<float>>& scale);
std::shared_ptr<Predictor> predictor_;
Preprocessor preprocessor_;
ImageBlob inputs_;
std::vector<float> output_data_;
std::vector<int64_t> idx_data_;
float threshold_;
ConfigPaser config_;
};
} // namespace PaddleDetection
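
As a rough usage sketch (the header install path, model directory and the 192x256 crop geometry are assumptions), the detector consumes person crops together with the center/scale of each crop in the original image and returns one KeyPointResult per crop:

#include <opencv2/imgcodecs.hpp>
#include "include/keypoint_detector.h"  // assumed install path of the header above

int main() {
  PaddleDetection::KeyPointDetector detector("./hrnet_model", "GPU");
  cv::Mat crop = cv::imread("person_crop.jpg");
  // Center/scale of the crop in the source image; the values here are illustrative.
  std::vector<std::vector<float>> center = {{96.f, 128.f}};
  std::vector<std::vector<float>> scale = {{192.f, 256.f}};
  std::vector<PaddleDetection::KeyPointResult> results;
  std::vector<double> times(3, 0.0);
  detector.Predict({crop}, center, scale, 0.5, 0, 1, &results, &times);
  return 0;
}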

View File

@@ -0,0 +1,134 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <math.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
namespace PaddleDetection {
std::vector<float> get_3rd_point(std::vector<float>& a, std::vector<float>& b);
std::vector<float> get_dir(float src_point_x, float src_point_y, float rot_rad);
void affine_tranform(
float pt_x, float pt_y, cv::Mat& trans, std::vector<float>& preds, int p);
cv::Mat get_affine_transform(std::vector<float>& center,
std::vector<float>& scale,
float rot,
std::vector<int>& output_size,
int inv);
void transform_preds(std::vector<float>& coords,
std::vector<float>& center,
std::vector<float>& scale,
std::vector<int>& output_size,
std::vector<int>& dim,
std::vector<float>& target_coords,
bool affine = false);
void box_to_center_scale(std::vector<int>& box,
int width,
int height,
std::vector<float>& center,
std::vector<float>& scale);
void get_max_preds(float* heatmap,
std::vector<int>& dim,
std::vector<float>& preds,
float* maxvals,
int batchid,
int joint_idx);
void get_final_preds(std::vector<float>& heatmap,
std::vector<int>& dim,
std::vector<int64_t>& idxout,
std::vector<int>& idxdim,
std::vector<float>& center,
std::vector<float> scale,
std::vector<float>& preds,
int batchid,
bool DARK = true);
// Object KeyPoint Result
struct KeyPointResult {
// Keypoints: shape(N x 3); N: number of Joints; 3: x,y,conf
std::vector<float> keypoints;
int num_joints = -1;
};
class PoseSmooth {
public:
explicit PoseSmooth(const int width,
const int height,
std::string filter_type = "OneEuro",
float alpha = 0.5,
float fc_d = 0.1,
float fc_min = 0.1,
float beta = 0.1,
float thres_mult = 0.3)
: width(width),
height(height),
alpha(alpha),
fc_d(fc_d),
fc_min(fc_min),
beta(beta),
filter_type(filter_type),
thres_mult(thres_mult) {}
// Run predictor
KeyPointResult smooth_process(KeyPointResult* result);
void PointSmooth(KeyPointResult* result,
KeyPointResult* keypoint_smoothed,
std::vector<float> thresholds,
int index);
float OneEuroFilter(float x_cur, float x_pre, int loc);
float smoothing_factor(float te, float fc);
float ExpSmoothing(float x_cur, float x_pre, int loc = 0);
private:
int width = 0;
int height = 0;
float alpha = 0.;
float fc_d = 1.;
float fc_min = 0.;
float beta = 1.;
float thres_mult = 1.;
std::string filter_type = "OneEuro";
std::vector<float> thresholds = {0.005,
0.005,
0.005,
0.005,
0.005,
0.01,
0.01,
0.01,
0.01,
0.01,
0.01,
0.01,
0.01,
0.01,
0.01,
0.01,
0.01};
KeyPointResult x_prev_hat;
KeyPointResult dx_prev_hat;
};
} // namespace PaddleDetection
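
The OneEuro smoother above is stateful and meant to be kept per tracked person; a minimal sketch of one smoothing pass, where the frame size and the 17-joint x,y,conf layout are assumptions matching the 17-entry threshold table:

#include "include/keypoint_postprocess.h"

int main() {
  // One smoother instance per tracked person, sized to the frame resolution.
  PaddleDetection::PoseSmooth smoother(1920, 1080);
  PaddleDetection::KeyPointResult raw;
  raw.num_joints = 17;
  raw.keypoints.assign(17 * 3, 0.f);  // would normally come from KeyPointDetector
  PaddleDetection::KeyPointResult smoothed = smoother.smooth_process(&raw);
  return 0;
}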

View File

@@ -0,0 +1,52 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code is based on:
// https://github.com/gatagat/lap/blob/master/lap/lapjv.h
// The copyright of gatagat/lap is as follows:
// MIT License
#ifndef LAPJV_H
#define LAPJV_H
#define LARGE 1000000
#if !defined TRUE
#define TRUE 1
#endif
#if !defined FALSE
#define FALSE 0
#endif
#define NEW(x, t, n) if ((x = (t *)malloc(sizeof(t) * (n))) == 0) {return -1;}
#define FREE(x) if (x != 0) { free(x); x = 0; }
#define SWAP_INDICES(a, b) { int_t _temp_index = a; a = b; b = _temp_index; }
#include <opencv2/opencv.hpp>
namespace PaddleDetection {
typedef signed int int_t;
typedef unsigned int uint_t;
typedef double cost_t;
typedef char boolean;
typedef enum fp_t { FP_1 = 1, FP_2 = 2, FP_DYNAMIC = 3 } fp_t;
int lapjv_internal(
const cv::Mat &cost, const bool extend_cost, const float cost_limit,
int *x, int *y);
} // namespace PaddleDetection
#endif // LAPJV_H
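
lapjv_internal solves a linear assignment problem over a cost matrix. A toy sketch follows; the install path include/lapjv.h, the CV_32F cost type and the cost_limit value are assumptions, and the output convention is the usual lapjv one (x[i] is the column matched to row i, y[j] the row matched to column j).

#include <vector>
#include "include/lapjv.h"  // assumed install path of the header above

int main() {
  // Toy 3x3 cost matrix; lower cost means a better match.
  cv::Mat cost = (cv::Mat_<float>(3, 3) << 4, 1, 3,
                                           2, 0, 5,
                                           3, 2, 2);
  std::vector<int> x(cost.rows, -1);  // x[i]: column assigned to row i
  std::vector<int> y(cost.cols, -1);  // y[j]: row assigned to column j
  int ret = PaddleDetection::lapjv_internal(cost, true, 10000.f, x.data(), y.data());
  return ret < 0 ? 1 : 0;
}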

View File

@@ -0,0 +1,124 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ctime>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "paddle_inference_api.h" // NOLINT
#include "include/config_parser.h"
#include "include/picodet_postprocess.h"
#include "include/preprocess_op.h"
#include "include/utils.h"
using namespace paddle_infer;
namespace PaddleDetection {
// Generate visualization colormap for each class
std::vector<int> GenerateColorMap(int num_class);
// Visualization of the detection result
cv::Mat
VisualizeResult(const cv::Mat &img,
const std::vector<PaddleDetection::ObjectResult> &results,
const std::vector<std::string> &lables,
const std::vector<int> &colormap, const bool is_rbox);
class ObjectDetector {
public:
explicit ObjectDetector(const std::string &model_dir,
const std::string &device = "CPU",
bool use_mkldnn = false, int cpu_threads = 1,
const std::string &run_mode = "paddle",
const int batch_size = 1, const int gpu_id = 0,
const int trt_min_shape = 1,
const int trt_max_shape = 1280,
const int trt_opt_shape = 640,
bool trt_calib_mode = false) {
this->device_ = device;
this->gpu_id_ = gpu_id;
this->cpu_math_library_num_threads_ = cpu_threads;
this->use_mkldnn_ = use_mkldnn;
this->trt_min_shape_ = trt_min_shape;
this->trt_max_shape_ = trt_max_shape;
this->trt_opt_shape_ = trt_opt_shape;
this->trt_calib_mode_ = trt_calib_mode;
config_.load_config(model_dir);
this->use_dynamic_shape_ = config_.use_dynamic_shape_;
this->min_subgraph_size_ = config_.min_subgraph_size_;
threshold_ = config_.draw_threshold_;
preprocessor_.Init(config_.preprocess_info_);
LoadModel(model_dir, batch_size, run_mode);
}
// Load Paddle inference model
void LoadModel(const std::string &model_dir, const int batch_size = 1,
const std::string &run_mode = "paddle");
// Run predictor
void Predict(const std::vector<cv::Mat> imgs, const double threshold = 0.5,
const int warmup = 0, const int repeats = 1,
std::vector<PaddleDetection::ObjectResult> *result = nullptr,
std::vector<int> *bbox_num = nullptr,
std::vector<double> *times = nullptr);
// Get Model Label list
const std::vector<std::string> &GetLabelList() const {
return config_.label_list_;
}
private:
std::string device_ = "CPU";
int gpu_id_ = 0;
int cpu_math_library_num_threads_ = 1;
bool use_mkldnn_ = false;
int min_subgraph_size_ = 3;
bool use_dynamic_shape_ = false;
int trt_min_shape_ = 1;
int trt_max_shape_ = 1280;
int trt_opt_shape_ = 640;
bool trt_calib_mode_ = false;
// Preprocess image and copy data to input buffer
void Preprocess(const cv::Mat &image_mat);
// Postprocess result
void Postprocess(const std::vector<cv::Mat> mats,
std::vector<PaddleDetection::ObjectResult> *result,
std::vector<int> bbox_num, std::vector<float> output_data_,
std::vector<int> output_mask_data_, bool is_rbox);
void SOLOv2Postprocess(
const std::vector<cv::Mat> mats, std::vector<ObjectResult> *result,
std::vector<int> *bbox_num, std::vector<int> out_bbox_num_data_,
std::vector<int64_t> out_label_data_, std::vector<float> out_score_data_,
std::vector<uint8_t> out_global_mask_data_, float threshold = 0.5);
std::shared_ptr<Predictor> predictor_;
Preprocessor preprocessor_;
ImageBlob inputs_;
float threshold_;
ConfigPaser config_;
};
} // namespace PaddleDetection
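
A hedged end-to-end sketch of the detector API above (header install path, model directory and image name are placeholders): Predict() fills one ObjectResult per box plus a per-image box count, and VisualizeResult() draws the boxes using the label list from infer_cfg.yml.

#include <opencv2/imgcodecs.hpp>
#include "include/object_detector.h"  // assumed install path of the header above

int main() {
  PaddleDetection::ObjectDetector detector("./layout_model", "GPU");
  cv::Mat img = cv::imread("page.jpg");
  std::vector<PaddleDetection::ObjectResult> results;
  std::vector<int> bbox_num;
  std::vector<double> times(3, 0.0);
  detector.Predict({img}, 0.5, 0, 1, &results, &bbox_num, &times);
  std::vector<int> colormap =
      PaddleDetection::GenerateColorMap(detector.GetLabelList().size());
  cv::Mat vis = PaddleDetection::VisualizeResult(
      img, results, detector.GetLabelList(), colormap, false);
  cv::imwrite("page_vis.jpg", vis);
  return 0;
}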

View File

@@ -0,0 +1,37 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <ctime>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "include/utils.h"
namespace PaddleDetection {
void PicoDetPostProcess(std::vector<PaddleDetection::ObjectResult> *results,
std::vector<const float *> outs,
std::vector<int> fpn_stride,
std::vector<float> im_shape,
std::vector<float> scale_factor,
float score_threshold = 0.3, float nms_threshold = 0.5,
int num_class = 80, int reg_max = 7);
} // namespace PaddleDetection

View File

@@ -0,0 +1,239 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
namespace PaddleDetection {
// Object for storing all preprocessed data
class ImageBlob {
public:
// image width and height
std::vector<float> im_shape_;
// Buffer for image data after preprocessing
std::vector<float> im_data_;
// in net data shape(after pad)
std::vector<float> in_net_shape_;
// Evaluation image width and height
// std::vector<float> eval_im_size_f_;
// Scale factor for image size to origin image size
std::vector<float> scale_factor_;
// in net image after preprocessing
cv::Mat in_net_im_;
};
// Abstract base class for preprocessing operations
class PreprocessOp {
public:
virtual void Init(const YAML::Node& item) = 0;
virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
};
class InitInfo : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {}
virtual void Run(cv::Mat* im, ImageBlob* data);
};
class NormalizeImage : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {
mean_ = item["mean"].as<std::vector<float>>();
scale_ = item["std"].as<std::vector<float>>();
if (item["is_scale"]) is_scale_ = item["is_scale"].as<bool>();
if (item["norm_type"]) norm_type_ = item["norm_type"].as<std::string>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
// CHW or HWC
std::vector<float> mean_;
std::vector<float> scale_;
bool is_scale_ = true;
std::string norm_type_ = "mean_std";
};
class Permute : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {}
virtual void Run(cv::Mat* im, ImageBlob* data);
};
class Resize : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {
interp_ = item["interp"].as<int>();
keep_ratio_ = item["keep_ratio"].as<bool>();
target_size_ = item["target_size"].as<std::vector<int>>();
}
// Compute best resize scale for x-dimension, y-dimension
std::pair<float, float> GenerateScale(const cv::Mat& im);
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int interp_;
bool keep_ratio_;
std::vector<int> target_size_;
std::vector<int> in_net_shape_;
};
class LetterBoxResize : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {
target_size_ = item["target_size"].as<std::vector<int>>();
}
float GenerateScale(const cv::Mat& im);
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
std::vector<int> target_size_;
std::vector<int> in_net_shape_;
};
// Models with FPN need input shape % stride == 0
class PadStride : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {
stride_ = item["stride"].as<int>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int stride_;
};
class TopDownEvalAffine : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {
trainsize_ = item["trainsize"].as<std::vector<int>>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int interp_ = 1;
std::vector<int> trainsize_;
};
class WarpAffine : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {
input_h_ = item["input_h"].as<int>();
input_w_ = item["input_w"].as<int>();
keep_res_ = item["keep_res"].as<bool>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int input_h_;
int input_w_;
int interp_ = 1;
bool keep_res_ = true;
int pad_ = 31;
};
class Pad : public PreprocessOp {
public:
virtual void Init(const YAML::Node& item) {
size_ = item["size"].as<std::vector<int>>();
fill_value_ = item["fill_value"].as<std::vector<float>>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
std::vector<int> size_;
std::vector<float> fill_value_;
};
void CropImg(cv::Mat& img,
cv::Mat& crop_img,
std::vector<int>& area,
std::vector<float>& center,
std::vector<float>& scale,
float expandratio = 0.15);
// check whether the input size is dynamic
bool CheckDynamicInput(const std::vector<cv::Mat>& imgs);
// Pad images in batch
std::vector<cv::Mat> PadBatch(const std::vector<cv::Mat>& imgs);
class Preprocessor {
public:
void Init(const YAML::Node& config_node) {
// initialize image info first
ops_["InitInfo"] = std::make_shared<InitInfo>();
for (const auto& item : config_node) {
auto op_name = item["type"].as<std::string>();
ops_[op_name] = CreateOp(op_name);
ops_[op_name]->Init(item);
}
}
std::shared_ptr<PreprocessOp> CreateOp(const std::string& name) {
if (name == "Resize") {
return std::make_shared<Resize>();
} else if (name == "LetterBoxResize") {
return std::make_shared<LetterBoxResize>();
} else if (name == "Permute") {
return std::make_shared<Permute>();
} else if (name == "NormalizeImage") {
return std::make_shared<NormalizeImage>();
} else if (name == "PadStride") {
// use PadStride instead of PadBatch
return std::make_shared<PadStride>();
} else if (name == "TopDownEvalAffine") {
return std::make_shared<TopDownEvalAffine>();
} else if (name == "WarpAffine") {
return std::make_shared<WarpAffine>();
} else if (name == "Pad") {
return std::make_shared<Pad>();
}
std::cerr << "can not find function of OP: " << name
<< " and return: nullptr" << std::endl;
return nullptr;
}
void Run(cv::Mat* im, ImageBlob* data);
public:
static const std::vector<std::string> RUN_ORDER;
private:
std::unordered_map<std::string, std::shared_ptr<PreprocessOp>> ops_;
};
} // namespace PaddleDetection
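
The Preprocessor can also be exercised on its own. The sketch below builds it from an inline YAML list that mirrors a typical Preprocess section of infer_cfg.yml; the concrete op parameters are illustrative assumptions, not tied to a specific model.

#include <opencv2/imgcodecs.hpp>
#include "include/preprocess_op.h"

int main() {
  // Inline stand-in for the Preprocess section of infer_cfg.yml.
  const char* preprocess_yaml = R"(
- type: Resize
  interp: 2
  keep_ratio: false
  target_size: [800, 608]
- type: NormalizeImage
  mean: [0.485, 0.456, 0.406]
  std: [0.229, 0.224, 0.225]
  is_scale: true
- type: Permute
)";
  PaddleDetection::Preprocessor preprocessor;
  preprocessor.Init(YAML::Load(preprocess_yaml));
  cv::Mat img = cv::imread("page.jpg");
  cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
  PaddleDetection::ImageBlob blob;
  preprocessor.Run(&img, &blob);  // blob.im_data_ now holds the network input
  return 0;
}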

View File

@@ -0,0 +1,63 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code is based on:
// https://github.com/CnybTseng/JDE/blob/master/platforms/common/jdetracker.h
// The copyright of CnybTseng/JDE is as follows:
// MIT License
#pragma once
#include <map>
#include <vector>
#include <opencv2/opencv.hpp>
#include "trajectory.h"
namespace PaddleDetection {
typedef std::map<int, int> Match;
typedef std::map<int, int>::iterator MatchIterator;
struct Track
{
int id;
float score;
cv::Vec4f ltrb;
};
class JDETracker
{
public:
static JDETracker *instance(void);
virtual bool update(const cv::Mat &dets, const cv::Mat &emb, std::vector<Track> &tracks);
private:
JDETracker(void);
virtual ~JDETracker(void) {}
cv::Mat motion_distance(const TrajectoryPtrPool &a, const TrajectoryPool &b);
void linear_assignment(const cv::Mat &cost, float cost_limit, Match &matches,
std::vector<int> &mismatch_row, std::vector<int> &mismatch_col);
void remove_duplicate_trajectory(TrajectoryPool &a, TrajectoryPool &b, float iou_thresh=0.15f);
private:
static JDETracker *me;
int timestamp;
TrajectoryPool tracked_trajectories;
TrajectoryPool lost_trajectories;
TrajectoryPool removed_trajectories;
int max_lost_time;
float lambda;
float det_thresh;
};
} // namespace PaddleDetection
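
A minimal wiring sketch of the singleton tracker. The per-row layout of dets (box coordinates plus score) and the embedding width used here are assumptions following the JDE convention; the actual layout is fixed by the accompanying .cc implementation, which is not part of this diff.

#include "include/tracker.h"

int main() {
  // One dummy detection row and one row-aligned embedding; shapes are illustrative.
  cv::Mat dets = cv::Mat::zeros(1, 6, CV_32FC1);
  cv::Mat emb = cv::Mat::zeros(1, 128, CV_32FC1);
  std::vector<PaddleDetection::Track> tracks;
  PaddleDetection::JDETracker::instance()->update(dets, emb, tracks);
  // tracks holds {id, score, ltrb} for every confirmed trajectory.
  return 0;
}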

View File

@@ -0,0 +1,202 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code is based on:
// https://github.com/CnybTseng/JDE/blob/master/platforms/common/trajectory.h
// The copyright of CnybTseng/JDE is as follows:
// MIT License
#pragma once
#include <vector>
#include <opencv2/opencv.hpp>
namespace PaddleDetection {
typedef enum
{
New = 0,
Tracked = 1,
Lost = 2,
Removed = 3
} TrajectoryState;
class Trajectory;
typedef std::vector<Trajectory> TrajectoryPool;
typedef std::vector<Trajectory>::iterator TrajectoryPoolIterator;
typedef std::vector<Trajectory *> TrajectoryPtrPool;
typedef std::vector<Trajectory *>::iterator TrajectoryPtrPoolIterator;
class TKalmanFilter : public cv::KalmanFilter
{
public:
TKalmanFilter(void);
virtual ~TKalmanFilter(void) {}
virtual void init(const cv::Mat &measurement);
virtual const cv::Mat &predict();
virtual const cv::Mat &correct(const cv::Mat &measurement);
virtual void project(cv::Mat &mean, cv::Mat &covariance) const;
private:
float std_weight_position;
float std_weight_velocity;
};
inline TKalmanFilter::TKalmanFilter(void) : cv::KalmanFilter(8, 4)
{
cv::KalmanFilter::transitionMatrix = cv::Mat::eye(8, 8, CV_32F);
for (int i = 0; i < 4; ++i)
cv::KalmanFilter::transitionMatrix.at<float>(i, i + 4) = 1;
cv::KalmanFilter::measurementMatrix = cv::Mat::eye(4, 8, CV_32F);
std_weight_position = 1/20.f;
std_weight_velocity = 1/160.f;
}
class Trajectory : public TKalmanFilter
{
public:
Trajectory();
Trajectory(cv::Vec4f &ltrb, float score, const cv::Mat &embedding);
Trajectory(const Trajectory &other);
Trajectory &operator=(const Trajectory &rhs);
virtual ~Trajectory(void) {};
static int next_id();
virtual const cv::Mat &predict(void);
virtual void update(Trajectory &traj, int timestamp, bool update_embedding=true);
virtual void activate(int timestamp);
virtual void reactivate(Trajectory &traj, int timestamp, bool newid=false);
virtual void mark_lost(void);
virtual void mark_removed(void);
friend TrajectoryPool operator+(const TrajectoryPool &a, const TrajectoryPool &b);
friend TrajectoryPool operator+(const TrajectoryPool &a, const TrajectoryPtrPool &b);
friend TrajectoryPool &operator+=(TrajectoryPool &a, const TrajectoryPtrPool &b);
friend TrajectoryPool operator-(const TrajectoryPool &a, const TrajectoryPool &b);
friend TrajectoryPool &operator-=(TrajectoryPool &a, const TrajectoryPool &b);
friend TrajectoryPtrPool operator+(const TrajectoryPtrPool &a, const TrajectoryPtrPool &b);
friend TrajectoryPtrPool operator+(const TrajectoryPtrPool &a, TrajectoryPool &b);
friend TrajectoryPtrPool operator-(const TrajectoryPtrPool &a, const TrajectoryPtrPool &b);
friend cv::Mat embedding_distance(const TrajectoryPool &a, const TrajectoryPool &b);
friend cv::Mat embedding_distance(const TrajectoryPtrPool &a, const TrajectoryPtrPool &b);
friend cv::Mat embedding_distance(const TrajectoryPtrPool &a, const TrajectoryPool &b);
friend cv::Mat mahalanobis_distance(const TrajectoryPool &a, const TrajectoryPool &b);
friend cv::Mat mahalanobis_distance(const TrajectoryPtrPool &a, const TrajectoryPtrPool &b);
friend cv::Mat mahalanobis_distance(const TrajectoryPtrPool &a, const TrajectoryPool &b);
friend cv::Mat iou_distance(const TrajectoryPool &a, const TrajectoryPool &b);
friend cv::Mat iou_distance(const TrajectoryPtrPool &a, const TrajectoryPtrPool &b);
friend cv::Mat iou_distance(const TrajectoryPtrPool &a, const TrajectoryPool &b);
private:
void update_embedding(const cv::Mat &embedding);
public:
TrajectoryState state;
cv::Vec4f ltrb;
cv::Mat smooth_embedding;
int id;
bool is_activated;
int timestamp;
int starttime;
float score;
private:
static int count;
cv::Vec4f xyah;
cv::Mat current_embedding;
float eta;
int length;
};
inline cv::Vec4f ltrb2xyah(cv::Vec4f &ltrb)
{
cv::Vec4f xyah;
xyah[0] = (ltrb[0] + ltrb[2]) * 0.5f;
xyah[1] = (ltrb[1] + ltrb[3]) * 0.5f;
xyah[3] = ltrb[3] - ltrb[1];
xyah[2] = (ltrb[2] - ltrb[0]) / xyah[3];
return xyah;
}
inline Trajectory::Trajectory() :
state(New), ltrb(cv::Vec4f()), smooth_embedding(cv::Mat()), id(0),
is_activated(false), timestamp(0), starttime(0), score(0), eta(0.9), length(0)
{
}
inline Trajectory::Trajectory(cv::Vec4f &ltrb_, float score_, const cv::Mat &embedding) :
state(New), ltrb(ltrb_), smooth_embedding(cv::Mat()), id(0),
is_activated(false), timestamp(0), starttime(0), score(score_), eta(0.9), length(0)
{
xyah = ltrb2xyah(ltrb);
update_embedding(embedding);
}
inline Trajectory::Trajectory(const Trajectory &other):
state(other.state), ltrb(other.ltrb), id(other.id), is_activated(other.is_activated),
timestamp(other.timestamp), starttime(other.starttime), xyah(other.xyah),
score(other.score), eta(other.eta), length(other.length)
{
other.smooth_embedding.copyTo(smooth_embedding);
other.current_embedding.copyTo(current_embedding);
// copy state in KalmanFilter
other.statePre.copyTo(cv::KalmanFilter::statePre);
other.statePost.copyTo(cv::KalmanFilter::statePost);
other.errorCovPre.copyTo(cv::KalmanFilter::errorCovPre);
other.errorCovPost.copyTo(cv::KalmanFilter::errorCovPost);
}
inline Trajectory &Trajectory::operator=(const Trajectory &rhs)
{
this->state = rhs.state;
this->ltrb = rhs.ltrb;
rhs.smooth_embedding.copyTo(this->smooth_embedding);
this->id = rhs.id;
this->is_activated = rhs.is_activated;
this->timestamp = rhs.timestamp;
this->starttime = rhs.starttime;
this->xyah = rhs.xyah;
this->score = rhs.score;
rhs.current_embedding.copyTo(this->current_embedding);
this->eta = rhs.eta;
this->length = rhs.length;
// copy state in KalmanFilter
rhs.statePre.copyTo(cv::KalmanFilter::statePre);
rhs.statePost.copyTo(cv::KalmanFilter::statePost);
rhs.errorCovPre.copyTo(cv::KalmanFilter::errorCovPre);
rhs.errorCovPost.copyTo(cv::KalmanFilter::errorCovPost);
return *this;
}
inline int Trajectory::next_id()
{
++count;
return count;
}
inline void Trajectory::mark_lost(void)
{
state = Lost;
}
inline void Trajectory::mark_removed(void)
{
state = Removed;
}
} // namespace PaddleDetection
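
A worked check of the ltrb2xyah helper above: a 100x200 box with its top-left corner at (10, 20) maps to center (60, 120), aspect ratio w/h = 0.5, and height 200.

#include <iostream>
#include "trajectory.h"

int main() {
  cv::Vec4f ltrb(10.f, 20.f, 110.f, 220.f);
  cv::Vec4f xyah = PaddleDetection::ltrb2xyah(ltrb);
  std::cout << xyah[0] << " " << xyah[1] << " "         // 60 120
            << xyah[2] << " " << xyah[3] << std::endl;  // 0.5 200
  return 0;
}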

View File

@@ -0,0 +1,41 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <algorithm>
#include <ctime>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
namespace PaddleDetection {
// Object Detection Result
struct ObjectResult {
// Rectangle coordinates of detected object: left, right, top, down
std::vector<int> rect;
// Class id of detected object
int class_id;
// Confidence of detected object
float confidence;
// Mask of detected object
std::vector<int> mask;
};
void nms(std::vector<ObjectResult> &input_boxes, float nms_threshold);
} // namespace PaddleDetection
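
A small sketch of the in-place NMS declared above. The rect layout below follows the xmin, ymin, xmax, ymax convention used elsewhere in this deploy code (an assumption here), and with an IoU well above the 0.5 threshold the lower-scoring box is expected to be dropped from the vector.

#include "include/utils.h"

int main() {
  std::vector<PaddleDetection::ObjectResult> boxes(2);
  boxes[0].rect = {10, 10, 110, 210};
  boxes[0].class_id = 0;
  boxes[0].confidence = 0.9f;
  boxes[1].rect = {12, 12, 108, 205};  // almost the same box, lower score
  boxes[1].class_id = 0;
  boxes[1].confidence = 0.6f;
  PaddleDetection::nms(boxes, 0.5f);
  // boxes.size() is expected to be 1 afterwards.
  return 0;
}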