Replace the document detection model
242 paddle_detection/deploy/pptracking/cpp/CMakeLists.txt Normal file
@@ -0,0 +1,242 @@
cmake_minimum_required(VERSION 3.0)
project(PaddleObjectDetector CXX C)

option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
option(WITH_TENSORRT "Compile demo with TensorRT." OFF)

SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
SET(PADDLE_LIB_NAME "" CACHE STRING "libpaddle_inference")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
SET(TENSORRT_INC_DIR "" CACHE PATH "Compile demo with TensorRT")
SET(TENSORRT_LIB_DIR "" CACHE PATH "Compile demo with TensorRT")

include(cmake/yaml-cpp.cmake)

include_directories("${CMAKE_SOURCE_DIR}/")
include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")

set(SRCS src/main.cc src/preprocess_op.cc src/pipeline.cc src/jde_predictor.cc src/sde_predictor.cc src/tracker.cc src/trajectory.cc src/lapjv.cpp src/postprocess.cc)

macro(safe_set_static_flag)
  foreach(flag_var
      CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
      CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
    if(${flag_var} MATCHES "/MD")
      string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
    endif(${flag_var} MATCHES "/MD")
  endforeach(flag_var)
endmacro()

if (WITH_MKL)
  ADD_DEFINITIONS(-DUSE_MKL)
endif()

if (NOT DEFINED PADDLE_DIR OR ${PADDLE_DIR} STREQUAL "")
  message(FATAL_ERROR "please set PADDLE_DIR with -DPADDLE_DIR=/path/paddle_inference_dir")
endif()
message("PADDLE_DIR IS:" ${PADDLE_DIR})

if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
  message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
endif()

include_directories("${CMAKE_SOURCE_DIR}/")
include_directories("${PADDLE_DIR}/")
include_directories("${PADDLE_DIR}/third_party/install/protobuf/include")
include_directories("${PADDLE_DIR}/third_party/install/glog/include")
include_directories("${PADDLE_DIR}/third_party/install/gflags/include")
include_directories("${PADDLE_DIR}/third_party/install/xxhash/include")
if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/include")
  include_directories("${PADDLE_DIR}/third_party/install/snappy/include")
endif()
if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/include")
  include_directories("${PADDLE_DIR}/third_party/install/snappystream/include")
endif()
include_directories("${PADDLE_DIR}/third_party/boost")
include_directories("${PADDLE_DIR}/third_party/eigen3")

if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
  link_directories("${PADDLE_DIR}/third_party/install/snappy/lib")
endif()
if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
  link_directories("${PADDLE_DIR}/third_party/install/snappystream/lib")
endif()

link_directories("${PADDLE_DIR}/third_party/install/protobuf/lib")
link_directories("${PADDLE_DIR}/third_party/install/glog/lib")
link_directories("${PADDLE_DIR}/third_party/install/gflags/lib")
link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib")
link_directories("${PADDLE_DIR}/paddle/lib/")
link_directories("${CMAKE_CURRENT_BINARY_DIR}")

if (WIN32)
  include_directories("${PADDLE_DIR}/paddle/fluid/inference")
  include_directories("${PADDLE_DIR}/paddle/include")
  link_directories("${PADDLE_DIR}/paddle/fluid/inference")
  find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
else ()
  find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
  include_directories("${PADDLE_DIR}/paddle/include")
  link_directories("${PADDLE_DIR}/paddle/lib")
endif ()
include_directories(${OpenCV_INCLUDE_DIRS})

if (WIN32)
  add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
  set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
  set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
  set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT")
else()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O2 -fopenmp -std=c++11")
  set(CMAKE_STATIC_LIBRARY_PREFIX "")
endif()

# TODO let users define cuda lib path
if (WITH_GPU)
  if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
    message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda-8.0/lib64")
  endif()
  if (NOT WIN32)
    if (NOT DEFINED CUDNN_LIB)
      message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn_v7.4/cuda/lib64")
    endif()
  endif(NOT WIN32)
endif()

if (NOT WIN32)
  if (WITH_TENSORRT AND WITH_GPU)
    include_directories("${TENSORRT_INC_DIR}/")
    link_directories("${TENSORRT_LIB_DIR}/")
  endif()
endif(NOT WIN32)

if (NOT WIN32)
  set(NGRAPH_PATH "${PADDLE_DIR}/third_party/install/ngraph")
  if(EXISTS ${NGRAPH_PATH})
    include(GNUInstallDirs)
    include_directories("${NGRAPH_PATH}/include")
    link_directories("${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}")
    set(NGRAPH_LIB ${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}/libngraph${CMAKE_SHARED_LIBRARY_SUFFIX})
  endif()
endif()

if(WITH_MKL)
  include_directories("${PADDLE_DIR}/third_party/install/mklml/include")
  if (WIN32)
    set(MATH_LIB ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.lib
        ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.lib)
  else ()
    set(MATH_LIB ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
        ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
    execute_process(COMMAND cp -r ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
  endif ()
  set(MKLDNN_PATH "${PADDLE_DIR}/third_party/install/mkldnn")
  if(EXISTS ${MKLDNN_PATH})
    include_directories("${MKLDNN_PATH}/include")
    if (WIN32)
      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
    else ()
      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
    endif ()
  endif()
else()
  set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()

if (WIN32)
  if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
    set(DEPS
        ${PADDLE_DIR}/paddle/fluid/inference/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
  else()
    set(DEPS
        ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
  endif()
endif()

if (WIN32)
  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
  set(DEPS ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()

message("PADDLE_LIB_NAME:" ${PADDLE_LIB_NAME})
message("DEPS:" ${DEPS})

if (NOT WIN32)
  set(DEPS ${DEPS}
      ${MATH_LIB} ${MKLDNN_LIB}
      glog gflags protobuf z xxhash yaml-cpp
      )
  if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
    set(DEPS ${DEPS} snappystream)
  endif()
  if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
    set(DEPS ${DEPS} snappy)
  endif()
else()
  set(DEPS ${DEPS}
      ${MATH_LIB} ${MKLDNN_LIB}
      glog gflags_static libprotobuf xxhash libyaml-cppmt)
  set(DEPS ${DEPS} libcmt shlwapi)
  if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
    set(DEPS ${DEPS} snappy)
  endif()
  if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
    set(DEPS ${DEPS} snappystream)
  endif()
endif(NOT WIN32)

if(WITH_GPU)
  if(NOT WIN32)
    if (WITH_TENSORRT)
      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
    endif()
    set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
    set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
  else()
    set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX})
    set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX})
    set(DEPS ${DEPS} ${CUDNN_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
  endif()
endif()

if (NOT WIN32)
  set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
  set(DEPS ${DEPS} ${EXTERNAL_LIB})
endif()

set(DEPS ${DEPS} ${OpenCV_LIBS})
add_executable(main ${SRCS})
ADD_DEPENDENCIES(main ext-yaml-cpp)
message("DEPS:" ${DEPS})
target_link_libraries(main ${DEPS})

if (WIN32 AND WITH_MKL)
  add_custom_command(TARGET main POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
  )
endif()

if (WIN32)
  add_custom_command(TARGET main POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
  )
endif()
175 paddle_detection/deploy/pptracking/cpp/README.md Normal file
@@ -0,0 +1,175 @@
# C++ Inference Deployment

In PaddlePaddle, the inference engine and the training engine are optimized differently under the hood. The inference engine uses AnalysisPredictor, which is tuned specifically for inference: it applies a series of graph-level optimizations to the model and removes unnecessary memory copies. For users with strict performance requirements when deploying a trained model, we provide inference code that is independent of PaddleDetection and can be integrated directly. The current C++ deployment supports single-camera, single-class tracking based on FairMot, together with pedestrian-flow statistics and entrance/exit counting.

It consists of three main steps:
- Prepare the environment
- Export the inference model
- Run C++ inference

## 1. Prepare the Environment

Requirements:

- GCC 8.2
- CUDA 10.1/10.2/11.1; CUDNN 7.6/8.1
- CMake 3.0+
- TensorRT 6/7

NVIDIA Jetson users: please follow the [Jetson build guide](../../cpp/docs/Jetson_build.md#jetson环境搭建) to install JetPack first.

### 1. Download the code

```
git clone https://github.com/PaddlePaddle/PaddleDetection.git
# The C++ deployment code is independent of the rest of the repository
cd deploy/pptracking/cpp
```

### 2. Download or build the PaddlePaddle C++ inference library

Download the inference library that matches your environment; see the [C++ inference library download list](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html).

After downloading and extracting it, the `./paddle_inference` directory contains:

```
paddle_inference
├── paddle # core Paddle library and headers
|
├── third_party # third-party dependencies and headers
|
└── version.txt # version and build information
```

**Note:** If your environment differs from the prebuilt libraries (e.g. a different CUDA, cuDNN, or TensorRT version), if you need to modify the Paddle source code, or if you want a customized build, you can compile the inference library from source by following [this guide](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html).

### 3. Build

<details>
<summary>Build on Linux:</summary>

The `cmake` command is wrapped in `scripts/build.sh`. Adjust the main parameters to your environment; they are described below:

```
# Whether to use GPU (i.e. whether CUDA is used)
WITH_GPU=ON

# Whether to use MKL or OpenBLAS; set it to OFF on TX2
WITH_MKL=OFF

# Whether to enable TensorRT (only effective when WITH_GPU=ON)
WITH_TENSORRT=ON

# TensorRT include path
TENSORRT_INC_DIR=/path/to/TensorRT/include

# TensorRT lib path
TENSORRT_LIB_DIR=/path/to/TensorRT/lib

# Path to the Paddle inference library
PADDLE_DIR=/path/to/paddle_inference/

# Name of the Paddle inference library
PADDLE_LIB_NAME=libpaddle_inference

# CUDA lib path
CUDA_LIB=/path/to/cuda/lib

# CUDNN lib path
CUDNN_LIB=/path/to/cudnn/lib

# OpenCV path
OPENCV_DIR=/path/to/opencv
```

After setting these parameters, run the ```build.sh``` script:

```
sh ./scripts/build.sh
```

</details>
<details>
<summary>Build on Windows:</summary>

- Install and configure OpenCV
  1. Download OpenCV 3.4.6 for Windows from the official site ([download link](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download))
  2. Run the downloaded executable and extract OpenCV to a directory of your choice, e.g. `D:\projects\opencv`
  3. Configure the environment variables as follows (this can be skipped if you use absolute paths everywhere)

    - My Computer -> Properties -> Advanced system settings -> Environment Variables
    - Find `Path` among the system variables (create it if it does not exist) and double-click to edit it
    - Add the OpenCV bin path as a new entry and save, e.g. `D:\projects\opencv\build\x64\vc14\bin`

- Generate the project files with CMake

  Run the following command to generate the project files:
```
cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=path_to_opencv -DWITH_KEYPOINT=ON
```

- Build

  Open `PaddleObjectDetector.sln` in the `cpp` folder with `Visual Studio 2019`, set the configuration to `Release`, and click `Build` -> `Build All`.

  The resulting executable is placed in the `Release` directory.

</details>

**Notes:**

1. On the `TX2` platform, `CUDA` and `CUDNN` must be installed via `JetPack`.
2. OpenCV downloads are provided for the Linux and TX2 platforms; for other environments, please install [opencv](https://opencv.org/) yourself.
3. Windows users are recommended to build with Visual Studio 2019.

## 2. Export the Inference Model

Export the trained weights to the model format required by the inference library, using ```tools/export_model.py``` in PaddleDetection:

```
python tools/export_model.py -c configs/mot/fairmot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.pdparams
```

By default, the inference model is exported to ```output_inference/fairmot_hrnetv2_w18_dlafpn_30e_576x320```, which contains ```infer_cfg.yml```, ```model.pdiparams```, ```model.pdiparams.info``` and ```model.pdmodel```.

Exported models can also be downloaded directly from the "algorithm introduction" section of the [inference model list](../README.md).

## 3. C++ Inference

After the steps above, run inference with ```build/main``` (Linux) or ```main.exe``` (Windows). The parameters are listed below:

| Parameter | Description |
| ---- | ---- |
| --track_model_dir | Path to the exported tracking model |
| --video_file | Path to the video file to predict |
| --device | Device to run on, one of `CPU/GPU/XPU`, default `CPU` |
| --gpu_id | GPU device id used for inference (default 0) |
| --run_mode | Mode when using GPU, default `paddle`, options: paddle/trt_fp32/trt_fp16/trt_int8 |
| --output_dir | Directory for output images, default `output` |
| --use_mkldnn | Whether to enable MKLDNN acceleration for CPU inference |
| --cpu_threads | Number of CPU threads, default 1 |
| --do_entrance_counting | Whether to count entrance/exit flow, default false |
| --save_result | Whether to save the tracking results |

Example 1:

```shell
# Run the video `test.mp4` on CPU; the model and the test video have both been moved into the `build` directory

./main --track_model_dir=./fairmot_hrnetv2_w18_dlafpn_30e_576x320 --video_file=test.mp4

# The visualized prediction is saved to output/test.mp4 in the current directory by default
```

Example 2:

```shell
# Run the video `test.mp4` on GPU with entrance/exit counting enabled and tracking results saved; the model and the test video have both been moved into the `build` directory

./main -video_file=test.mp4 -track_model_dir=./fairmot_dla34_30e_1088x608/ --device=gpu --do_entrance_counting=True --save_result=True

# The visualized prediction is saved to `output/test.mp4` in the current directory by default
# The tracking results are saved to `output/mot_output.txt`
# The counting results are saved to `output/flow_statistic.txt`
```
30 paddle_detection/deploy/pptracking/cpp/cmake/yaml-cpp.cmake Normal file
@@ -0,0 +1,30 @@
find_package(Git REQUIRED)

include(ExternalProject)

message("${CMAKE_BUILD_TYPE}")

ExternalProject_Add(
    ext-yaml-cpp
    URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
    URL_MD5 9542d6de397d1fbd649ed468cb5850e6
    CMAKE_ARGS
    -DYAML_CPP_BUILD_TESTS=OFF
    -DYAML_CPP_BUILD_TOOLS=OFF
    -DYAML_CPP_INSTALL=OFF
    -DYAML_CPP_BUILD_CONTRIB=OFF
    -DMSVC_SHARED_RT=OFF
    -DBUILD_SHARED_LIBS=OFF
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
    -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
    -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
    -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
    -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
    PREFIX "${CMAKE_BINARY_DIR}/ext/yaml-cpp"
    # Disable install step
    INSTALL_COMMAND ""
    LOG_DOWNLOAD ON
    LOG_BUILD 1
)
137 paddle_detection/deploy/pptracking/cpp/include/config_parser.h Normal file
@@ -0,0 +1,137 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <map>
#include <string>
#include <vector>

#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

namespace PaddleDetection {

// Inference model configuration parser
class ConfigPaser {
 public:
  ConfigPaser() {}

  ~ConfigPaser() {}

  bool load_config(const std::string& model_dir,
                   const std::string& cfg = "infer_cfg.yml") {
    // Load as a YAML::Node
    YAML::Node config;
    config = YAML::LoadFile(model_dir + OS_PATH_SEP + cfg);

    // Get runtime mode : paddle, trt_fp16, trt_fp32
    if (config["mode"].IsDefined()) {
      mode_ = config["mode"].as<std::string>();
    } else {
      std::cerr << "Please set mode, "
                << "support value : paddle/trt_fp16/trt_fp32." << std::endl;
      return false;
    }

    // Get model arch: FairMot or YOLO/PicoDet/LCNet for DeepSort
    if (config["arch"].IsDefined()) {
      arch_ = config["arch"].as<std::string>();
    } else {
      std::cerr << "Please set model arch, "
                << "support value : FairMot, YOLO, PicoDet, LCNet etc."
                << std::endl;
      return false;
    }

    // Get min_subgraph_size for TensorRT
    if (config["min_subgraph_size"].IsDefined()) {
      min_subgraph_size_ = config["min_subgraph_size"].as<int>();
    } else {
      std::cerr << "Please set min_subgraph_size." << std::endl;
      return false;
    }
    // Get draw_threshold for visualization
    if (config["draw_threshold"].IsDefined()) {
      draw_threshold_ = config["draw_threshold"].as<float>();
    } else {
      std::cerr << "Please set draw_threshold." << std::endl;
      return false;
    }
    // Get Preprocess for preprocessing
    if (config["Preprocess"].IsDefined()) {
      preprocess_info_ = config["Preprocess"];
    } else {
      std::cerr << "Please set Preprocess." << std::endl;
      return false;
    }
    // Get label_list for visualization
    if (config["label_list"].IsDefined()) {
      label_list_ = config["label_list"].as<std::vector<std::string>>();
    } else {
      std::cerr << "Please set label_list." << std::endl;
      return false;
    }

    // Get use_dynamic_shape for TensorRT
    if (config["use_dynamic_shape"].IsDefined()) {
      use_dynamic_shape_ = config["use_dynamic_shape"].as<bool>();
    } else {
      std::cerr << "Please set use_dynamic_shape." << std::endl;
      return false;
    }

    // Get conf_thresh for tracker
    if (config["tracker"].IsDefined()) {
      if (config["tracker"]["conf_thres"].IsDefined()) {
        conf_thresh_ = config["tracker"]["conf_thres"].as<float>();
      } else {
        std::cerr << "Please set conf_thres in tracker." << std::endl;
        return false;
      }
    }

    // Get NMS for postprocess
    if (config["NMS"].IsDefined()) {
      nms_info_ = config["NMS"];
    }
    // Get fpn_stride in PicoDet
    if (config["fpn_stride"].IsDefined()) {
      fpn_stride_.clear();
      for (auto item : config["fpn_stride"]) {
        fpn_stride_.emplace_back(item.as<int>());
      }
    }

    return true;
  }
  std::string mode_;
  float draw_threshold_;
  std::string arch_;
  int min_subgraph_size_;
  YAML::Node preprocess_info_;
  YAML::Node nms_info_;
  std::vector<std::string> label_list_;
  std::vector<int> fpn_stride_;
  bool use_dynamic_shape_;
  float conf_thresh_;
};

}  // namespace PaddleDetection
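The parser above is what both predictors run before creating the Paddle predictor. A minimal usage sketch, assuming the model directory exported in the README (the path is a placeholder; `infer_cfg.yml` is the file written by `tools/export_model.py`):

```cpp
#include <iostream>
#include "include/config_parser.h"

int main() {
  PaddleDetection::ConfigPaser config;
  // Parses <model_dir>/infer_cfg.yml and fills the public fields on success.
  if (!config.load_config("./fairmot_hrnetv2_w18_dlafpn_30e_576x320")) {
    std::cerr << "invalid or incomplete infer_cfg.yml" << std::endl;
    return 1;
  }
  // conf_thresh_ is only filled when the exported config has a tracker
  // section (FairMot exports do).
  std::cout << "arch: " << config.arch_
            << ", mode: " << config.mode_
            << ", tracker conf_thres: " << config.conf_thresh_ << std::endl;
  return 0;
}
```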
98 paddle_detection/deploy/pptracking/cpp/include/jde_predictor.h Normal file
@@ -0,0 +1,98 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "paddle_inference_api.h"  // NOLINT

#include "include/config_parser.h"
#include "include/preprocess_op.h"
#include "include/utils.h"

using namespace paddle_infer;  // NOLINT

namespace PaddleDetection {

class JDEPredictor {
 public:
  explicit JDEPredictor(const std::string& device = "CPU",
                        const std::string& model_dir = "",
                        const double threshold = -1.,
                        const std::string& run_mode = "paddle",
                        const int gpu_id = 0,
                        const bool use_mkldnn = false,
                        const int cpu_threads = 1,
                        bool trt_calib_mode = false,
                        const int min_box_area = 200) {
    this->device_ = device;
    this->gpu_id_ = gpu_id;
    this->use_mkldnn_ = use_mkldnn;
    this->cpu_math_library_num_threads_ = cpu_threads;
    this->trt_calib_mode_ = trt_calib_mode;
    this->min_box_area_ = min_box_area;

    config_.load_config(model_dir);
    this->min_subgraph_size_ = config_.min_subgraph_size_;
    preprocessor_.Init(config_.preprocess_info_);
    LoadModel(model_dir, run_mode);
    this->conf_thresh_ = config_.conf_thresh_;
    this->threshold_ = config_.conf_thresh_;
  }

  // Load Paddle inference model
  void LoadModel(const std::string& model_dir,
                 const std::string& run_mode = "paddle");

  // Run predictor
  void Predict(const std::vector<cv::Mat> imgs,
               const double threshold = 0.5,
               MOTResult* result = nullptr,
               std::vector<double>* times = nullptr);

 private:
  std::string device_ = "CPU";
  float threhold = 0.5;
  int gpu_id_ = 0;
  bool use_mkldnn_ = false;
  int cpu_math_library_num_threads_ = 1;
  int min_subgraph_size_ = 3;
  bool trt_calib_mode_ = false;

  // Preprocess image and copy data to input buffer
  void Preprocess(const cv::Mat& image_mat);
  // Postprocess result
  void Postprocess(const cv::Mat dets, const cv::Mat emb, MOTResult* result);

  std::shared_ptr<Predictor> predictor_;
  Preprocessor preprocessor_;
  ImageBlob inputs_;
  std::vector<float> bbox_data_;
  std::vector<float> emb_data_;
  double threshold_;
  ConfigPaser config_;
  float min_box_area_;
  float conf_thresh_;
};

}  // namespace PaddleDetection
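A minimal sketch of driving the JDE (FairMot) predictor frame by frame, assuming the model directory exported in the README; `MOTResult` is declared in `include/utils.h`, and the video path and timing-vector layout are placeholders rather than the shipped `main.cc`:

```cpp
#include <vector>
#include <opencv2/opencv.hpp>
#include "include/jde_predictor.h"

int main() {
  // Placeholder model path; remaining constructor arguments keep their defaults.
  PaddleDetection::JDEPredictor predictor(
      "GPU", "./fairmot_hrnetv2_w18_dlafpn_30e_576x320", -1., "paddle");

  cv::VideoCapture capture("test.mp4");
  cv::Mat frame;
  int frame_id = 0;
  while (capture.read(frame)) {
    PaddleDetection::MOTResult result;
    std::vector<double> times(3, 0.0);  // assumed pre/infer/post timing slots
    predictor.Predict({frame}, 0.5, &result, &times);
    // result now holds the tracked boxes and ids for this frame.
    ++frame_id;
  }
  return 0;
}
```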
64 paddle_detection/deploy/pptracking/cpp/include/lapjv.h Normal file
@@ -0,0 +1,64 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The code is based on:
// https://github.com/gatagat/lap/blob/master/lap/lapjv.h
// The copyright of gatagat/lap is as follows:
// MIT License

#ifndef DEPLOY_PPTRACKING_CPP_INCLUDE_LAPJV_H_
#define DEPLOY_PPTRACKING_CPP_INCLUDE_LAPJV_H_
#define LARGE 1000000

#if !defined TRUE
#define TRUE 1
#endif
#if !defined FALSE
#define FALSE 0
#endif

#define NEW(x, t, n) \
  if ((x = reinterpret_cast<t *>(malloc(sizeof(t) * (n)))) == 0) { \
    return -1; \
  }
#define FREE(x) \
  if (x != 0) { \
    free(x); \
    x = 0; \
  }
#define SWAP_INDICES(a, b) \
  { \
    int_t _temp_index = a; \
    a = b; \
    b = _temp_index; \
  }
#include <opencv2/opencv.hpp>

namespace PaddleDetection {

typedef signed int int_t;
typedef unsigned int uint_t;
typedef double cost_t;
typedef char boolean;
typedef enum fp_t { FP_1 = 1, FP_2 = 2, FP_DYNAMIC = 3 } fp_t;

int lapjv_internal(const cv::Mat &cost,
                   const bool extend_cost,
                   const float cost_limit,
                   int *x,
                   int *y);

}  // namespace PaddleDetection

#endif  // DEPLOY_PPTRACKING_CPP_INCLUDE_LAPJV_H_
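`lapjv_internal` solves the linear assignment problem over a dense cost matrix and is what the tracker's matching step builds on. A small sketch, assuming the cost matrix is `CV_32F` with one row per track and one column per detection (row/column assignments come back through `x` and `y`, with `-1` meaning unmatched; the exact conventions live in `src/lapjv.cpp`):

```cpp
#include <cstdio>
#include <vector>
#include <opencv2/opencv.hpp>
#include "include/lapjv.h"

int main() {
  // 2 tracks x 3 detections; lower cost means a better match.
  float data[2][3] = {{0.1f, 0.9f, 0.8f},
                      {0.7f, 0.2f, 0.6f}};
  cv::Mat cost(2, 3, CV_32F, data);

  std::vector<int> x(cost.rows, -1);  // column assigned to each row
  std::vector<int> y(cost.cols, -1);  // row assigned to each column
  PaddleDetection::lapjv_internal(cost, /*extend_cost=*/true,
                                  /*cost_limit=*/0.5f, x.data(), y.data());

  for (int i = 0; i < cost.rows; ++i)
    std::printf("track %d -> detection %d\n", i, x[i]);
  return 0;
}
```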
142 paddle_detection/deploy/pptracking/cpp/include/pipeline.h Normal file
@@ -0,0 +1,142 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef DEPLOY_PPTRACKING_CPP_INCLUDE_PIPELINE_H_
#define DEPLOY_PPTRACKING_CPP_INCLUDE_PIPELINE_H_

#include <glog/logging.h>

#include <math.h>
#include <sys/types.h>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

#ifdef _WIN32
#include <direct.h>
#include <io.h>
#elif LINUX
#include <stdarg.h>
#include <sys/stat.h>
#endif

#include "include/jde_predictor.h"
#include "include/sde_predictor.h"

namespace PaddleDetection {

class Pipeline {
 public:
  explicit Pipeline(const std::string& device,
                    const double threshold,
                    const std::string& output_dir,
                    const std::string& run_mode = "paddle",
                    const int gpu_id = 0,
                    const bool use_mkldnn = false,
                    const int cpu_threads = 1,
                    const bool trt_calib_mode = false,
                    const bool do_entrance_counting = false,
                    const bool save_result = false,
                    const std::string& scene = "pedestrian",
                    const bool tiny_obj = false,
                    const bool is_mtmct = false,
                    const int secs_interval = 10,
                    const std::string track_model_dir = "",
                    const std::string det_model_dir = "",
                    const std::string reid_model_dir = "") {
    std::vector<std::string> input;
    this->input_ = input;
    this->device_ = device;
    this->threshold_ = threshold;
    this->output_dir_ = output_dir;
    this->run_mode_ = run_mode;
    this->gpu_id_ = gpu_id;
    this->use_mkldnn_ = use_mkldnn;
    this->cpu_threads_ = cpu_threads;
    this->trt_calib_mode_ = trt_calib_mode;
    this->do_entrance_counting_ = do_entrance_counting;
    this->secs_interval_ = secs_interval;
    this->save_result_ = save_result;
    SelectModel(scene,
                tiny_obj,
                is_mtmct,
                track_model_dir,
                det_model_dir,
                reid_model_dir);
    InitPredictor();
  }

  // Set input, it must execute before Run()
  void SetInput(const std::string& input_video);
  void ClearInput();

  // Run pipeline in video
  void Run();
  void PredictMOT(const std::string& video_path);
  void PredictMTMCT(const std::vector<std::string> video_inputs);

  // Run pipeline in stream
  void RunMOTStream(const cv::Mat img,
                    const int frame_id,
                    const int video_fps,
                    const Rect entrance,
                    cv::Mat out_img,
                    std::vector<std::string>* records,
                    std::set<int>* count_set,
                    std::set<int>* interval_count_set,
                    std::vector<int>* in_count_list,
                    std::vector<int>* out_count_list,
                    std::map<int, std::vector<float>>* prev_center,
                    std::vector<std::string>* flow_records);
  void RunMTMCTStream(const std::vector<cv::Mat> imgs,
                      std::vector<std::string>* records);

  void PrintBenchmarkLog(const std::vector<double> det_time, const int img_num);

 private:
  // Select model according to scenes, it must execute before Run()
  void SelectModel(const std::string& scene = "pedestrian",
                   const bool tiny_obj = false,
                   const bool is_mtmct = false,
                   const std::string track_model_dir = "",
                   const std::string det_model_dir = "",
                   const std::string reid_model_dir = "");
  void InitPredictor();

  std::shared_ptr<PaddleDetection::JDEPredictor> jde_sct_;
  std::shared_ptr<PaddleDetection::SDEPredictor> sde_sct_;

  std::vector<std::string> input_;
  std::vector<cv::Mat> stream_;
  std::string device_;
  double threshold_;
  std::string output_dir_;
  std::string track_model_dir_;
  std::string det_model_dir_;
  std::string reid_model_dir_;
  std::string run_mode_ = "paddle";
  int gpu_id_ = 0;
  bool use_mkldnn_ = false;
  int cpu_threads_ = 1;
  bool trt_calib_mode_ = false;
  bool do_entrance_counting_ = false;
  bool save_result_ = false;
  int secs_interval_ = 10;
};

}  // namespace PaddleDetection

#endif  // DEPLOY_PPTRACKING_CPP_INCLUDE_PIPELINE_H_
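`Pipeline` is the high-level entry point that `src/main.cc` wires the command-line flags into. A minimal sketch of driving it directly, mirroring Example 2 from the README above (the model path and video file are placeholders):

```cpp
#include "include/pipeline.h"

int main() {
  PaddleDetection::Pipeline pipeline(
      /*device=*/"GPU",
      /*threshold=*/-1.,
      /*output_dir=*/"output",
      /*run_mode=*/"paddle",
      /*gpu_id=*/0,
      /*use_mkldnn=*/false,
      /*cpu_threads=*/1,
      /*trt_calib_mode=*/false,
      /*do_entrance_counting=*/true,
      /*save_result=*/true,
      /*scene=*/"pedestrian",
      /*tiny_obj=*/false,
      /*is_mtmct=*/false,
      /*secs_interval=*/10,
      /*track_model_dir=*/"./fairmot_dla34_30e_1088x608/");

  pipeline.SetInput("test.mp4");  // must be called before Run()
  // Per the README, this writes output/test.mp4, output/mot_output.txt and
  // output/flow_statistic.txt.
  pipeline.Run();
  pipeline.ClearInput();
  return 0;
}
```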
62 paddle_detection/deploy/pptracking/cpp/include/postprocess.h Normal file
@@ -0,0 +1,62 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <glog/logging.h>

#include <ctime>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "include/utils.h"

namespace PaddleDetection {

// Generate visualization color
cv::Scalar GetColor(int idx);

// Visualize Tracking Results
cv::Mat VisualizeTrackResult(const cv::Mat& img,
                             const MOTResult& results,
                             const float fps,
                             const int frame_id);

// Pedestrian/Vehicle Counting
void FlowStatistic(const MOTResult& results,
                   const int frame_id,
                   const int secs_interval,
                   const bool do_entrance_counting,
                   const int video_fps,
                   const Rect entrance,
                   std::set<int>* id_set,
                   std::set<int>* interval_id_set,
                   std::vector<int>* in_id_list,
                   std::vector<int>* out_id_list,
                   std::map<int, std::vector<float>>* prev_center,
                   std::vector<std::string>* records);

// Save Tracking Results
void SaveMOTResult(const MOTResult& results,
                   const int frame_id,
                   std::vector<std::string>* records);

}  // namespace PaddleDetection
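A short sketch of how the visualization and result-saving helpers might be chained per frame once a `MOTResult` is available (the frame rate and output path are placeholders; `FlowStatistic` additionally needs the entrance line and the counting state kept by `Pipeline::PredictMOT`, so it is omitted here):

```cpp
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include "include/postprocess.h"

void HandleFrame(const cv::Mat& frame,
                 const PaddleDetection::MOTResult& result,
                 int frame_id,
                 std::vector<std::string>* mot_records) {
  // Draw boxes, ids and fps onto a copy of the frame.
  cv::Mat vis = PaddleDetection::VisualizeTrackResult(frame, result,
                                                      /*fps=*/25.0f, frame_id);
  cv::imwrite("output/frame_" + std::to_string(frame_id) + ".jpg", vis);

  // Append this frame's tracking results to the record buffer that the
  // pipeline later flushes to output/mot_output.txt.
  PaddleDetection::SaveMOTResult(result, frame_id, mot_records);
}
```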
99 paddle_detection/deploy/pptracking/cpp/include/predictor.h Normal file
@@ -0,0 +1,99 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "paddle_inference_api.h"  // NOLINT

#include "include/config_parser.h"
#include "include/jde_predictor.h"
#include "include/preprocess_op.h"
#include "include/sde_predictor.h"

using namespace paddle_infer;  // NOLINT

namespace PaddleDetection {

class Predictor {
 public:
  explicit Predictor(const std::string& device = "CPU",
                     const std::string& track_model_dir = "",
                     const std::string& det_model_dir = "",
                     const std::string& reid_model_dir = "",
                     const double threshold = -1.,
                     const std::string& run_mode = "paddle",
                     const int gpu_id = 0,
                     const bool use_mkldnn = false,
                     const int cpu_threads = 1,
                     bool trt_calib_mode = false,
                     const int min_box_area = 200) {
    if (track_model_dir.empty() && det_model_dir.empty()) {
      throw "Predictor must receive track_model or det_model!";
    }

    if (!track_model_dir.empty() && !det_model_dir.empty()) {
      throw "Predictor only receive one of track_model or det_model!";
    }

    if (!track_model_dir.empty()) {
      jde_sct_ =
          std::make_shared<PaddleDetection::JDEPredictor>(device,
                                                          track_model_dir,
                                                          threshold,
                                                          run_mode,
                                                          gpu_id,
                                                          use_mkldnn,
                                                          cpu_threads,
                                                          trt_calib_mode,
                                                          min_box_area);
      use_jde_ = true;
    }
    if (!det_model_dir.empty()) {
      sde_sct_ = std::make_shared<PaddleDetection::SDEPredictor>(device,
                                                                 det_model_dir,
                                                                 reid_model_dir,
                                                                 threshold,
                                                                 run_mode,
                                                                 gpu_id,
                                                                 use_mkldnn,
                                                                 cpu_threads,
                                                                 trt_calib_mode,
                                                                 min_box_area);
      use_jde_ = false;
    }
  }

  // Run predictor
  void Predict(const std::vector<cv::Mat> imgs,
               const double threshold = 0.5,
               MOTResult* result = nullptr,
               std::vector<double>* times = nullptr);

 private:
  std::shared_ptr<PaddleDetection::JDEPredictor> jde_sct_;
  std::shared_ptr<PaddleDetection::SDEPredictor> sde_sct_;
  bool use_jde_ = true;
};

}  // namespace PaddleDetection
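`Predictor` simply dispatches to either the JDE path (one joint FairMot model) or the SDE path (separate detector and ReID models); exactly one of `track_model_dir` and `det_model_dir` may be set. A sketch with placeholder paths:

```cpp
#include <vector>
#include <opencv2/opencv.hpp>
#include "include/predictor.h"

int main() {
  // JDE/FairMot variant: only track_model_dir is given, det/reid stay empty.
  PaddleDetection::Predictor tracker(
      "CPU", "./fairmot_hrnetv2_w18_dlafpn_30e_576x320", "", "");

  cv::Mat frame = cv::imread("demo.jpg");  // placeholder input image
  PaddleDetection::MOTResult result;
  std::vector<double> times(3, 0.0);  // assumed pre/infer/post timing slots
  tracker.Predict({frame}, 0.5, &result, &times);
  return 0;
}
```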
171 paddle_detection/deploy/pptracking/cpp/include/preprocess_op.h Normal file
@@ -0,0 +1,171 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <glog/logging.h>
#include <yaml-cpp/yaml.h>

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace PaddleDetection {

// Object for storing all preprocessed data
class ImageBlob {
 public:
  // image width and height
  std::vector<float> im_shape_;
  // Buffer for image data after preprocessing
  std::vector<float> im_data_;
  // in net data shape(after pad)
  std::vector<float> in_net_shape_;
  // Evaluation image width and height
  // std::vector<float> eval_im_size_f_;
  // Scale factor for image size to origin image size
  std::vector<float> scale_factor_;
};

// Abstraction of preprocessing operation class
class PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) = 0;
  virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
};

class InitInfo : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {}
  virtual void Run(cv::Mat* im, ImageBlob* data);
};

class NormalizeImage : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    mean_ = item["mean"].as<std::vector<float>>();
    scale_ = item["std"].as<std::vector<float>>();
    is_scale_ = item["is_scale"].as<bool>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  // CHW or HWC
  std::vector<float> mean_;
  std::vector<float> scale_;
  bool is_scale_;
};

class Permute : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {}
  virtual void Run(cv::Mat* im, ImageBlob* data);
};

class Resize : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    interp_ = item["interp"].as<int>();
    keep_ratio_ = item["keep_ratio"].as<bool>();
    target_size_ = item["target_size"].as<std::vector<int>>();
  }

  // Compute best resize scale for x-dimension, y-dimension
  std::pair<float, float> GenerateScale(const cv::Mat& im);

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int interp_;
  bool keep_ratio_;
  std::vector<int> target_size_;
  std::vector<int> in_net_shape_;
};

class LetterBoxResize : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    target_size_ = item["target_size"].as<std::vector<int>>();
  }

  float GenerateScale(const cv::Mat& im);

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  std::vector<int> target_size_;
  std::vector<int> in_net_shape_;
};
// Models with FPN need input shape % stride == 0
class PadStride : public PreprocessOp {
 public:
  virtual void Init(const YAML::Node& item) {
    stride_ = item["stride"].as<int>();
  }

  virtual void Run(cv::Mat* im, ImageBlob* data);

 private:
  int stride_;
};

class Preprocessor {
 public:
  void Init(const YAML::Node& config_node) {
    // initialize image info at first
    ops_["InitInfo"] = std::make_shared<InitInfo>();
    for (const auto& item : config_node) {
      auto op_name = item["type"].as<std::string>();

      ops_[op_name] = CreateOp(op_name);
      ops_[op_name]->Init(item);
    }
  }

  std::shared_ptr<PreprocessOp> CreateOp(const std::string& name) {
    if (name == "Resize") {
      return std::make_shared<Resize>();
    } else if (name == "LetterBoxResize") {
      return std::make_shared<LetterBoxResize>();
    } else if (name == "Permute") {
      return std::make_shared<Permute>();
    } else if (name == "NormalizeImage") {
      return std::make_shared<NormalizeImage>();
    } else if (name == "PadStride") {
      // use PadStride instead of PadBatch
      return std::make_shared<PadStride>();
    }
    std::cerr << "can not find function of OP: " << name
              << " and return: nullptr" << std::endl;
    return nullptr;
  }

  void Run(cv::Mat* im, ImageBlob* data);

 public:
  static const std::vector<std::string> RUN_ORDER;

 private:
  std::unordered_map<std::string, std::shared_ptr<PreprocessOp>> ops_;
};

}  // namespace PaddleDetection
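The preprocessing chain is configured directly from the `Preprocess` list in `infer_cfg.yml` (this is the `preprocess_info_` node read by `ConfigPaser`). A sketch that builds an equivalent YAML node inline — the op parameters shown are illustrative, not the values of any particular export — and runs the chain on one image:

```cpp
#include <opencv2/opencv.hpp>
#include "yaml-cpp/yaml.h"
#include "include/preprocess_op.h"

int main() {
  // Illustrative stand-in for config.preprocess_info_ from infer_cfg.yml.
  YAML::Node ops = YAML::Load(
      "- type: LetterBoxResize\n"
      "  target_size: [320, 576]\n"
      "- type: NormalizeImage\n"
      "  mean: [0.0, 0.0, 0.0]\n"
      "  std: [1.0, 1.0, 1.0]\n"
      "  is_scale: true\n"
      "- type: Permute\n");

  PaddleDetection::Preprocessor preprocessor;
  preprocessor.Init(ops);  // creates one op per "type" entry, plus InitInfo

  cv::Mat img = cv::imread("demo.jpg");  // placeholder input image

  PaddleDetection::ImageBlob blob;
  preprocessor.Run(&img, &blob);  // fills im_shape_, scale_factor_, im_data_
  return 0;
}
```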
106 paddle_detection/deploy/pptracking/cpp/include/sde_predictor.h Normal file
@@ -0,0 +1,106 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "paddle_inference_api.h"  // NOLINT

#include "include/config_parser.h"
#include "include/preprocess_op.h"
#include "include/utils.h"

using namespace paddle_infer;  // NOLINT

namespace PaddleDetection {

class SDEPredictor {
 public:
  explicit SDEPredictor(const std::string& device,
                        const std::string& det_model_dir = "",
                        const std::string& reid_model_dir = "",
                        const double threshold = -1.,
                        const std::string& run_mode = "paddle",
                        const int gpu_id = 0,
                        const bool use_mkldnn = false,
                        const int cpu_threads = 1,
                        bool trt_calib_mode = false,
                        const int min_box_area = 200) {
    this->device_ = device;
    this->gpu_id_ = gpu_id;
    this->use_mkldnn_ = use_mkldnn;
    this->cpu_math_library_num_threads_ = cpu_threads;
    this->trt_calib_mode_ = trt_calib_mode;
    this->min_box_area_ = min_box_area;

    det_config_.load_config(det_model_dir);
    this->min_subgraph_size_ = det_config_.min_subgraph_size_;
    det_preprocessor_.Init(det_config_.preprocess_info_);

    reid_config_.load_config(reid_model_dir);
    reid_preprocessor_.Init(reid_config_.preprocess_info_);

    LoadModel(det_model_dir, reid_model_dir, run_mode);
    this->conf_thresh_ = det_config_.conf_thresh_;
  }

  // Load Paddle inference model
  void LoadModel(const std::string& det_model_dir,
                 const std::string& reid_model_dir,
                 const std::string& run_mode = "paddle");

  // Run predictor
  void Predict(const std::vector<cv::Mat> imgs,
               const double threshold = 0.5,
               MOTResult* result = nullptr,
               std::vector<double>* times = nullptr);

 private:
  std::string device_ = "CPU";
  float threhold = 0.5;
  int gpu_id_ = 0;
  bool use_mkldnn_ = false;
  int cpu_math_library_num_threads_ = 1;
  int min_subgraph_size_ = 3;
  bool trt_calib_mode_ = false;

  // Preprocess image and copy data to input buffer
  void Preprocess(const cv::Mat& image_mat);
  // Postprocess result
  void Postprocess(const cv::Mat dets, const cv::Mat emb, MOTResult* result);

  std::shared_ptr<Predictor> det_predictor_;
  std::shared_ptr<Predictor> reid_predictor_;
  Preprocessor det_preprocessor_;
  Preprocessor reid_preprocessor_;
  ImageBlob inputs_;
  std::vector<float> bbox_data_;
  std::vector<float> emb_data_;
  double threshold_;
  ConfigPaser det_config_;
  ConfigPaser reid_config_;
  float min_box_area_ = 200;
  float conf_thresh_;
};

}  // namespace PaddleDetection
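The SDE path takes two exported models, a detector and a ReID model, instead of one joint FairMot model. A sketch with placeholder model directories (the directory names are hypothetical; any detector + ReID export produced by `tools/export_model.py` follows the same pattern):

```cpp
#include <vector>
#include <opencv2/opencv.hpp>
#include "include/sde_predictor.h"

int main() {
  // Hypothetical exported model directories for the DeepSORT-style pipeline.
  PaddleDetection::SDEPredictor predictor(
      "GPU",
      "./exported_detector",   // det_model_dir
      "./exported_reid",       // reid_model_dir
      -1., "paddle");

  cv::Mat frame = cv::imread("demo.jpg");  // placeholder input image
  PaddleDetection::MOTResult result;
  std::vector<double> times(3, 0.0);  // assumed pre/infer/post timing slots
  predictor.Predict({frame}, 0.5, &result, &times);
  return 0;
}
```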
72 paddle_detection/deploy/pptracking/cpp/include/tracker.h Normal file
@@ -0,0 +1,72 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The code is based on:
// https://github.com/CnybTseng/JDE/blob/master/platforms/common/jdetracker.h
// The copyright of CnybTseng/JDE is as follows:
// MIT License

#pragma once

#include <map>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "include/trajectory.h"

namespace PaddleDetection {

typedef std::map<int, int> Match;
typedef std::map<int, int>::iterator MatchIterator;

struct Track {
  int id;
  float score;
  cv::Vec4f ltrb;
};

class JDETracker {
 public:
  static JDETracker *instance(void);
  virtual bool update(const cv::Mat &dets,
                      const cv::Mat &emb,
                      std::vector<Track> *tracks);

 private:
  JDETracker(void);
  virtual ~JDETracker(void) {}
  cv::Mat motion_distance(const TrajectoryPtrPool &a, const TrajectoryPool &b);
  void linear_assignment(const cv::Mat &cost,
                         float cost_limit,
                         Match *matches,
                         std::vector<int> *mismatch_row,
                         std::vector<int> *mismatch_col);
  void remove_duplicate_trajectory(TrajectoryPool *a,
                                   TrajectoryPool *b,
                                   float iou_thresh = 0.15f);

 private:
  static JDETracker *me;
  int timestamp;
  TrajectoryPool tracked_trajectories;
  TrajectoryPool lost_trajectories;
  TrajectoryPool removed_trajectories;
  int max_lost_time;
  float lambda;
  float det_thresh;
};

}  // namespace PaddleDetection
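`JDETracker` is a singleton that is fed once per frame with the detections and their appearance embeddings. A sketch, assuming each row of `dets` holds a box as `[x1, y1, x2, y2, score]` and each row of `emb` is the embedding of the corresponding detection (see `src/tracker.cc` and `JDEPredictor::Postprocess` for the exact layout expected):

```cpp
#include <cstdio>
#include <vector>
#include <opencv2/opencv.hpp>
#include "include/tracker.h"

void TrackFrame(const cv::Mat& dets, const cv::Mat& emb) {
  std::vector<PaddleDetection::Track> tracks;
  PaddleDetection::JDETracker::instance()->update(dets, emb, &tracks);

  for (const auto& t : tracks) {
    // t.ltrb holds (left, top, right, bottom); t.id is stable across frames.
    std::printf("id=%d score=%.2f box=(%.0f,%.0f,%.0f,%.0f)\n",
                t.id, t.score, t.ltrb[0], t.ltrb[1], t.ltrb[2], t.ltrb[3]);
  }
}
```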
230
paddle_detection/deploy/pptracking/cpp/include/trajectory.h
Normal file
230
paddle_detection/deploy/pptracking/cpp/include/trajectory.h
Normal file
@@ -0,0 +1,230 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The code is based on:
|
||||
// https://github.com/CnybTseng/JDE/blob/master/platforms/common/trajectory.h
|
||||
// Ths copyright of CnybTseng/JDE is as follows:
|
||||
// MIT License
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include <opencv2/core/core.hpp>
|
||||
#include <opencv2/highgui/highgui.hpp>
|
||||
#include <opencv2/imgproc/imgproc.hpp>
|
||||
#include "opencv2/video/tracking.hpp"
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
typedef enum { New = 0, Tracked = 1, Lost = 2, Removed = 3 } TrajectoryState;
|
||||
|
||||
class Trajectory;
|
||||
typedef std::vector<Trajectory> TrajectoryPool;
|
||||
typedef std::vector<Trajectory>::iterator TrajectoryPoolIterator;
|
||||
typedef std::vector<Trajectory *> TrajectoryPtrPool;
|
||||
typedef std::vector<Trajectory *>::iterator TrajectoryPtrPoolIterator;
|
||||
|
||||
class TKalmanFilter : public cv::KalmanFilter {
|
||||
public:
|
||||
TKalmanFilter(void);
|
||||
virtual ~TKalmanFilter(void) {}
|
||||
virtual void init(const cv::Mat &measurement);
|
||||
virtual const cv::Mat &predict();
|
||||
virtual const cv::Mat &correct(const cv::Mat &measurement);
|
||||
virtual void project(cv::Mat *mean, cv::Mat *covariance) const;
|
||||
|
||||
private:
|
||||
float std_weight_position;
|
||||
float std_weight_velocity;
|
||||
};
|
||||
|
||||
inline TKalmanFilter::TKalmanFilter(void) : cv::KalmanFilter(8, 4) {
|
||||
cv::KalmanFilter::transitionMatrix = cv::Mat::eye(8, 8, CV_32F);
|
||||
for (int i = 0; i < 4; ++i)
|
||||
cv::KalmanFilter::transitionMatrix.at<float>(i, i + 4) = 1;
|
||||
cv::KalmanFilter::measurementMatrix = cv::Mat::eye(4, 8, CV_32F);
|
||||
std_weight_position = 1 / 20.f;
|
||||
std_weight_velocity = 1 / 160.f;
|
||||
}
|
||||
|
||||
class Trajectory : public TKalmanFilter {
|
||||
public:
|
||||
Trajectory();
|
||||
Trajectory(const cv::Vec4f <rb, float score, const cv::Mat &embedding);
|
||||
Trajectory(const Trajectory &other);
|
||||
Trajectory &operator=(const Trajectory &rhs);
|
||||
virtual ~Trajectory(void) {}
|
||||
|
||||
static int next_id();
|
||||
virtual const cv::Mat &predict(void);
|
||||
virtual void update(Trajectory *traj,
|
||||
int timestamp,
|
||||
bool update_embedding = true);
|
||||
virtual void activate(int timestamp);
|
||||
virtual void reactivate(Trajectory *traj, int timestamp, bool newid = false);
|
||||
virtual void mark_lost(void);
|
||||
virtual void mark_removed(void);
|
||||
|
||||
friend TrajectoryPool operator+(const TrajectoryPool &a,
|
||||
const TrajectoryPool &b);
|
||||
friend TrajectoryPool operator+(const TrajectoryPool &a,
|
||||
const TrajectoryPtrPool &b);
|
||||
friend TrajectoryPool &operator+=(TrajectoryPool &a, // NOLINT
|
||||
const TrajectoryPtrPool &b);
|
||||
friend TrajectoryPool operator-(const TrajectoryPool &a,
|
||||
const TrajectoryPool &b);
|
||||
friend TrajectoryPool &operator-=(TrajectoryPool &a, // NOLINT
|
||||
const TrajectoryPool &b);
|
||||
friend TrajectoryPtrPool operator+(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPtrPool &b);
|
||||
friend TrajectoryPtrPool operator+(const TrajectoryPtrPool &a,
|
||||
TrajectoryPool *b);
|
||||
friend TrajectoryPtrPool operator-(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPtrPool &b);
|
||||
|
||||
friend cv::Mat embedding_distance(const TrajectoryPool &a,
|
||||
const TrajectoryPool &b);
|
||||
friend cv::Mat embedding_distance(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPtrPool &b);
|
||||
friend cv::Mat embedding_distance(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPool &b);
|
||||
|
||||
friend cv::Mat mahalanobis_distance(const TrajectoryPool &a,
|
||||
const TrajectoryPool &b);
|
||||
friend cv::Mat mahalanobis_distance(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPtrPool &b);
|
||||
friend cv::Mat mahalanobis_distance(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPool &b);
|
||||
|
||||
friend cv::Mat iou_distance(const TrajectoryPool &a, const TrajectoryPool &b);
|
||||
friend cv::Mat iou_distance(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPtrPool &b);
|
||||
friend cv::Mat iou_distance(const TrajectoryPtrPool &a,
|
||||
const TrajectoryPool &b);
|
||||
|
||||
private:
|
||||
void update_embedding(const cv::Mat &embedding);
|
||||
|
||||
public:
|
||||
TrajectoryState state;
|
||||
cv::Vec4f ltrb;
|
||||
cv::Mat smooth_embedding;
|
||||
int id;
|
||||
bool is_activated;
|
||||
int timestamp;
|
||||
int starttime;
|
||||
float score;
|
||||
|
||||
private:
|
||||
static int count;
|
||||
cv::Vec4f xyah;
|
||||
cv::Mat current_embedding;
|
||||
float eta;
|
||||
int length;
|
||||
};
|
||||
|
||||
inline cv::Vec4f ltrb2xyah(const cv::Vec4f &ltrb) {
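// Convert a (left, top, right, bottom) box to the (center_x, center_y,
// aspect_ratio, height) form used as the Kalman filter measurement.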
|
||||
cv::Vec4f xyah;
|
||||
xyah[0] = (ltrb[0] + ltrb[2]) * 0.5f;
|
||||
xyah[1] = (ltrb[1] + ltrb[3]) * 0.5f;
|
||||
xyah[3] = ltrb[3] - ltrb[1];
|
||||
xyah[2] = (ltrb[2] - ltrb[0]) / xyah[3];
|
||||
return xyah;
|
||||
}
|
||||
|
||||
inline Trajectory::Trajectory()
|
||||
: state(New),
|
||||
ltrb(cv::Vec4f()),
|
||||
smooth_embedding(cv::Mat()),
|
||||
id(0),
|
||||
is_activated(false),
|
||||
timestamp(0),
|
||||
starttime(0),
|
||||
score(0),
|
||||
eta(0.9),
|
||||
length(0) {}
|
||||
|
||||
inline Trajectory::Trajectory(const cv::Vec4f &ltrb_,
|
||||
float score_,
|
||||
const cv::Mat &embedding)
|
||||
: state(New),
|
||||
ltrb(ltrb_),
|
||||
smooth_embedding(cv::Mat()),
|
||||
id(0),
|
||||
is_activated(false),
|
||||
timestamp(0),
|
||||
starttime(0),
|
||||
score(score_),
|
||||
eta(0.9),
|
||||
length(0) {
|
||||
xyah = ltrb2xyah(ltrb);
|
||||
update_embedding(embedding);
|
||||
}
|
||||
|
||||
inline Trajectory::Trajectory(const Trajectory &other)
|
||||
: state(other.state),
|
||||
ltrb(other.ltrb),
|
||||
id(other.id),
|
||||
is_activated(other.is_activated),
|
||||
timestamp(other.timestamp),
|
||||
starttime(other.starttime),
|
||||
xyah(other.xyah),
|
||||
score(other.score),
|
||||
eta(other.eta),
|
||||
length(other.length) {
|
||||
other.smooth_embedding.copyTo(smooth_embedding);
|
||||
other.current_embedding.copyTo(current_embedding);
|
||||
// copy state in KalmanFilter
|
||||
|
||||
other.statePre.copyTo(cv::KalmanFilter::statePre);
|
||||
other.statePost.copyTo(cv::KalmanFilter::statePost);
|
||||
other.errorCovPre.copyTo(cv::KalmanFilter::errorCovPre);
|
||||
other.errorCovPost.copyTo(cv::KalmanFilter::errorCovPost);
|
||||
}
|
||||
|
||||
inline Trajectory &Trajectory::operator=(const Trajectory &rhs) {
|
||||
this->state = rhs.state;
|
||||
this->ltrb = rhs.ltrb;
|
||||
rhs.smooth_embedding.copyTo(this->smooth_embedding);
|
||||
this->id = rhs.id;
|
||||
this->is_activated = rhs.is_activated;
|
||||
this->timestamp = rhs.timestamp;
|
||||
this->starttime = rhs.starttime;
|
||||
this->xyah = rhs.xyah;
|
||||
this->score = rhs.score;
|
||||
rhs.current_embedding.copyTo(this->current_embedding);
|
||||
this->eta = rhs.eta;
|
||||
this->length = rhs.length;
|
||||
|
||||
// copy state in KalmanFilter
|
||||
|
||||
rhs.statePre.copyTo(cv::KalmanFilter::statePre);
|
||||
rhs.statePost.copyTo(cv::KalmanFilter::statePost);
|
||||
rhs.errorCovPre.copyTo(cv::KalmanFilter::errorCovPre);
|
||||
rhs.errorCovPost.copyTo(cv::KalmanFilter::errorCovPost);
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline int Trajectory::next_id() {
|
||||
++count;
|
||||
return count;
|
||||
}
|
||||
|
||||
inline void Trajectory::mark_lost(void) { state = Lost; }
|
||||
|
||||
inline void Trajectory::mark_removed(void) { state = Removed; }
|
||||
|
||||
} // namespace PaddleDetection
|
||||
44
paddle_detection/deploy/pptracking/cpp/include/utils.h
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <ctime>
|
||||
#include <numeric>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "include/tracker.h"
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
struct Rect {
|
||||
float left;
|
||||
float top;
|
||||
float right;
|
||||
float bottom;
|
||||
};
|
||||
|
||||
struct MOTTrack {
|
||||
int ids;
|
||||
float score;
|
||||
Rect rects;
|
||||
int class_id = -1;
|
||||
};
|
||||
|
||||
typedef std::vector<MOTTrack> MOTResult;
|
||||
|
||||
} // namespace PaddleDetection
|
||||
78
paddle_detection/deploy/pptracking/cpp/scripts/build.sh
Normal file
@@ -0,0 +1,78 @@
|
||||
# Whether to use GPU (i.e. whether to use CUDA)
|
||||
WITH_GPU=OFF
|
||||
|
||||
# Whether to use MKL or OpenBLAS; set to OFF on TX2
|
||||
WITH_MKL=ON
|
||||
|
||||
# Whether to enable TensorRT (only effective when WITH_GPU=ON)
|
||||
WITH_TENSORRT=OFF
|
||||
|
||||
# Name of the Paddle inference lib. It differs across platforms and versions; check the lib name under the `paddle_inference/lib/` directory of the downloaded inference library.
|
||||
PADDLE_LIB_NAME=libpaddle_inference
|
||||
|
||||
# TensorRT include path
|
||||
TENSORRT_INC_DIR=/path/to/tensorrt/include
|
||||
|
||||
# TensorRT lib path
|
||||
TENSORRT_LIB_DIR=/path/to/tensorrt/lib
|
||||
|
||||
# Path of the Paddle inference library
|
||||
PADDLE_DIR=/path/to/paddle_inference
|
||||
|
||||
# CUDA lib path
|
||||
CUDA_LIB=/path/to/cuda/lib
|
||||
|
||||
# CUDNN lib path
|
||||
CUDNN_LIB=/path/to/cudnn/lib
|
||||
|
||||
MACHINE_TYPE=`uname -m`
|
||||
echo "MACHINE_TYPE: "${MACHINE_TYPE}
|
||||
|
||||
|
||||
if [ "$MACHINE_TYPE" = "x86_64" ]
|
||||
then
|
||||
echo "set OPENCV_DIR for x86_64"
|
||||
# On Linux, download the pre-built OpenCV with the following commands
|
||||
mkdir -p $(pwd)/deps && cd $(pwd)/deps
|
||||
wget -c https://paddledet.bj.bcebos.com/data/opencv-3.4.16_gcc8.2_ffmpeg.tar.gz
|
||||
tar -xvf opencv-3.4.16_gcc8.2_ffmpeg.tar.gz && cd ..
|
||||
|
||||
# set OPENCV_DIR
|
||||
OPENCV_DIR=$(pwd)/deps/opencv-3.4.16_gcc8.2_ffmpeg
|
||||
|
||||
elif [ "$MACHINE_TYPE" = "aarch64" ]
|
||||
then
|
||||
echo "set OPENCV_DIR for aarch64"
|
||||
# On the TX2 platform, download the pre-built OpenCV with the following commands
|
||||
mkdir -p $(pwd)/deps && cd $(pwd)/deps
|
||||
wget -c https://bj.bcebos.com/v1/paddledet/data/TX2_JetPack4.3_opencv_3.4.6_gcc7.5.0.tar.gz
|
||||
tar -xvf TX2_JetPack4.3_opencv_3.4.6_gcc7.5.0.tar.gz && cd ..
|
||||
|
||||
# set OPENCV_DIR
|
||||
OPENCV_DIR=$(pwd)/deps/TX2_JetPack4.3_opencv_3.4.6_gcc7.5.0/
|
||||
|
||||
else
|
||||
echo "Please set OPENCV_DIR manually"
|
||||
fi
|
||||
|
||||
echo "OPENCV_DIR: "$OPENCV_DIR
|
||||
|
||||
# No changes needed below this line
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cd build
|
||||
cmake .. \
|
||||
-DWITH_GPU=${WITH_GPU} \
|
||||
-DWITH_MKL=${WITH_MKL} \
|
||||
-DWITH_TENSORRT=${WITH_TENSORRT} \
|
||||
-DTENSORRT_LIB_DIR=${TENSORRT_LIB_DIR} \
|
||||
-DTENSORRT_INC_DIR=${TENSORRT_INC_DIR} \
|
||||
-DPADDLE_DIR=${PADDLE_DIR} \
|
||||
-DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
|
||||
-DCUDA_LIB=${CUDA_LIB} \
|
||||
-DCUDNN_LIB=${CUDNN_LIB} \
|
||||
-DOPENCV_DIR=${OPENCV_DIR} \
|
||||
-DPADDLE_LIB_NAME=${PADDLE_LIB_NAME}
|
||||
|
||||
make
|
||||
echo "make finished!"
|
||||
235
paddle_detection/deploy/pptracking/cpp/src/jde_predictor.cc
Normal file
@@ -0,0 +1,235 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <sstream>
|
||||
// for setprecision
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include "include/jde_predictor.h"
|
||||
|
||||
using namespace paddle_infer; // NOLINT
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
// Load Model and create model predictor
|
||||
void JDEPredictor::LoadModel(const std::string& model_dir,
|
||||
const std::string& run_mode) {
|
||||
paddle_infer::Config config;
|
||||
std::string prog_file = model_dir + OS_PATH_SEP + "model.pdmodel";
|
||||
std::string params_file = model_dir + OS_PATH_SEP + "model.pdiparams";
|
||||
config.SetModel(prog_file, params_file);
|
||||
if (this->device_ == "GPU") {
|
||||
config.EnableUseGpu(200, this->gpu_id_);
|
||||
config.SwitchIrOptim(true);
|
||||
// use tensorrt
|
||||
if (run_mode != "paddle") {
|
||||
auto precision = paddle_infer::Config::Precision::kFloat32;
|
||||
if (run_mode == "trt_fp32") {
|
||||
precision = paddle_infer::Config::Precision::kFloat32;
|
||||
} else if (run_mode == "trt_fp16") {
|
||||
precision = paddle_infer::Config::Precision::kHalf;
|
||||
} else if (run_mode == "trt_int8") {
|
||||
precision = paddle_infer::Config::Precision::kInt8;
|
||||
} else {
|
||||
printf(
|
||||
"run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or "
|
||||
"'trt_int8'");
|
||||
}
|
||||
// set tensorrt
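// Assumed argument order of EnableTensorRtEngine: workspace size (1 GB),
// max batch size, min subgraph size, precision, use_static, use_calib_mode.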
|
||||
config.EnableTensorRtEngine(1 << 30,
|
||||
1,
|
||||
this->min_subgraph_size_,
|
||||
precision,
|
||||
false,
|
||||
this->trt_calib_mode_);
|
||||
}
|
||||
} else if (this->device_ == "XPU") {
|
||||
config.EnableXpu(10 * 1024 * 1024);
|
||||
} else {
|
||||
config.DisableGpu();
|
||||
if (this->use_mkldnn_) {
|
||||
config.EnableMKLDNN();
|
||||
// cache 10 different shapes for mkldnn to avoid memory leak
|
||||
config.SetMkldnnCacheCapacity(10);
|
||||
}
|
||||
config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
|
||||
}
|
||||
config.SwitchUseFeedFetchOps(false);
|
||||
config.SwitchIrOptim(true);
|
||||
config.DisableGlogInfo();
|
||||
// Memory optimization
|
||||
config.EnableMemoryOptim();
|
||||
predictor_ = std::move(CreatePredictor(config));
|
||||
}
|
||||
|
||||
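// Keep the indices of detections whose confidence score (column 4 of dets)
// exceeds conf_thresh.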
void FilterDets(const float conf_thresh,
|
||||
const cv::Mat dets,
|
||||
std::vector<int>* index) {
|
||||
for (int i = 0; i < dets.rows; ++i) {
|
||||
float score = *dets.ptr<float>(i, 4);
|
||||
if (score > conf_thresh) {
|
||||
index->push_back(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void JDEPredictor::Preprocess(const cv::Mat& ori_im) {
|
||||
// Clone the image : keep the original mat for postprocess
|
||||
cv::Mat im = ori_im.clone();
|
||||
preprocessor_.Run(&im, &inputs_);
|
||||
}
|
||||
|
||||
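// Drop low-score detections, update the JDE tracker with the rest, and turn
// the surviving tracks into MOTResult entries, skipping boxes that are too
// small (area <= min_box_area_) or too wide (w / h > 1.6).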
void JDEPredictor::Postprocess(const cv::Mat dets,
|
||||
const cv::Mat emb,
|
||||
MOTResult* result) {
|
||||
result->clear();
|
||||
std::vector<Track> tracks;
|
||||
std::vector<int> valid;
|
||||
FilterDets(conf_thresh_, dets, &valid);
|
||||
cv::Mat new_dets, new_emb;
|
||||
for (int i = 0; i < valid.size(); ++i) {
|
||||
new_dets.push_back(dets.row(valid[i]));
|
||||
new_emb.push_back(emb.row(valid[i]));
|
||||
}
|
||||
JDETracker::instance()->update(new_dets, new_emb, &tracks);
|
||||
if (tracks.size() == 0) {
|
||||
MOTTrack mot_track;
|
||||
Rect ret = {*dets.ptr<float>(0, 0),
|
||||
*dets.ptr<float>(0, 1),
|
||||
*dets.ptr<float>(0, 2),
|
||||
*dets.ptr<float>(0, 3)};
|
||||
mot_track.ids = 1;
|
||||
mot_track.score = *dets.ptr<float>(0, 4);
|
||||
mot_track.rects = ret;
|
||||
result->push_back(mot_track);
|
||||
} else {
|
||||
std::vector<Track>::iterator titer;
|
||||
for (titer = tracks.begin(); titer != tracks.end(); ++titer) {
|
||||
if (titer->score < threshold_) {
|
||||
continue;
|
||||
} else {
|
||||
float w = titer->ltrb[2] - titer->ltrb[0];
|
||||
float h = titer->ltrb[3] - titer->ltrb[1];
|
||||
bool vertical = w / h > 1.6;
|
||||
float area = w * h;
|
||||
if (area > min_box_area_ && !vertical) {
|
||||
MOTTrack mot_track;
|
||||
Rect ret = {
|
||||
titer->ltrb[0], titer->ltrb[1], titer->ltrb[2], titer->ltrb[3]};
|
||||
mot_track.rects = ret;
|
||||
mot_track.score = titer->score;
|
||||
mot_track.ids = titer->id;
|
||||
result->push_back(mot_track);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
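// Single-batch flow: preprocess the frames, fill the image / im_shape /
// scale_factor input tensors, run inference, fetch the bbox and embedding
// outputs, then call Postprocess. Elapsed milliseconds of the three stages
// are accumulated into (*times)[0..2].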
void JDEPredictor::Predict(const std::vector<cv::Mat> imgs,
|
||||
const double threshold,
|
||||
MOTResult* result,
|
||||
std::vector<double>* times) {
|
||||
auto preprocess_start = std::chrono::steady_clock::now();
|
||||
int batch_size = imgs.size();
|
||||
|
||||
// in_data_batch
|
||||
std::vector<float> in_data_all;
|
||||
std::vector<float> im_shape_all(batch_size * 2);
|
||||
std::vector<float> scale_factor_all(batch_size * 2);
|
||||
|
||||
// Preprocess image
|
||||
for (int bs_idx = 0; bs_idx < batch_size; bs_idx++) {
|
||||
cv::Mat im = imgs.at(bs_idx);
|
||||
Preprocess(im);
|
||||
im_shape_all[bs_idx * 2] = inputs_.im_shape_[0];
|
||||
im_shape_all[bs_idx * 2 + 1] = inputs_.im_shape_[1];
|
||||
|
||||
scale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];
|
||||
scale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];
|
||||
|
||||
in_data_all.insert(
|
||||
in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end());
|
||||
}
|
||||
|
||||
// Prepare input tensor
|
||||
auto input_names = predictor_->GetInputNames();
|
||||
for (const auto& tensor_name : input_names) {
|
||||
auto in_tensor = predictor_->GetInputHandle(tensor_name);
|
||||
if (tensor_name == "image") {
|
||||
int rh = inputs_.in_net_shape_[0];
|
||||
int rw = inputs_.in_net_shape_[1];
|
||||
in_tensor->Reshape({batch_size, 3, rh, rw});
|
||||
in_tensor->CopyFromCpu(in_data_all.data());
|
||||
} else if (tensor_name == "im_shape") {
|
||||
in_tensor->Reshape({batch_size, 2});
|
||||
in_tensor->CopyFromCpu(im_shape_all.data());
|
||||
} else if (tensor_name == "scale_factor") {
|
||||
in_tensor->Reshape({batch_size, 2});
|
||||
in_tensor->CopyFromCpu(scale_factor_all.data());
|
||||
}
|
||||
}
|
||||
|
||||
auto preprocess_end = std::chrono::steady_clock::now();
|
||||
std::vector<int> bbox_shape;
|
||||
std::vector<int> emb_shape;
|
||||
|
||||
// Run predictor
|
||||
auto inference_start = std::chrono::steady_clock::now();
|
||||
predictor_->Run();
|
||||
// Get output tensor
|
||||
auto output_names = predictor_->GetOutputNames();
|
||||
auto bbox_tensor = predictor_->GetOutputHandle(output_names[0]);
|
||||
bbox_shape = bbox_tensor->shape();
|
||||
auto emb_tensor = predictor_->GetOutputHandle(output_names[1]);
|
||||
emb_shape = emb_tensor->shape();
|
||||
// Calculate bbox length
|
||||
int bbox_size = 1;
|
||||
for (int j = 0; j < bbox_shape.size(); ++j) {
|
||||
bbox_size *= bbox_shape[j];
|
||||
}
|
||||
// Calculate emb length
|
||||
int emb_size = 1;
|
||||
for (int j = 0; j < emb_shape.size(); ++j) {
|
||||
emb_size *= emb_shape[j];
|
||||
}
|
||||
|
||||
bbox_data_.resize(bbox_size);
|
||||
bbox_tensor->CopyToCpu(bbox_data_.data());
|
||||
|
||||
emb_data_.resize(emb_size);
|
||||
emb_tensor->CopyToCpu(emb_data_.data());
|
||||
auto inference_end = std::chrono::steady_clock::now();
|
||||
|
||||
// Postprocessing result
|
||||
auto postprocess_start = std::chrono::steady_clock::now();
|
||||
result->clear();
|
||||
|
||||
cv::Mat dets(bbox_shape[0], 6, CV_32FC1, bbox_data_.data());
|
||||
cv::Mat emb(bbox_shape[0], emb_shape[1], CV_32FC1, emb_data_.data());
|
||||
|
||||
Postprocess(dets, emb, result);
|
||||
|
||||
auto postprocess_end = std::chrono::steady_clock::now();
|
||||
|
||||
std::chrono::duration<float> preprocess_diff =
|
||||
preprocess_end - preprocess_start;
|
||||
(*times)[0] += static_cast<double>(preprocess_diff.count() * 1000);
|
||||
std::chrono::duration<float> inference_diff = inference_end - inference_start;
|
||||
(*times)[1] += static_cast<double>(inference_diff.count() * 1000);
|
||||
std::chrono::duration<float> postprocess_diff =
|
||||
postprocess_end - postprocess_start;
|
||||
(*times)[2] += static_cast<double>(postprocess_diff.count() * 1000);
|
||||
}
|
||||
|
||||
} // namespace PaddleDetection
|
||||
409
paddle_detection/deploy/pptracking/cpp/src/lapjv.cpp
Normal file
@@ -0,0 +1,409 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The code is based on:
|
||||
// https://github.com/gatagat/lap/blob/master/lap/lapjv.cpp
|
||||
// The copyright of gatagat/lap is as follows:
|
||||
// MIT License
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "include/lapjv.h"
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
/** Column-reduction and reduction transfer for a dense cost matrix.
|
||||
*/
|
||||
int _ccrrt_dense(
|
||||
const int n, float *cost[], int *free_rows, int *x, int *y, float *v) {
|
||||
int n_free_rows;
|
||||
bool *unique;
|
||||
|
||||
for (int i = 0; i < n; i++) {
|
||||
x[i] = -1;
|
||||
v[i] = LARGE;
|
||||
y[i] = 0;
|
||||
}
|
||||
for (int i = 0; i < n; i++) {
|
||||
for (int j = 0; j < n; j++) {
|
||||
const float c = cost[i][j];
|
||||
if (c < v[j]) {
|
||||
v[j] = c;
|
||||
y[j] = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
NEW(unique, bool, n);
|
||||
memset(unique, TRUE, n);
|
||||
{
|
||||
int j = n;
|
||||
do {
|
||||
j--;
|
||||
const int i = y[j];
|
||||
if (x[i] < 0) {
|
||||
x[i] = j;
|
||||
} else {
|
||||
unique[i] = FALSE;
|
||||
y[j] = -1;
|
||||
}
|
||||
} while (j > 0);
|
||||
}
|
||||
n_free_rows = 0;
|
||||
for (int i = 0; i < n; i++) {
|
||||
if (x[i] < 0) {
|
||||
free_rows[n_free_rows++] = i;
|
||||
} else if (unique[i]) {
|
||||
const int j = x[i];
|
||||
float min = LARGE;
|
||||
for (int j2 = 0; j2 < n; j2++) {
|
||||
if (j2 == static_cast<int>(j)) {
|
||||
continue;
|
||||
}
|
||||
const float c = cost[i][j2] - v[j2];
|
||||
if (c < min) {
|
||||
min = c;
|
||||
}
|
||||
}
|
||||
v[j] -= min;
|
||||
}
|
||||
}
|
||||
FREE(unique);
|
||||
return n_free_rows;
|
||||
}
|
||||
|
||||
/** Augmenting row reduction for a dense cost matrix.
|
||||
*/
|
||||
int _carr_dense(const int n,
|
||||
float *cost[],
|
||||
const int n_free_rows,
|
||||
int *free_rows,
|
||||
int *x,
|
||||
int *y,
|
||||
float *v) {
|
||||
int current = 0;
|
||||
int new_free_rows = 0;
|
||||
int rr_cnt = 0;
|
||||
while (current < n_free_rows) {
|
||||
int i0;
|
||||
int j1, j2;
|
||||
float v1, v2, v1_new;
|
||||
bool v1_lowers;
|
||||
|
||||
rr_cnt++;
|
||||
const int free_i = free_rows[current++];
|
||||
j1 = 0;
|
||||
v1 = cost[free_i][0] - v[0];
|
||||
j2 = -1;
|
||||
v2 = LARGE;
|
||||
for (int j = 1; j < n; j++) {
|
||||
const float c = cost[free_i][j] - v[j];
|
||||
if (c < v2) {
|
||||
if (c >= v1) {
|
||||
v2 = c;
|
||||
j2 = j;
|
||||
} else {
|
||||
v2 = v1;
|
||||
v1 = c;
|
||||
j2 = j1;
|
||||
j1 = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
i0 = y[j1];
|
||||
v1_new = v[j1] - (v2 - v1);
|
||||
v1_lowers = v1_new < v[j1];
|
||||
if (rr_cnt < current * n) {
|
||||
if (v1_lowers) {
|
||||
v[j1] = v1_new;
|
||||
} else if (i0 >= 0 && j2 >= 0) {
|
||||
j1 = j2;
|
||||
i0 = y[j2];
|
||||
}
|
||||
if (i0 >= 0) {
|
||||
if (v1_lowers) {
|
||||
free_rows[--current] = i0;
|
||||
} else {
|
||||
free_rows[new_free_rows++] = i0;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (i0 >= 0) {
|
||||
free_rows[new_free_rows++] = i0;
|
||||
}
|
||||
}
|
||||
x[free_i] = j1;
|
||||
y[j1] = free_i;
|
||||
}
|
||||
return new_free_rows;
|
||||
}
|
||||
|
||||
/** Find columns with minimum d[j] and put them on the SCAN list.
|
||||
*/
|
||||
int _find_dense(const int n, int lo, float *d, int *cols, int *y) {
|
||||
int hi = lo + 1;
|
||||
float mind = d[cols[lo]];
|
||||
for (int k = hi; k < n; k++) {
|
||||
int j = cols[k];
|
||||
if (d[j] <= mind) {
|
||||
if (d[j] < mind) {
|
||||
hi = lo;
|
||||
mind = d[j];
|
||||
}
|
||||
cols[k] = cols[hi];
|
||||
cols[hi++] = j;
|
||||
}
|
||||
}
|
||||
return hi;
|
||||
}
|
||||
|
||||
// Scan all columns in TODO starting from arbitrary column in SCAN
|
||||
// and try to decrease d of the TODO columns using the SCAN column.
|
||||
int _scan_dense(const int n,
|
||||
float *cost[],
|
||||
int *plo,
|
||||
int *phi,
|
||||
float *d,
|
||||
int *cols,
|
||||
int *pred,
|
||||
int *y,
|
||||
float *v) {
|
||||
int lo = *plo;
|
||||
int hi = *phi;
|
||||
float h, cred_ij;
|
||||
|
||||
while (lo != hi) {
|
||||
int j = cols[lo++];
|
||||
const int i = y[j];
|
||||
const float mind = d[j];
|
||||
h = cost[i][j] - v[j] - mind;
|
||||
// For all columns in TODO
|
||||
for (int k = hi; k < n; k++) {
|
||||
j = cols[k];
|
||||
cred_ij = cost[i][j] - v[j] - h;
|
||||
if (cred_ij < d[j]) {
|
||||
d[j] = cred_ij;
|
||||
pred[j] = i;
|
||||
if (cred_ij == mind) {
|
||||
if (y[j] < 0) {
|
||||
return j;
|
||||
}
|
||||
cols[k] = cols[hi];
|
||||
cols[hi++] = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
*plo = lo;
|
||||
*phi = hi;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/** Single iteration of modified Dijkstra shortest path algorithm as explained
|
||||
* in the JV paper.
|
||||
*
|
||||
* This is a dense matrix version.
|
||||
*
|
||||
* \return The closest free column index.
|
||||
*/
|
||||
int find_path_dense(const int n,
|
||||
float *cost[],
|
||||
const int start_i,
|
||||
int *y,
|
||||
float *v,
|
||||
int *pred) {
|
||||
int lo = 0, hi = 0;
|
||||
int final_j = -1;
|
||||
int n_ready = 0;
|
||||
int *cols;
|
||||
float *d;
|
||||
|
||||
NEW(cols, int, n);
|
||||
NEW(d, float, n);
|
||||
|
||||
for (int i = 0; i < n; i++) {
|
||||
cols[i] = i;
|
||||
pred[i] = start_i;
|
||||
d[i] = cost[start_i][i] - v[i];
|
||||
}
|
||||
while (final_j == -1) {
|
||||
// No columns left on the SCAN list.
|
||||
if (lo == hi) {
|
||||
n_ready = lo;
|
||||
hi = _find_dense(n, lo, d, cols, y);
|
||||
for (int k = lo; k < hi; k++) {
|
||||
const int j = cols[k];
|
||||
if (y[j] < 0) {
|
||||
final_j = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (final_j == -1) {
|
||||
final_j = _scan_dense(n, cost, &lo, &hi, d, cols, pred, y, v);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
const float mind = d[cols[lo]];
|
||||
for (int k = 0; k < n_ready; k++) {
|
||||
const int j = cols[k];
|
||||
v[j] += d[j] - mind;
|
||||
}
|
||||
}
|
||||
|
||||
FREE(cols);
|
||||
FREE(d);
|
||||
|
||||
return final_j;
|
||||
}
|
||||
|
||||
/** Augment for a dense cost matrix.
|
||||
*/
|
||||
int _ca_dense(const int n,
|
||||
float *cost[],
|
||||
const int n_free_rows,
|
||||
int *free_rows,
|
||||
int *x,
|
||||
int *y,
|
||||
float *v) {
|
||||
int *pred;
|
||||
|
||||
NEW(pred, int, n);
|
||||
|
||||
for (int *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
|
||||
int i = -1, j;
|
||||
int k = 0;
|
||||
|
||||
j = find_path_dense(n, cost, *pfree_i, y, v, pred);
|
||||
while (i != *pfree_i) {
|
||||
i = pred[j];
|
||||
y[j] = i;
|
||||
SWAP_INDICES(j, x[i]);
|
||||
k++;
|
||||
}
|
||||
}
|
||||
FREE(pred);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Solve dense LAP.
|
||||
*/
|
||||
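// The cost matrix is padded to an (n_rows + n_cols) square matrix: original
// costs in the top-left block, zeros in the dummy-vs-dummy block, and
// expand_value (cost_limit / 2, or max cost + 1) elsewhere; assignments that
// fall into the dummy block are reported as -1 in x / y.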
int lapjv_internal(const cv::Mat &cost,
|
||||
const bool extend_cost,
|
||||
const float cost_limit,
|
||||
int *x,
|
||||
int *y) {
|
||||
int n_rows = cost.rows;
|
||||
int n_cols = cost.cols;
|
||||
int n;
|
||||
if (n_rows == n_cols) {
|
||||
n = n_rows;
|
||||
} else if (!extend_cost) {
|
||||
throw std::invalid_argument(
|
||||
"Square cost array expected. If cost is intentionally non-square, pass "
|
||||
"extend_cost=True.");
|
||||
}
|
||||
|
||||
// Get extend cost
|
||||
if (extend_cost || cost_limit < LARGE) {
|
||||
n = n_rows + n_cols;
|
||||
}
|
||||
cv::Mat cost_expand(n, n, CV_32F);
|
||||
float expand_value;
|
||||
if (cost_limit < LARGE) {
|
||||
expand_value = cost_limit / 2;
|
||||
} else {
|
||||
double max_v;
|
||||
minMaxLoc(cost, nullptr, &max_v);
|
||||
expand_value = static_cast<float>(max_v) + 1.;
|
||||
}
|
||||
|
||||
for (int i = 0; i < n; ++i) {
|
||||
for (int j = 0; j < n; ++j) {
|
||||
cost_expand.at<float>(i, j) = expand_value;
|
||||
if (i >= n_rows && j >= n_cols) {
|
||||
cost_expand.at<float>(i, j) = 0;
|
||||
} else if (i < n_rows && j < n_cols) {
|
||||
cost_expand.at<float>(i, j) = cost.at<float>(i, j);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert Mat to pointer array
|
||||
float **cost_ptr;
|
||||
NEW(cost_ptr, float *, n);
|
||||
for (int i = 0; i < n; ++i) {
|
||||
NEW(cost_ptr[i], float, n);
|
||||
}
|
||||
for (int i = 0; i < n; ++i) {
|
||||
for (int j = 0; j < n; ++j) {
|
||||
cost_ptr[i][j] = cost_expand.at<float>(i, j);
|
||||
}
|
||||
}
|
||||
|
||||
int ret;
|
||||
int *free_rows;
|
||||
float *v;
|
||||
int *x_c;
|
||||
int *y_c;
|
||||
|
||||
NEW(free_rows, int, n);
|
||||
NEW(v, float, n);
|
||||
NEW(x_c, int, n);
|
||||
NEW(y_c, int, n);
|
||||
|
||||
ret = _ccrrt_dense(n, cost_ptr, free_rows, x_c, y_c, v);
|
||||
int i = 0;
|
||||
while (ret > 0 && i < 2) {
|
||||
ret = _carr_dense(n, cost_ptr, ret, free_rows, x_c, y_c, v);
|
||||
i++;
|
||||
}
|
||||
if (ret > 0) {
|
||||
ret = _ca_dense(n, cost_ptr, ret, free_rows, x_c, y_c, v);
|
||||
}
|
||||
FREE(v);
|
||||
FREE(free_rows);
|
||||
for (int i = 0; i < n; ++i) {
|
||||
FREE(cost_ptr[i]);
|
||||
}
|
||||
FREE(cost_ptr);
|
||||
if (ret != 0) {
|
||||
if (ret == -1) {
|
||||
throw "Out of memory.";
|
||||
}
|
||||
throw "Unknown error (lapjv_internal)";
|
||||
}
|
||||
// Get output of x, y, opt
|
||||
for (int i = 0; i < n; ++i) {
|
||||
if (i < n_rows) {
|
||||
x[i] = x_c[i];
|
||||
if (x[i] >= n_cols) {
|
||||
x[i] = -1;
|
||||
}
|
||||
}
|
||||
if (i < n_cols) {
|
||||
y[i] = y_c[i];
|
||||
if (y[i] >= n_rows) {
|
||||
y[i] = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
FREE(x_c);
|
||||
FREE(y_c);
|
||||
return ret;
|
||||
}
|
||||
|
||||
} // namespace PaddleDetection
|
||||
172
paddle_detection/deploy/pptracking/cpp/src/main.cc
Normal file
@@ -0,0 +1,172 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <glog/logging.h>
|
||||
|
||||
#include <math.h>
|
||||
#include <sys/types.h>
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <numeric>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <direct.h>
|
||||
#include <io.h>
|
||||
#else
|
||||
#include <stdarg.h>
|
||||
#include <sys/stat.h>
|
||||
#endif
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
#include "include/pipeline.h"
|
||||
|
||||
DEFINE_string(video_file, "", "Path of input video.");
|
||||
DEFINE_string(video_other_file,
|
||||
"",
|
||||
"Path of other input video used for MTMCT.");
|
||||
DEFINE_string(device,
|
||||
"CPU",
|
||||
"Choose the device you want to run, it can be: CPU/GPU/XPU, "
|
||||
"default is CPU.");
|
||||
DEFINE_double(threshold, 0.5, "Threshold of score.");
|
||||
DEFINE_string(output_dir, "output", "Directory of output visualization files.");
|
||||
DEFINE_string(run_mode,
|
||||
"paddle",
|
||||
"Mode of running(paddle/trt_fp32/trt_fp16/trt_int8)");
|
||||
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute");
|
||||
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU");
|
||||
DEFINE_int32(cpu_threads, 1, "Num of threads with CPU");
|
||||
DEFINE_bool(trt_calib_mode,
|
||||
false,
|
||||
"If the model is produced by TRT offline quantitative calibration, "
|
||||
"trt_calib_mode need to set True");
|
||||
DEFINE_bool(tiny_obj, false, "Whether to track tiny objects");
|
||||
DEFINE_bool(do_entrance_counting,
|
||||
false,
|
||||
"Whether counting the numbers of identifiers entering "
|
||||
"or getting out from the entrance.");
|
||||
DEFINE_int32(secs_interval, 10, "The seconds interval to count after tracking");
|
||||
DEFINE_bool(save_result, false, "Whether saving result after tracking");
|
||||
DEFINE_string(
|
||||
scene,
|
||||
"",
|
||||
"scene of tracking system, it can be : pedestrian/vehicle/multiclass");
|
||||
DEFINE_bool(is_mtmct, false, "Whether use multi-target multi-camera tracking");
|
||||
DEFINE_string(track_model_dir, "", "Path of tracking model");
|
||||
DEFINE_string(det_model_dir, "", "Path of detection model");
|
||||
DEFINE_string(reid_model_dir, "", "Path of reid model");
|
||||
|
||||
static std::string DirName(const std::string& filepath) {
|
||||
auto pos = filepath.rfind(OS_PATH_SEP);
|
||||
if (pos == std::string::npos) {
|
||||
return "";
|
||||
}
|
||||
return filepath.substr(0, pos);
|
||||
}
|
||||
|
||||
static bool PathExists(const std::string& path) {
|
||||
#ifdef _WIN32
|
||||
struct _stat buffer;
|
||||
return (_stat(path.c_str(), &buffer) == 0);
|
||||
#else
|
||||
struct stat buffer;
|
||||
return (stat(path.c_str(), &buffer) == 0);
|
||||
#endif // !_WIN32
|
||||
}
|
||||
|
||||
static void MkDir(const std::string& path) {
|
||||
if (PathExists(path)) return;
|
||||
int ret = 0;
|
||||
#ifdef _WIN32
|
||||
ret = _mkdir(path.c_str());
|
||||
#else
|
||||
ret = mkdir(path.c_str(), 0755);
|
||||
#endif // !_WIN32
|
||||
if (ret != 0) {
|
||||
std::string path_error(path);
|
||||
path_error += " mkdir failed!";
|
||||
throw std::runtime_error(path_error);
|
||||
}
|
||||
}
|
||||
|
||||
static void MkDirs(const std::string& path) {
|
||||
if (path.empty()) return;
|
||||
if (PathExists(path)) return;
|
||||
|
||||
MkDirs(DirName(path));
|
||||
MkDir(path);
|
||||
}
|
||||
|
||||
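// Illustrative invocations (paths are placeholders):
//   ./main --video_file=/path/to/video.mp4 --scene=pedestrian --device=GPU
//   ./main --video_file=/path/to/video.mp4 --track_model_dir=/path/to/model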
int main(int argc, char** argv) {
|
||||
// Parsing command-line
|
||||
google::ParseCommandLineFlags(&argc, &argv, true);
|
||||
bool has_model_dir =
|
||||
!(FLAGS_track_model_dir.empty() && FLAGS_det_model_dir.empty() &&
|
||||
FLAGS_reid_model_dir.empty());
|
||||
if (FLAGS_video_file.empty() || (FLAGS_scene.empty() && !has_model_dir)) {
|
||||
LOG(ERROR) << "Usage: \n"
|
||||
<< "1. ./main -video_file=/PATH/TO/INPUT/IMAGE/ "
|
||||
<< "-scene=pedestrian/vehicle/multiclass\n"
|
||||
<< "2. ./main -video_file=/PATH/TO/INPUT/IMAGE/ "
|
||||
<< "-track_model_dir=/PATH/TO/MODEL_DIR" << std::endl;
|
||||
|
||||
return -1;
|
||||
}
|
||||
if (!(FLAGS_run_mode == "paddle" || FLAGS_run_mode == "trt_fp32" ||
|
||||
FLAGS_run_mode == "trt_fp16" || FLAGS_run_mode == "trt_int8")) {
|
||||
LOG(ERROR)
|
||||
<< "run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or 'trt_int8'.";
|
||||
return -1;
|
||||
}
|
||||
transform(FLAGS_device.begin(),
|
||||
FLAGS_device.end(),
|
||||
FLAGS_device.begin(),
|
||||
::toupper);
|
||||
if (!(FLAGS_device == "CPU" || FLAGS_device == "GPU" ||
|
||||
FLAGS_device == "XPU")) {
|
||||
LOG(ERROR) << "device should be 'CPU', 'GPU' or 'XPU'.";
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!PathExists(FLAGS_output_dir)) {
|
||||
MkDirs(FLAGS_output_dir);
|
||||
}
|
||||
|
||||
PaddleDetection::Pipeline pipeline(FLAGS_device,
|
||||
FLAGS_threshold,
|
||||
FLAGS_output_dir,
|
||||
FLAGS_run_mode,
|
||||
FLAGS_gpu_id,
|
||||
FLAGS_use_mkldnn,
|
||||
FLAGS_cpu_threads,
|
||||
FLAGS_trt_calib_mode,
|
||||
FLAGS_do_entrance_counting,
|
||||
FLAGS_save_result,
|
||||
FLAGS_scene,
|
||||
FLAGS_tiny_obj,
|
||||
FLAGS_is_mtmct,
|
||||
FLAGS_secs_interval,
|
||||
FLAGS_track_model_dir,
|
||||
FLAGS_det_model_dir,
|
||||
FLAGS_reid_model_dir);
|
||||
|
||||
pipeline.SetInput(FLAGS_video_file);
|
||||
if (!FLAGS_video_other_file.empty()) {
|
||||
pipeline.SetInput(FLAGS_video_other_file);
|
||||
}
|
||||
pipeline.Run();
|
||||
return 0;
|
||||
}
|
||||
367
paddle_detection/deploy/pptracking/cpp/src/pipeline.cc
Normal file
@@ -0,0 +1,367 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <sstream>
|
||||
// for setprecision
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#include "include/pipeline.h"
|
||||
#include "include/postprocess.h"
|
||||
#include "include/predictor.h"
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
void Pipeline::SetInput(const std::string& input_video) {
|
||||
input_.push_back(input_video);
|
||||
}
|
||||
|
||||
void Pipeline::ClearInput() {
|
||||
input_.clear();
|
||||
stream_.clear();
|
||||
}
|
||||
|
||||
void Pipeline::SelectModel(const std::string& scene,
|
||||
const bool tiny_obj,
|
||||
const bool is_mtmct,
|
||||
const std::string track_model_dir,
|
||||
const std::string det_model_dir,
|
||||
const std::string reid_model_dir) {
|
||||
// model_dir has higher priority
|
||||
if (!track_model_dir.empty()) {
|
||||
track_model_dir_ = track_model_dir;
|
||||
return;
|
||||
}
|
||||
if (!det_model_dir.empty() && !reid_model_dir.empty()) {
|
||||
det_model_dir_ = det_model_dir;
|
||||
reid_model_dir_ = reid_model_dir;
|
||||
return;
|
||||
}
|
||||
|
||||
// Single camera model, based on FairMot
|
||||
if (scene == "pedestrian") {
|
||||
if (tiny_obj) {
|
||||
track_model_dir_ = "../pedestrian_track_tiny";
|
||||
} else {
|
||||
track_model_dir_ = "../pedestrian_track";
|
||||
}
|
||||
} else if (scene != "vehicle") {
|
||||
if (tiny_obj) {
|
||||
track_model_dir_ = "../vehicle_track_tiny";
|
||||
} else {
|
||||
track_model_dir_ = "../vehicle_track";
|
||||
}
|
||||
} else if (scene == "multiclass") {
|
||||
if (tiny_obj) {
|
||||
track_model_dir_ = "../multiclass_track_tiny";
|
||||
} else {
|
||||
track_model_dir_ = "../multiclass_track";
|
||||
}
|
||||
}
|
||||
|
||||
// Multi-camera model, based on PicoDet & LCNet
|
||||
if (is_mtmct && scene == "pedestrian") {
|
||||
det_model_dir_ = "../pedestrian_det";
|
||||
reid_model_dir_ = "../pedestrian_reid";
|
||||
} else if (is_mtmct && scene == "vehicle") {
|
||||
det_model_dir_ = "../vehicle_det";
|
||||
reid_model_dir_ = "../vehicle_reid";
|
||||
} else if (is_mtmct && scene == "multiclass") {
|
||||
throw "Multi-camera tracking is not supported in multiclass scene now.";
|
||||
}
|
||||
}
|
||||
|
||||
void Pipeline::InitPredictor() {
|
||||
if (track_model_dir_.empty() && det_model_dir_.empty()) {
|
||||
throw "Predictor must receive track_model or det_model!";
|
||||
}
|
||||
|
||||
if (!track_model_dir_.empty()) {
|
||||
jde_sct_ = std::make_shared<PaddleDetection::JDEPredictor>(device_,
|
||||
track_model_dir_,
|
||||
threshold_,
|
||||
run_mode_,
|
||||
gpu_id_,
|
||||
use_mkldnn_,
|
||||
cpu_threads_,
|
||||
trt_calib_mode_);
|
||||
}
|
||||
if (!det_model_dir_.empty()) {
|
||||
sde_sct_ = std::make_shared<PaddleDetection::SDEPredictor>(device_,
|
||||
det_model_dir_,
|
||||
reid_model_dir_,
|
||||
threshold_,
|
||||
run_mode_,
|
||||
gpu_id_,
|
||||
use_mkldnn_,
|
||||
cpu_threads_,
|
||||
trt_calib_mode_);
|
||||
}
|
||||
}
|
||||
|
||||
void Pipeline::Run() {
|
||||
if (track_model_dir_.empty() && det_model_dir_.empty()) {
|
||||
LOG(ERROR) << "Pipeline must use SelectModel before Run";
|
||||
return;
|
||||
}
|
||||
if (input_.size() == 0) {
|
||||
LOG(ERROR) << "Pipeline must use SetInput before Run";
|
||||
return;
|
||||
}
|
||||
|
||||
if (!track_model_dir_.empty()) {
|
||||
// single camera
|
||||
if (input_.size() > 1) {
|
||||
throw "Single camera tracking except single video, but received %d",
|
||||
input_.size();
|
||||
}
|
||||
PredictMOT(input_[0]);
|
||||
} else {
|
||||
// multi cameras
|
||||
if (input_.size() != 2) {
|
||||
throw "Multi camera tracking except two videos, but received %d",
|
||||
input_.size();
|
||||
}
|
||||
PredictMTMCT(input_);
|
||||
}
|
||||
}
|
||||
|
||||
void Pipeline::PredictMOT(const std::string& video_path) {
|
||||
// Open video
|
||||
cv::VideoCapture capture;
|
||||
capture.open(video_path.c_str());
|
||||
if (!capture.isOpened()) {
|
||||
printf("can not open video : %s\n", video_path.c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
// Get Video info : resolution, fps
|
||||
int video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
|
||||
int video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
|
||||
int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
|
||||
|
||||
LOG(INFO) << "----------------------- Input info -----------------------";
|
||||
LOG(INFO) << "video_width: " << video_width;
|
||||
LOG(INFO) << "video_height: " << video_height;
|
||||
LOG(INFO) << "input fps: " << video_fps;
|
||||
|
||||
// Create VideoWriter for output
|
||||
cv::VideoWriter video_out;
|
||||
std::string video_out_path = output_dir_ + OS_PATH_SEP + "mot_output.mp4";
|
||||
int fcc = cv::VideoWriter::fourcc('m', 'p', '4', 'v');
|
||||
video_out.open(video_out_path.c_str(),
|
||||
fcc, // 0x00000021,
|
||||
video_fps,
|
||||
cv::Size(video_width, video_height),
|
||||
true);
|
||||
if (!video_out.isOpened()) {
|
||||
printf("create video writer failed!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
PaddleDetection::MOTResult result;
|
||||
std::vector<double> det_times(3);
|
||||
std::set<int> id_set;
|
||||
std::set<int> interval_id_set;
|
||||
std::vector<int> in_id_list;
|
||||
std::vector<int> out_id_list;
|
||||
std::map<int, std::vector<float>> prev_center;
|
||||
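// Default entrance used for in/out counting: the horizontal mid-line of the
// frame (see FlowStatistic).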
Rect entrance = {0,
|
||||
static_cast<float>(video_height) / 2,
|
||||
static_cast<float>(video_width),
|
||||
static_cast<float>(video_height) / 2};
|
||||
double times;
|
||||
double total_time;
|
||||
// Capture all frames and do inference
|
||||
cv::Mat frame;
|
||||
int frame_id = 0;
|
||||
|
||||
std::vector<std::string> records;
|
||||
std::vector<std::string> flow_records;
|
||||
records.push_back("result format: frame_id, track_id, x1, y1, w, h\n");
|
||||
|
||||
LOG(INFO) << "------------------- Predict info ------------------------";
|
||||
while (capture.read(frame)) {
|
||||
if (frame.empty()) {
|
||||
break;
|
||||
}
|
||||
std::vector<cv::Mat> imgs;
|
||||
imgs.push_back(frame);
|
||||
jde_sct_->Predict(imgs, threshold_, &result, &det_times);
|
||||
frame_id += 1;
|
||||
total_time = std::accumulate(det_times.begin(), det_times.end(), 0.);
|
||||
times = total_time / frame_id;
|
||||
|
||||
LOG(INFO) << "frame_id: " << frame_id
|
||||
<< " predict time(s): " << times / 1000;
|
||||
|
||||
cv::Mat out_img = PaddleDetection::VisualizeTrackResult(
|
||||
frame, result, 1000. / times, frame_id);
|
||||
|
||||
// TODO(qianhui): the entrance line can be set by users
|
||||
PaddleDetection::FlowStatistic(result,
|
||||
frame_id,
|
||||
secs_interval_,
|
||||
do_entrance_counting_,
|
||||
video_fps,
|
||||
entrance,
|
||||
&id_set,
|
||||
&interval_id_set,
|
||||
&in_id_list,
|
||||
&out_id_list,
|
||||
&prev_center,
|
||||
&flow_records);
|
||||
|
||||
if (save_result_) {
|
||||
PaddleDetection::SaveMOTResult(result, frame_id, &records);
|
||||
}
|
||||
|
||||
// Draw the entrance line
|
||||
if (do_entrance_counting_) {
|
||||
float line_thickness = std::max(1, static_cast<int>(video_width / 500.));
|
||||
cv::Point pt1 = cv::Point(entrance.left, entrance.top);
|
||||
cv::Point pt2 = cv::Point(entrance.right, entrance.bottom);
|
||||
cv::line(out_img, pt1, pt2, cv::Scalar(0, 255, 255), line_thickness);
|
||||
}
|
||||
video_out.write(out_img);
|
||||
}
|
||||
capture.release();
|
||||
video_out.release();
|
||||
PrintBenchmarkLog(det_times, frame_id);
|
||||
LOG(INFO) << "-------------------- Final Output info -------------------";
|
||||
LOG(INFO) << "Total frame: " << frame_id;
|
||||
LOG(INFO) << "Visualized output saved as " << video_out_path.c_str();
|
||||
if (save_result_) {
|
||||
FILE* fp;
|
||||
|
||||
std::string result_output_path =
|
||||
output_dir_ + OS_PATH_SEP + "mot_output.txt";
|
||||
if ((fp = fopen(result_output_path.c_str(), "w+")) == NULL) {
|
||||
printf("Open %s error.\n", result_output_path.c_str());
|
||||
return;
|
||||
}
|
||||
for (int l = 0; l < records.size(); ++l) {
|
||||
fprintf(fp, records[l].c_str());
|
||||
}
|
||||
|
||||
fclose(fp);
|
||||
LOG(INFO) << "txt result output saved as " << result_output_path.c_str();
|
||||
|
||||
result_output_path = output_dir_ + OS_PATH_SEP + "flow_statistic.txt";
|
||||
if ((fp = fopen(result_output_path.c_str(), "w+")) == NULL) {
|
||||
printf("Open %s error.\n", result_output_path);
|
||||
return;
|
||||
}
|
||||
for (int l = 0; l < flow_records.size(); ++l) {
|
||||
fprintf(fp, flow_records[l].c_str());
|
||||
}
|
||||
fclose(fp);
|
||||
LOG(INFO) << "txt flow statistic saved as " << result_output_path.c_str();
|
||||
}
|
||||
}
|
||||
|
||||
void Pipeline::PredictMTMCT(const std::vector<std::string> video_path) {
|
||||
throw "Not Implement!";
|
||||
}
|
||||
|
||||
void Pipeline::RunMOTStream(const cv::Mat img,
|
||||
const int frame_id,
|
||||
const int video_fps,
|
||||
const Rect entrance,
|
||||
cv::Mat out_img,
|
||||
std::vector<std::string>* records,
|
||||
std::set<int>* id_set,
|
||||
std::set<int>* interval_id_set,
|
||||
std::vector<int>* in_id_list,
|
||||
std::vector<int>* out_id_list,
|
||||
std::map<int, std::vector<float>>* prev_center,
|
||||
std::vector<std::string>* flow_records) {
|
||||
PaddleDetection::MOTResult result;
|
||||
std::vector<double> det_times(3);
|
||||
double times;
|
||||
double total_time;
|
||||
|
||||
LOG(INFO) << "------------------- Predict info ------------------------";
|
||||
std::vector<cv::Mat> imgs;
|
||||
imgs.push_back(img);
|
||||
jde_sct_->Predict(imgs, threshold_, &result, &det_times);
|
||||
total_time = std::accumulate(det_times.begin(), det_times.end(), 0.);
|
||||
times = total_time / frame_id;
|
||||
|
||||
LOG(INFO) << "frame_id: " << frame_id << " predict time(s): " << times / 1000;
|
||||
|
||||
out_img = PaddleDetection::VisualizeTrackResult(
|
||||
img, result, 1000. / times, frame_id);
|
||||
|
||||
// Count total number
|
||||
// Count in & out number
|
||||
PaddleDetection::FlowStatistic(result,
|
||||
frame_id,
|
||||
secs_interval_,
|
||||
do_entrance_counting_,
|
||||
video_fps,
|
||||
entrance,
|
||||
id_set,
|
||||
interval_id_set,
|
||||
in_id_list,
|
||||
out_id_list,
|
||||
prev_center,
|
||||
flow_records);
|
||||
|
||||
PrintBenchmarkLog(det_times, frame_id);
|
||||
if (save_result_) {
|
||||
PaddleDetection::SaveMOTResult(result, frame_id, records);
|
||||
}
|
||||
}
|
||||
|
||||
void Pipeline::RunMTMCTStream(const std::vector<cv::Mat> imgs,
|
||||
std::vector<std::string>* records) {
|
||||
throw "Not Implement!";
|
||||
}
|
||||
|
||||
void Pipeline::PrintBenchmarkLog(const std::vector<double> det_time,
|
||||
const int img_num) {
|
||||
LOG(INFO) << "----------------------- Config info -----------------------";
|
||||
LOG(INFO) << "runtime_device: " << device_;
|
||||
LOG(INFO) << "ir_optim: "
|
||||
<< "True";
|
||||
LOG(INFO) << "enable_memory_optim: "
|
||||
<< "True";
|
||||
bool has_trt = run_mode_.find("trt") != std::string::npos;
|
||||
if (has_trt) {
|
||||
LOG(INFO) << "enable_tensorrt: "
|
||||
<< "True";
|
||||
std::string precision = run_mode_.substr(4, 8);
|
||||
LOG(INFO) << "precision: " << precision;
|
||||
} else {
|
||||
LOG(INFO) << "enable_tensorrt: "
|
||||
<< "False";
|
||||
LOG(INFO) << "precision: "
|
||||
<< "fp32";
|
||||
}
|
||||
LOG(INFO) << "enable_mkldnn: " << (use_mkldnn_ ? "True" : "False");
|
||||
LOG(INFO) << "cpu_math_library_num_threads: " << cpu_threads_;
|
||||
LOG(INFO) << "----------------------- Perf info ------------------------";
|
||||
LOG(INFO) << "Total number of predicted data: " << img_num
|
||||
<< " and total time spent(s): "
|
||||
<< std::accumulate(det_time.begin(), det_time.end(), 0.) / 1000;
|
||||
int num = std::max(1, img_num);
|
||||
LOG(INFO) << "preproce_time(ms): " << det_time[0] / num
|
||||
<< ", inference_time(ms): " << det_time[1] / num
|
||||
<< ", postprocess_time(ms): " << det_time[2] / num;
|
||||
}
|
||||
|
||||
} // namespace PaddleDetection
|
||||
207
paddle_detection/deploy/pptracking/cpp/src/postprocess.cc
Normal file
@@ -0,0 +1,207 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <sstream>
|
||||
// for setprecision
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
#include "include/postprocess.h"
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
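// Map a track id to a deterministic BGR color so the same id keeps the same
// color across frames.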
cv::Scalar GetColor(int idx) {
|
||||
idx = idx * 3;
|
||||
cv::Scalar color =
|
||||
cv::Scalar((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255);
|
||||
return color;
|
||||
}
|
||||
|
||||
cv::Mat VisualizeTrackResult(const cv::Mat& img,
|
||||
const MOTResult& results,
|
||||
const float fps,
|
||||
const int frame_id) {
|
||||
cv::Mat vis_img = img.clone();
|
||||
int im_h = img.rows;
|
||||
int im_w = img.cols;
|
||||
float text_scale = std::max(1, static_cast<int>(im_w / 1600.));
|
||||
float text_thickness = 2.;
|
||||
float line_thickness = std::max(1, static_cast<int>(im_w / 500.));
|
||||
|
||||
std::ostringstream oss;
|
||||
oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
|
||||
oss << "frame: " << frame_id << " ";
|
||||
oss << "fps: " << fps << " ";
|
||||
oss << "num: " << results.size();
|
||||
std::string text = oss.str();
|
||||
|
||||
cv::Point origin;
|
||||
origin.x = 0;
|
||||
origin.y = static_cast<int>(15 * text_scale);
|
||||
cv::putText(vis_img,
|
||||
text,
|
||||
origin,
|
||||
cv::FONT_HERSHEY_PLAIN,
|
||||
text_scale,
|
||||
cv::Scalar(0, 0, 255),
|
||||
2);
|
||||
|
||||
for (int i = 0; i < results.size(); ++i) {
|
||||
const int obj_id = results[i].ids;
|
||||
const float score = results[i].score;
|
||||
|
||||
cv::Scalar color = GetColor(obj_id);
|
||||
|
||||
cv::Point pt1 = cv::Point(results[i].rects.left, results[i].rects.top);
|
||||
cv::Point pt2 = cv::Point(results[i].rects.right, results[i].rects.bottom);
|
||||
cv::Point id_pt =
|
||||
cv::Point(results[i].rects.left, results[i].rects.top + 10);
|
||||
cv::Point score_pt =
|
||||
cv::Point(results[i].rects.left, results[i].rects.top - 10);
|
||||
cv::rectangle(vis_img, pt1, pt2, color, line_thickness);
|
||||
|
||||
std::ostringstream idoss;
|
||||
idoss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
|
||||
idoss << obj_id;
|
||||
std::string id_text = idoss.str();
|
||||
|
||||
cv::putText(vis_img,
|
||||
id_text,
|
||||
id_pt,
|
||||
cv::FONT_HERSHEY_PLAIN,
|
||||
text_scale,
|
||||
cv::Scalar(0, 255, 255),
|
||||
text_thickness);
|
||||
|
||||
std::ostringstream soss;
|
||||
soss << std::setiosflags(std::ios::fixed) << std::setprecision(2);
|
||||
soss << score;
|
||||
std::string score_text = soss.str();
|
||||
|
||||
cv::putText(vis_img,
|
||||
score_text,
|
||||
score_pt,
|
||||
cv::FONT_HERSHEY_PLAIN,
|
||||
text_scale,
|
||||
cv::Scalar(0, 255, 255),
|
||||
text_thickness);
|
||||
}
|
||||
return vis_img;
|
||||
}
|
||||
|
||||
void FlowStatistic(const MOTResult& results,
|
||||
const int frame_id,
|
||||
const int secs_interval,
|
||||
const bool do_entrance_counting,
|
||||
const int video_fps,
|
||||
const Rect entrance,
|
||||
std::set<int>* id_set,
|
||||
std::set<int>* interval_id_set,
|
||||
std::vector<int>* in_id_list,
|
||||
std::vector<int>* out_id_list,
|
||||
std::map<int, std::vector<float>>* prev_center,
|
||||
std::vector<std::string>* records) {
|
||||
if (frame_id == 0) interval_id_set->clear();
|
||||
|
||||
if (do_entrance_counting) {
|
||||
// Count in and out number:
|
||||
// Use the horizontal center line as the entrance, for simplicity.
|
||||
// If a person was above the horizontal center line
|
||||
// in the previous frame and is below the line in the current frame,
|
||||
// the in number is increased by one.
|
||||
// If a person was below the horizontal center line
|
||||
// in the previous frame and is above the line in the current
|
||||
// frame,
|
||||
// the out number is increased by one.
|
||||
// TODO(qianhui): if the entrance is not the horizontal center line,
|
||||
// the counting method should be optimized.
|
||||
|
||||
float entrance_y = entrance.top;
|
||||
for (const auto& result : results) {
|
||||
float center_x = (result.rects.left + result.rects.right) / 2;
|
||||
float center_y = (result.rects.top + result.rects.bottom) / 2;
|
||||
int ids = result.ids;
|
||||
std::map<int, std::vector<float>>::iterator iter;
|
||||
iter = prev_center->find(ids);
|
||||
if (iter != prev_center->end()) {
|
||||
if (iter->second[1] <= entrance_y && center_y > entrance_y) {
|
||||
in_id_list->push_back(ids);
|
||||
}
|
||||
if (iter->second[1] >= entrance_y && center_y < entrance_y) {
|
||||
out_id_list->push_back(ids);
|
||||
}
|
||||
(*prev_center)[ids][0] = center_x;
|
||||
(*prev_center)[ids][1] = center_y;
|
||||
} else {
|
||||
prev_center->insert(
|
||||
std::pair<int, std::vector<float>>(ids, {center_x, center_y}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count total number, and the number within a manually-set interval
|
||||
for (const auto& result : results) {
|
||||
id_set->insert(result.ids);
|
||||
interval_id_set->insert(result.ids);
|
||||
}
|
||||
|
||||
std::ostringstream os;
|
||||
os << "Frame id: " << frame_id << ", Total count: " << id_set->size();
|
||||
if (do_entrance_counting) {
|
||||
os << ", In count: " << in_id_list->size()
|
||||
<< ", Out count: " << out_id_list->size();
|
||||
}
|
||||
|
||||
// Reset counting at the interval beginning
|
||||
int curr_interval_count = -1;
|
||||
if (frame_id % video_fps == 0 && frame_id / video_fps % secs_interval == 0) {
|
||||
curr_interval_count = interval_id_set->size();
|
||||
os << ", Count during " << secs_interval
|
||||
<< " secs: " << curr_interval_count;
|
||||
interval_id_set->clear();
|
||||
}
|
||||
os << "\n";
|
||||
std::string record = os.str();
|
||||
records->push_back(record);
|
||||
LOG(INFO) << record;
|
||||
}
|
||||
|
||||
void SaveMOTResult(const MOTResult& results,
|
||||
const int frame_id,
|
||||
std::vector<std::string>* records) {
|
||||
// result format: frame_id, track_id, x1, y1, w, h
|
||||
std::string record;
|
||||
for (int i = 0; i < results.size(); ++i) {
|
||||
MOTTrack mot_track = results[i];
|
||||
int ids = mot_track.ids;
|
||||
float score = mot_track.score;
|
||||
Rect rects = mot_track.rects;
|
||||
float x1 = rects.left;
|
||||
float y1 = rects.top;
|
||||
float x2 = rects.right;
|
||||
float y2 = rects.bottom;
|
||||
float w = x2 - x1;
|
||||
float h = y2 - y1;
|
||||
if (w == 0 || h == 0) {
|
||||
continue;
|
||||
}
|
||||
std::ostringstream os;
|
||||
os << frame_id << " " << ids << " " << x1 << " " << y1 << " " << w << " "
|
||||
<< h << "\n";
|
||||
record = os.str();
|
||||
records->push_back(record);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace PaddleDetection
|
||||
35
paddle_detection/deploy/pptracking/cpp/src/predictor.cc
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <sstream>
|
||||
// for setprecision
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include "include/predictor.h"
|
||||
|
||||
using namespace paddle_infer; // NOLINT
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
void Predictor::Predict(const std::vector<cv::Mat> imgs,
|
||||
const double threshold,
|
||||
MOTResult* result,
|
||||
std::vector<double>* times) {
|
||||
if (use_jde_) {
|
||||
jde_sct_->Predict(imgs, threshold, result, times);
|
||||
} else {
|
||||
sde_sct_->Predict(imgs, threshold, result, times);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace PaddleDetection
|
||||
187
paddle_detection/deploy/pptracking/cpp/src/preprocess_op.cc
Normal file
@@ -0,0 +1,187 @@
|
||||
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "include/preprocess_op.h"
|
||||
|
||||
namespace PaddleDetection {
|
||||
|
||||
void InitInfo::Run(cv::Mat* im, ImageBlob* data) {
|
||||
data->im_shape_ = {static_cast<float>(im->rows),
|
||||
static_cast<float>(im->cols)};
|
||||
data->scale_factor_ = {1., 1.};
|
||||
data->in_net_shape_ = {static_cast<float>(im->rows),
|
||||
static_cast<float>(im->cols)};
|
||||
}
|
||||
|
||||
void NormalizeImage::Run(cv::Mat* im, ImageBlob* data) {
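// Optionally scale pixel values into [0, 1], then normalize each channel as
// (value - mean_) / scale_.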
|
||||
double e = 1.0;
|
||||
if (is_scale_) {
|
||||
e /= 255.0;
|
||||
}
|
||||
(*im).convertTo(*im, CV_32FC3, e);
|
||||
for (int h = 0; h < im->rows; h++) {
|
||||
for (int w = 0; w < im->cols; w++) {
|
||||
im->at<cv::Vec3f>(h, w)[0] =
|
||||
(im->at<cv::Vec3f>(h, w)[0] - mean_[0]) / scale_[0];
|
||||
im->at<cv::Vec3f>(h, w)[1] =
|
||||
(im->at<cv::Vec3f>(h, w)[1] - mean_[1]) / scale_[1];
|
||||
im->at<cv::Vec3f>(h, w)[2] =
|
||||
(im->at<cv::Vec3f>(h, w)[2] - mean_[2]) / scale_[2];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Permute::Run(cv::Mat* im, ImageBlob* data) {
|
||||
(*im).convertTo(*im, CV_32FC3);
|
||||
int rh = im->rows;
|
||||
int rw = im->cols;
|
||||
int rc = im->channels();
|
||||
(data->im_data_).resize(rc * rh * rw);
|
||||
float* base = (data->im_data_).data();
|
||||
for (int i = 0; i < rc; ++i) {
|
||||
cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);
|
||||
}
|
||||
}
|
||||
|
||||
void Resize::Run(cv::Mat* im, ImageBlob* data) {
  auto resize_scale = GenerateScale(*im);
  data->im_shape_ = {static_cast<float>(im->cols * resize_scale.first),
                     static_cast<float>(im->rows * resize_scale.second)};
  data->in_net_shape_ = {static_cast<float>(im->cols * resize_scale.first),
                         static_cast<float>(im->rows * resize_scale.second)};
  cv::resize(
      *im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_);
  data->im_shape_ = {
      static_cast<float>(im->rows), static_cast<float>(im->cols),
  };
  data->scale_factor_ = {
      resize_scale.second, resize_scale.first,
  };
}

std::pair<float, float> Resize::GenerateScale(const cv::Mat& im) {
  std::pair<float, float> resize_scale;
  int origin_w = im.cols;
  int origin_h = im.rows;

  if (keep_ratio_) {
    int im_size_max = std::max(origin_w, origin_h);
    int im_size_min = std::min(origin_w, origin_h);
    int target_size_max =
        *std::max_element(target_size_.begin(), target_size_.end());
    int target_size_min =
        *std::min_element(target_size_.begin(), target_size_.end());
    float scale_min =
        static_cast<float>(target_size_min) / static_cast<float>(im_size_min);
    float scale_max =
        static_cast<float>(target_size_max) / static_cast<float>(im_size_max);
    float scale_ratio = std::min(scale_min, scale_max);
    resize_scale = {scale_ratio, scale_ratio};
  } else {
    resize_scale.first =
        static_cast<float>(target_size_[1]) / static_cast<float>(origin_w);
    resize_scale.second =
        static_cast<float>(target_size_[0]) / static_cast<float>(origin_h);
  }
  return resize_scale;
}
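
// Letterbox resize: scale the image so it fits inside target_size_ while
// keeping its aspect ratio, then pad the borders symmetrically with gray
// (127.5) to reach the exact target shape.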
void LetterBoxResize::Run(cv::Mat* im, ImageBlob* data) {
  float resize_scale = GenerateScale(*im);
  int new_shape_w = std::round(im->cols * resize_scale);
  int new_shape_h = std::round(im->rows * resize_scale);
  data->im_shape_ = {static_cast<float>(new_shape_h),
                     static_cast<float>(new_shape_w)};
  float padw = (target_size_[1] - new_shape_w) / 2.;
  float padh = (target_size_[0] - new_shape_h) / 2.;

  int top = std::round(padh - 0.1);
  int bottom = std::round(padh + 0.1);
  int left = std::round(padw - 0.1);
  int right = std::round(padw + 0.1);

  cv::resize(
      *im, *im, cv::Size(new_shape_w, new_shape_h), 0, 0, cv::INTER_AREA);

  data->in_net_shape_ = {
      static_cast<float>(im->rows), static_cast<float>(im->cols),
  };
  cv::copyMakeBorder(*im,
                     *im,
                     top,
                     bottom,
                     left,
                     right,
                     cv::BORDER_CONSTANT,
                     cv::Scalar(127.5));

  data->in_net_shape_ = {
      static_cast<float>(im->rows), static_cast<float>(im->cols),
  };

  data->scale_factor_ = {
      resize_scale, resize_scale,
  };
}

float LetterBoxResize::GenerateScale(const cv::Mat& im) {
  int origin_w = im.cols;
  int origin_h = im.rows;

  int target_h = target_size_[0];
  int target_w = target_size_[1];

  float ratio_h = static_cast<float>(target_h) / static_cast<float>(origin_h);
  float ratio_w = static_cast<float>(target_w) / static_cast<float>(origin_w);
  float resize_scale = std::min(ratio_h, ratio_w);
  return resize_scale;
}
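
// Pad the bottom/right of the image with zeros so that height and width
// become multiples of stride_; a stride of 0 or less disables padding.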
void PadStride::Run(cv::Mat* im, ImageBlob* data) {
  if (stride_ <= 0) {
    return;
  }
  int rc = im->channels();
  int rh = im->rows;
  int rw = im->cols;
  int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_;
  int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_;
  cv::copyMakeBorder(
      *im, *im, 0, nh - rh, 0, nw - rw, cv::BORDER_CONSTANT, cv::Scalar(0));
  data->in_net_shape_ = {
      static_cast<float>(im->rows), static_cast<float>(im->cols),
  };
}

// Preprocessor op running order
const std::vector<std::string> Preprocessor::RUN_ORDER = {"InitInfo",
                                                          "Resize",
                                                          "LetterBoxResize",
                                                          "NormalizeImage",
                                                          "PadStride",
                                                          "Permute"};
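
// Run only the ops that were instantiated from the model's preprocessing
// configuration, always in the fixed RUN_ORDER above.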
void Preprocessor::Run(cv::Mat* im, ImageBlob* data) {
  for (const auto& name : RUN_ORDER) {
    if (ops_.find(name) != ops_.end()) {
      ops_[name]->Run(im, data);
    }
  }
}

}  // namespace PaddleDetection
46
paddle_detection/deploy/pptracking/cpp/src/sde_predictor.cc
Normal file
@@ -0,0 +1,46 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sstream>
// for setprecision
#include <chrono>
#include <iomanip>
#include "include/sde_predictor.h"

using namespace paddle_infer;  // NOLINT

namespace PaddleDetection {

// Load Model and create model predictor
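// NOTE: the SDE pipeline (separate detection + ReID models) is not
// implemented in this C++ deployment yet; every entry point below throws.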
void SDEPredictor::LoadModel(const std::string& det_model_dir,
                             const std::string& reid_model_dir,
                             const std::string& run_mode) {
  throw "Not Implement";
}

void SDEPredictor::Preprocess(const cv::Mat& ori_im) { throw "Not Implement"; }

void SDEPredictor::Postprocess(const cv::Mat dets,
                               const cv::Mat emb,
                               MOTResult* result) {
  throw "Not Implement";
}

void SDEPredictor::Predict(const std::vector<cv::Mat> imgs,
                           const double threshold,
                           MOTResult* result,
                           std::vector<double>* times) {
  throw "Not Implement";
}

}  // namespace PaddleDetection
304
paddle_detection/deploy/pptracking/cpp/src/tracker.cc
Normal file
@@ -0,0 +1,304 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The code is based on:
// https://github.com/CnybTseng/JDE/blob/master/platforms/common/jdetracker.cpp
// The copyright of CnybTseng/JDE is as follows:
// MIT License

#include <float.h>  // for FLT_MAX
#include <limits.h>
#include <stdio.h>
#include <algorithm>
#include <map>

#include "include/lapjv.h"
#include "include/tracker.h"

#define mat2vec4f(m)             \
  cv::Vec4f(*m.ptr<float>(0, 0), \
            *m.ptr<float>(0, 1), \
            *m.ptr<float>(0, 2), \
            *m.ptr<float>(0, 3))

namespace PaddleDetection {
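
// 0.95 quantiles of the chi-square distribution, indexed by degrees of
// freedom; used to gate Mahalanobis distances between predicted tracks
// and detections.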
static std::map<int, float> chi2inv95 = {{1, 3.841459f},
                                         {2, 5.991465f},
                                         {3, 7.814728f},
                                         {4, 9.487729f},
                                         {5, 11.070498f},
                                         {6, 12.591587f},
                                         {7, 14.067140f},
                                         {8, 15.507313f},
                                         {9, 16.918978f}};

JDETracker *JDETracker::me = new JDETracker;

JDETracker *JDETracker::instance(void) { return me; }

JDETracker::JDETracker(void)
    : timestamp(0), max_lost_time(30), lambda(0.98f), det_thresh(0.3f) {}
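
// One tracking step. The matching cascade follows the JDE reference:
//   1. wrap detections (ltrb, score, embedding) as candidate trajectories;
//   2. Kalman-predict every tracked and lost trajectory;
//   3. first association on the fused embedding + motion cost;
//   4. second association of the leftovers on IoU distance;
//   5. match unconfirmed tracks, mark remaining misses as lost/removed;
//   6. start new trajectories from high-score unmatched detections;
//   7. prune stale lost tracks and rebuild the bookkeeping pools.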
bool JDETracker::update(const cv::Mat &dets,
                        const cv::Mat &emb,
                        std::vector<Track> *tracks) {
  ++timestamp;
  TrajectoryPool candidates(dets.rows);
  for (int i = 0; i < dets.rows; ++i) {
    float score = *dets.ptr<float>(i, 1);
    const cv::Mat &ltrb_ = dets(cv::Rect(2, i, 4, 1));
    cv::Vec4f ltrb = mat2vec4f(ltrb_);
    const cv::Mat &embedding = emb(cv::Rect(0, i, emb.cols, 1));
    candidates[i] = Trajectory(ltrb, score, embedding);
  }

  TrajectoryPtrPool tracked_trajectories;
  TrajectoryPtrPool unconfirmed_trajectories;
  for (size_t i = 0; i < this->tracked_trajectories.size(); ++i) {
    if (this->tracked_trajectories[i].is_activated)
      tracked_trajectories.push_back(&this->tracked_trajectories[i]);
    else
      unconfirmed_trajectories.push_back(&this->tracked_trajectories[i]);
  }

  TrajectoryPtrPool trajectory_pool =
      tracked_trajectories + &(this->lost_trajectories);

  for (size_t i = 0; i < trajectory_pool.size(); ++i)
    trajectory_pool[i]->predict();

  Match matches;
  std::vector<int> mismatch_row;
  std::vector<int> mismatch_col;

  cv::Mat cost = motion_distance(trajectory_pool, candidates);
  linear_assignment(cost, 0.7f, &matches, &mismatch_row, &mismatch_col);

  MatchIterator miter;
  TrajectoryPtrPool activated_trajectories;
  TrajectoryPtrPool retrieved_trajectories;

  for (miter = matches.begin(); miter != matches.end(); miter++) {
    Trajectory *pt = trajectory_pool[miter->first];
    Trajectory &ct = candidates[miter->second];
    if (pt->state == Tracked) {
      pt->update(&ct, timestamp);
      activated_trajectories.push_back(pt);
    } else {
      pt->reactivate(&ct, timestamp);
      retrieved_trajectories.push_back(pt);
    }
  }

  TrajectoryPtrPool next_candidates(mismatch_col.size());
  for (size_t i = 0; i < mismatch_col.size(); ++i)
    next_candidates[i] = &candidates[mismatch_col[i]];

  TrajectoryPtrPool next_trajectory_pool;
  for (size_t i = 0; i < mismatch_row.size(); ++i) {
    int j = mismatch_row[i];
    if (trajectory_pool[j]->state == Tracked)
      next_trajectory_pool.push_back(trajectory_pool[j]);
  }

  cost = iou_distance(next_trajectory_pool, next_candidates);
  linear_assignment(cost, 0.5f, &matches, &mismatch_row, &mismatch_col);

  for (miter = matches.begin(); miter != matches.end(); miter++) {
    Trajectory *pt = next_trajectory_pool[miter->first];
    Trajectory *ct = next_candidates[miter->second];
    if (pt->state == Tracked) {
      pt->update(ct, timestamp);
      activated_trajectories.push_back(pt);
    } else {
      pt->reactivate(ct, timestamp);
      retrieved_trajectories.push_back(pt);
    }
  }

  TrajectoryPtrPool lost_trajectories;
  for (size_t i = 0; i < mismatch_row.size(); ++i) {
    Trajectory *pt = next_trajectory_pool[mismatch_row[i]];
    if (pt->state != Lost) {
      pt->mark_lost();
      lost_trajectories.push_back(pt);
    }
  }

  TrajectoryPtrPool nnext_candidates(mismatch_col.size());
  for (size_t i = 0; i < mismatch_col.size(); ++i)
    nnext_candidates[i] = next_candidates[mismatch_col[i]];
  cost = iou_distance(unconfirmed_trajectories, nnext_candidates);
  linear_assignment(cost, 0.7f, &matches, &mismatch_row, &mismatch_col);

  for (miter = matches.begin(); miter != matches.end(); miter++) {
    unconfirmed_trajectories[miter->first]->update(
        nnext_candidates[miter->second], timestamp);
    activated_trajectories.push_back(unconfirmed_trajectories[miter->first]);
  }

  TrajectoryPtrPool removed_trajectories;

  for (size_t i = 0; i < mismatch_row.size(); ++i) {
    unconfirmed_trajectories[mismatch_row[i]]->mark_removed();
    removed_trajectories.push_back(unconfirmed_trajectories[mismatch_row[i]]);
  }

  for (size_t i = 0; i < mismatch_col.size(); ++i) {
    if (nnext_candidates[mismatch_col[i]]->score < det_thresh) continue;
    nnext_candidates[mismatch_col[i]]->activate(timestamp);
    activated_trajectories.push_back(nnext_candidates[mismatch_col[i]]);
  }

  for (size_t i = 0; i < this->lost_trajectories.size(); ++i) {
    Trajectory &lt = this->lost_trajectories[i];
    if (timestamp - lt.timestamp > max_lost_time) {
      lt.mark_removed();
      removed_trajectories.push_back(&lt);
    }
  }

  TrajectoryPoolIterator piter;
  for (piter = this->tracked_trajectories.begin();
       piter != this->tracked_trajectories.end();) {
    if (piter->state != Tracked)
      piter = this->tracked_trajectories.erase(piter);
    else
      ++piter;
  }

  this->tracked_trajectories += activated_trajectories;
  this->tracked_trajectories += retrieved_trajectories;

  this->lost_trajectories -= this->tracked_trajectories;
  this->lost_trajectories += lost_trajectories;
  this->lost_trajectories -= this->removed_trajectories;
  this->removed_trajectories += removed_trajectories;
  remove_duplicate_trajectory(&this->tracked_trajectories,
                              &this->lost_trajectories);

  tracks->clear();
  for (size_t i = 0; i < this->tracked_trajectories.size(); ++i) {
    if (this->tracked_trajectories[i].is_activated) {
      Track track = {this->tracked_trajectories[i].id,
                     this->tracked_trajectories[i].score,
                     this->tracked_trajectories[i].ltrb};
      tracks->push_back(track);
    }
  }
  return 0;
}
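
// Fused assignment cost for the first association stage: a convex
// combination (weight lambda) of appearance (cosine embedding) distance
// and motion (squared Mahalanobis) distance, gated by the chi-square
// threshold so implausible motion makes a pair unmatchable.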
cv::Mat JDETracker::motion_distance(const TrajectoryPtrPool &a,
                                    const TrajectoryPool &b) {
  if (0 == a.size() || 0 == b.size())
    return cv::Mat(a.size(), b.size(), CV_32F);

  cv::Mat edists = embedding_distance(a, b);
  cv::Mat mdists = mahalanobis_distance(a, b);
  cv::Mat fdists = lambda * edists + (1 - lambda) * mdists;

  const float gate_thresh = chi2inv95[4];
  for (int i = 0; i < fdists.rows; ++i) {
    for (int j = 0; j < fdists.cols; ++j) {
      if (*mdists.ptr<float>(i, j) > gate_thresh)
        *fdists.ptr<float>(i, j) = FLT_MAX;
    }
  }

  return fdists;
}
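
// Solve the rectangular assignment problem with the LAPJV solver. Pairs
// whose cost exceeds cost_limit stay unassigned; unmatched rows and
// columns are reported through mismatch_row / mismatch_col.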
void JDETracker::linear_assignment(const cv::Mat &cost,
                                   float cost_limit,
                                   Match *matches,
                                   std::vector<int> *mismatch_row,
                                   std::vector<int> *mismatch_col) {
  matches->clear();
  mismatch_row->clear();
  mismatch_col->clear();
  if (cost.empty()) {
    for (int i = 0; i < cost.rows; ++i) mismatch_row->push_back(i);
    for (int i = 0; i < cost.cols; ++i) mismatch_col->push_back(i);
    return;
  }

  float opt = 0;
  cv::Mat x(cost.rows, 1, CV_32S);
  cv::Mat y(cost.cols, 1, CV_32S);

  lapjv_internal(cost,
                 true,
                 cost_limit,
                 reinterpret_cast<int *>(x.data),
                 reinterpret_cast<int *>(y.data));

  for (int i = 0; i < x.rows; ++i) {
    int j = *x.ptr<int>(i);
    if (j >= 0)
      matches->insert({i, j});
    else
      mismatch_row->push_back(i);
  }

  for (int i = 0; i < y.rows; ++i) {
    int j = *y.ptr<int>(i);
    if (j < 0) mismatch_col->push_back(i);
  }

  return;
}
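
// Resolve tracks that appear in both the tracked and the lost pool with a
// high IoU overlap: the copy that has been alive longer is kept, the
// other one is erased from its pool.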
void JDETracker::remove_duplicate_trajectory(TrajectoryPool *a,
                                             TrajectoryPool *b,
                                             float iou_thresh) {
  if (a->size() == 0 || b->size() == 0) return;

  cv::Mat dist = iou_distance(*a, *b);
  cv::Mat mask = dist < iou_thresh;
  std::vector<cv::Point> idx;
  cv::findNonZero(mask, idx);

  std::vector<int> da;
  std::vector<int> db;
  for (size_t i = 0; i < idx.size(); ++i) {
    int ta = (*a)[idx[i].y].timestamp - (*a)[idx[i].y].starttime;
    int tb = (*b)[idx[i].x].timestamp - (*b)[idx[i].x].starttime;
    if (ta > tb)
      db.push_back(idx[i].x);
    else
      da.push_back(idx[i].y);
  }

  int id = 0;
  TrajectoryPoolIterator piter;
  for (piter = a->begin(); piter != a->end();) {
    std::vector<int>::iterator iter = find(da.begin(), da.end(), id++);
    if (iter != da.end())
      piter = a->erase(piter);
    else
      ++piter;
  }

  id = 0;
  for (piter = b->begin(); piter != b->end();) {
    std::vector<int>::iterator iter = find(db.begin(), db.end(), id++);
    if (iter != db.end())
      piter = b->erase(piter);
    else
      ++piter;
  }
}

}  // namespace PaddleDetection
517
paddle_detection/deploy/pptracking/cpp/src/trajectory.cc
Normal file
@@ -0,0 +1,517 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The code is based on:
// https://github.com/CnybTseng/JDE/blob/master/platforms/common/trajectory.cpp
// The copyright of CnybTseng/JDE is as follows:
// MIT License

#include "include/trajectory.h"
#include <algorithm>

namespace PaddleDetection {
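
// Kalman filter over an 8-dimensional constant-velocity state
// [x, y, aspect, height, vx, vy, va, vh]; the position and velocity noise
// terms are scaled by the measured box height, as in DeepSORT-style
// trackers.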
void TKalmanFilter::init(const cv::Mat &measurement) {
  measurement.copyTo(statePost(cv::Rect(0, 0, 1, 4)));
  statePost(cv::Rect(0, 4, 1, 4)).setTo(0);
  statePost.copyTo(statePre);

  float varpos = 2 * std_weight_position * (*measurement.ptr<float>(3));
  varpos *= varpos;
  float varvel = 10 * std_weight_velocity * (*measurement.ptr<float>(3));
  varvel *= varvel;

  errorCovPost.setTo(0);
  *errorCovPost.ptr<float>(0, 0) = varpos;
  *errorCovPost.ptr<float>(1, 1) = varpos;
  *errorCovPost.ptr<float>(2, 2) = 1e-4f;
  *errorCovPost.ptr<float>(3, 3) = varpos;
  *errorCovPost.ptr<float>(4, 4) = varvel;
  *errorCovPost.ptr<float>(5, 5) = varvel;
  *errorCovPost.ptr<float>(6, 6) = 1e-10f;
  *errorCovPost.ptr<float>(7, 7) = varvel;
  errorCovPost.copyTo(errorCovPre);
}

const cv::Mat &TKalmanFilter::predict() {
  float varpos = std_weight_position * (*statePre.ptr<float>(3));
  varpos *= varpos;
  float varvel = std_weight_velocity * (*statePre.ptr<float>(3));
  varvel *= varvel;

  processNoiseCov.setTo(0);
  *processNoiseCov.ptr<float>(0, 0) = varpos;
  *processNoiseCov.ptr<float>(1, 1) = varpos;
  *processNoiseCov.ptr<float>(2, 2) = 1e-4f;
  *processNoiseCov.ptr<float>(3, 3) = varpos;
  *processNoiseCov.ptr<float>(4, 4) = varvel;
  *processNoiseCov.ptr<float>(5, 5) = varvel;
  *processNoiseCov.ptr<float>(6, 6) = 1e-10f;
  *processNoiseCov.ptr<float>(7, 7) = varvel;

  return cv::KalmanFilter::predict();
}

const cv::Mat &TKalmanFilter::correct(const cv::Mat &measurement) {
  float varpos = std_weight_position * (*measurement.ptr<float>(3));
  varpos *= varpos;

  measurementNoiseCov.setTo(0);
  *measurementNoiseCov.ptr<float>(0, 0) = varpos;
  *measurementNoiseCov.ptr<float>(1, 1) = varpos;
  *measurementNoiseCov.ptr<float>(2, 2) = 1e-2f;
  *measurementNoiseCov.ptr<float>(3, 3) = varpos;

  return cv::KalmanFilter::correct(measurement);
}

void TKalmanFilter::project(cv::Mat *mean, cv::Mat *covariance) const {
  float varpos = std_weight_position * (*statePost.ptr<float>(3));
  varpos *= varpos;

  cv::Mat measurementNoiseCov_ = cv::Mat::eye(4, 4, CV_32F);
  *measurementNoiseCov_.ptr<float>(0, 0) = varpos;
  *measurementNoiseCov_.ptr<float>(1, 1) = varpos;
  *measurementNoiseCov_.ptr<float>(2, 2) = 1e-2f;
  *measurementNoiseCov_.ptr<float>(3, 3) = varpos;

  *mean = measurementMatrix * statePost;
  cv::Mat temp = measurementMatrix * errorCovPost;
  gemm(temp,
       measurementMatrix,
       1,
       measurementNoiseCov_,
       1,
       *covariance,
       cv::GEMM_2_T);
}

int Trajectory::count = 0;

const cv::Mat &Trajectory::predict(void) {
  if (state != Tracked) *cv::KalmanFilter::statePost.ptr<float>(7) = 0;
  return TKalmanFilter::predict();
}

void Trajectory::update(Trajectory *traj,
                        int timestamp_,
                        bool update_embedding_) {
  timestamp = timestamp_;
  ++length;
  ltrb = traj->ltrb;
  xyah = traj->xyah;
  TKalmanFilter::correct(cv::Mat(traj->xyah));
  state = Tracked;
  is_activated = true;
  score = traj->score;
  if (update_embedding_) update_embedding(traj->current_embedding);
}

void Trajectory::activate(int timestamp_) {
  id = next_id();
  TKalmanFilter::init(cv::Mat(xyah));
  length = 0;
  state = Tracked;
  if (timestamp_ == 1) {
    is_activated = true;
  }
  timestamp = timestamp_;
  starttime = timestamp_;
}

void Trajectory::reactivate(Trajectory *traj, int timestamp_, bool newid) {
  TKalmanFilter::correct(cv::Mat(traj->xyah));
  update_embedding(traj->current_embedding);
  length = 0;
  state = Tracked;
  is_activated = true;
  timestamp = timestamp_;
  if (newid) id = next_id();
}
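
// Keep an exponential moving average (weight eta) of the L2-normalized
// appearance embedding, so a track's appearance model changes smoothly
// across frames.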
void Trajectory::update_embedding(const cv::Mat &embedding) {
  current_embedding = embedding / cv::norm(embedding);
  if (smooth_embedding.empty()) {
    smooth_embedding = current_embedding;
  } else {
    smooth_embedding = eta * smooth_embedding + (1 - eta) * current_embedding;
  }
  smooth_embedding = smooth_embedding / cv::norm(smooth_embedding);
}
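
// The pool operators below implement id-keyed set arithmetic on the
// trajectory containers used by the tracker's bookkeeping: '+' unions two
// pools without duplicating ids, '+=' appends new entries in place, and
// '-' / '-=' drop entries whose id appears in the second pool.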
TrajectoryPool operator+(const TrajectoryPool &a, const TrajectoryPool &b) {
  TrajectoryPool sum;
  sum.insert(sum.end(), a.begin(), a.end());

  std::vector<int> ids(a.size());
  for (size_t i = 0; i < a.size(); ++i) ids[i] = a[i].id;

  for (size_t i = 0; i < b.size(); ++i) {
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), b[i].id);
    if (iter == ids.end()) {
      sum.push_back(b[i]);
      ids.push_back(b[i].id);
    }
  }

  return sum;
}

TrajectoryPool operator+(const TrajectoryPool &a, const TrajectoryPtrPool &b) {
  TrajectoryPool sum;
  sum.insert(sum.end(), a.begin(), a.end());

  std::vector<int> ids(a.size());
  for (size_t i = 0; i < a.size(); ++i) ids[i] = a[i].id;

  for (size_t i = 0; i < b.size(); ++i) {
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), b[i]->id);
    if (iter == ids.end()) {
      sum.push_back(*b[i]);
      ids.push_back(b[i]->id);
    }
  }

  return sum;
}

TrajectoryPool &operator+=(TrajectoryPool &a,  // NOLINT
                           const TrajectoryPtrPool &b) {
  std::vector<int> ids(a.size());
  for (size_t i = 0; i < a.size(); ++i) ids[i] = a[i].id;

  for (size_t i = 0; i < b.size(); ++i) {
    if (b[i]->smooth_embedding.empty()) continue;
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), b[i]->id);
    if (iter == ids.end()) {
      a.push_back(*b[i]);
      ids.push_back(b[i]->id);
    }
  }

  return a;
}

TrajectoryPool operator-(const TrajectoryPool &a, const TrajectoryPool &b) {
  TrajectoryPool dif;
  std::vector<int> ids(b.size());
  for (size_t i = 0; i < b.size(); ++i) ids[i] = b[i].id;

  for (size_t i = 0; i < a.size(); ++i) {
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), a[i].id);
    if (iter == ids.end()) dif.push_back(a[i]);
  }

  return dif;
}

TrajectoryPool &operator-=(TrajectoryPool &a,  // NOLINT
                           const TrajectoryPool &b) {
  std::vector<int> ids(b.size());
  for (size_t i = 0; i < b.size(); ++i) ids[i] = b[i].id;

  TrajectoryPoolIterator piter;
  for (piter = a.begin(); piter != a.end();) {
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), piter->id);
    if (iter == ids.end())
      ++piter;
    else
      piter = a.erase(piter);
  }

  return a;
}

TrajectoryPtrPool operator+(const TrajectoryPtrPool &a,
                            const TrajectoryPtrPool &b) {
  TrajectoryPtrPool sum;
  sum.insert(sum.end(), a.begin(), a.end());

  std::vector<int> ids(a.size());
  for (size_t i = 0; i < a.size(); ++i) ids[i] = a[i]->id;

  for (size_t i = 0; i < b.size(); ++i) {
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), b[i]->id);
    if (iter == ids.end()) {
      sum.push_back(b[i]);
      ids.push_back(b[i]->id);
    }
  }

  return sum;
}

TrajectoryPtrPool operator+(const TrajectoryPtrPool &a, TrajectoryPool *b) {
  TrajectoryPtrPool sum;
  sum.insert(sum.end(), a.begin(), a.end());

  std::vector<int> ids(a.size());
  for (size_t i = 0; i < a.size(); ++i) ids[i] = a[i]->id;

  for (size_t i = 0; i < b->size(); ++i) {
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), (*b)[i].id);
    if (iter == ids.end()) {
      sum.push_back(&(*b)[i]);
      ids.push_back((*b)[i].id);
    }
  }

  return sum;
}

TrajectoryPtrPool operator-(const TrajectoryPtrPool &a,
                            const TrajectoryPtrPool &b) {
  TrajectoryPtrPool dif;
  std::vector<int> ids(b.size());
  for (size_t i = 0; i < b.size(); ++i) ids[i] = b[i]->id;

  for (size_t i = 0; i < a.size(); ++i) {
    std::vector<int>::iterator iter = find(ids.begin(), ids.end(), a[i]->id);
    if (iter == ids.end()) dif.push_back(a[i]);
  }

  return dif;
}
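
// Appearance cost between two pools: cosine distance between the smoothed
// embeddings, clamped to [0, 2]. The three overloads only differ in
// whether each side stores trajectory objects or pointers.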
cv::Mat embedding_distance(const TrajectoryPool &a, const TrajectoryPool &b) {
  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      cv::Mat u = a[i].smooth_embedding;
      cv::Mat v = b[j].smooth_embedding;
      double uv = u.dot(v);
      double uu = u.dot(u);
      double vv = v.dot(v);
      double dist = std::abs(1. - uv / std::sqrt(uu * vv));
      // double dist = cv::norm(a[i].smooth_embedding, b[j].smooth_embedding,
      // cv::NORM_L2);
      distsi[j] = static_cast<float>(std::max(std::min(dist, 2.), 0.));
    }
  }
  return dists;
}

cv::Mat embedding_distance(const TrajectoryPtrPool &a,
                           const TrajectoryPtrPool &b) {
  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      // double dist = cv::norm(a[i]->smooth_embedding, b[j]->smooth_embedding,
      // cv::NORM_L2);
      // distsi[j] = static_cast<float>(dist);
      cv::Mat u = a[i]->smooth_embedding;
      cv::Mat v = b[j]->smooth_embedding;
      double uv = u.dot(v);
      double uu = u.dot(u);
      double vv = v.dot(v);
      double dist = std::abs(1. - uv / std::sqrt(uu * vv));
      distsi[j] = static_cast<float>(std::max(std::min(dist, 2.), 0.));
    }
  }

  return dists;
}

cv::Mat embedding_distance(const TrajectoryPtrPool &a,
                           const TrajectoryPool &b) {
  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      // double dist = cv::norm(a[i]->smooth_embedding, b[j].smooth_embedding,
      // cv::NORM_L2);
      // distsi[j] = static_cast<float>(dist);
      cv::Mat u = a[i]->smooth_embedding;
      cv::Mat v = b[j].smooth_embedding;
      double uv = u.dot(v);
      double uu = u.dot(u);
      double vv = v.dot(v);
      double dist = std::abs(1. - uv / std::sqrt(uu * vv));
      distsi[j] = static_cast<float>(std::max(std::min(dist, 2.), 0.));
    }
  }

  return dists;
}

cv::Mat mahalanobis_distance(const TrajectoryPool &a, const TrajectoryPool &b) {
  std::vector<cv::Mat> means(a.size());
  std::vector<cv::Mat> icovariances(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    cv::Mat covariance;
    a[i].project(&means[i], &covariance);
    cv::invert(covariance, icovariances[i]);
  }

  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      const cv::Mat x(b[j].xyah);
      float dist =
          static_cast<float>(cv::Mahalanobis(x, means[i], icovariances[i]));
      distsi[j] = dist * dist;
    }
  }

  return dists;
}

cv::Mat mahalanobis_distance(const TrajectoryPtrPool &a,
                             const TrajectoryPtrPool &b) {
  std::vector<cv::Mat> means(a.size());
  std::vector<cv::Mat> icovariances(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    cv::Mat covariance;
    a[i]->project(&means[i], &covariance);
    cv::invert(covariance, icovariances[i]);
  }

  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      const cv::Mat x(b[j]->xyah);
      float dist =
          static_cast<float>(cv::Mahalanobis(x, means[i], icovariances[i]));
      distsi[j] = dist * dist;
    }
  }

  return dists;
}

cv::Mat mahalanobis_distance(const TrajectoryPtrPool &a,
                             const TrajectoryPool &b) {
  std::vector<cv::Mat> means(a.size());
  std::vector<cv::Mat> icovariances(a.size());

  for (size_t i = 0; i < a.size(); ++i) {
    cv::Mat covariance;
    a[i]->project(&means[i], &covariance);
    cv::invert(covariance, icovariances[i]);
  }

  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      const cv::Mat x(b[j].xyah);
      float dist =
          static_cast<float>(cv::Mahalanobis(x, means[i], icovariances[i]));
      distsi[j] = dist * dist;
    }
  }

  return dists;
}
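
// IoU cost between axis-aligned ltrb boxes: 1 - intersection / union,
// with a quick rejection test when the boxes do not overlap at all.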
static inline float calc_inter_area(const cv::Vec4f &a, const cv::Vec4f &b) {
  if (a[2] < b[0] || a[0] > b[2] || a[3] < b[1] || a[1] > b[3]) return 0.f;

  float w = std::min(a[2], b[2]) - std::max(a[0], b[0]);
  float h = std::min(a[3], b[3]) - std::max(a[1], b[1]);
  return w * h;
}

cv::Mat iou_distance(const TrajectoryPool &a, const TrajectoryPool &b) {
  std::vector<float> areaa(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    float w = a[i].ltrb[2] - a[i].ltrb[0];
    float h = a[i].ltrb[3] - a[i].ltrb[1];
    areaa[i] = w * h;
  }

  std::vector<float> areab(b.size());
  for (size_t j = 0; j < b.size(); ++j) {
    float w = b[j].ltrb[2] - b[j].ltrb[0];
    float h = b[j].ltrb[3] - b[j].ltrb[1];
    areab[j] = w * h;
  }

  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    const cv::Vec4f &boxa = a[i].ltrb;
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      const cv::Vec4f &boxb = b[j].ltrb;
      float inters = calc_inter_area(boxa, boxb);
      distsi[j] = 1.f - inters / (areaa[i] + areab[j] - inters);
    }
  }

  return dists;
}

cv::Mat iou_distance(const TrajectoryPtrPool &a, const TrajectoryPtrPool &b) {
  std::vector<float> areaa(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    float w = a[i]->ltrb[2] - a[i]->ltrb[0];
    float h = a[i]->ltrb[3] - a[i]->ltrb[1];
    areaa[i] = w * h;
  }

  std::vector<float> areab(b.size());
  for (size_t j = 0; j < b.size(); ++j) {
    float w = b[j]->ltrb[2] - b[j]->ltrb[0];
    float h = b[j]->ltrb[3] - b[j]->ltrb[1];
    areab[j] = w * h;
  }

  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    const cv::Vec4f &boxa = a[i]->ltrb;
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      const cv::Vec4f &boxb = b[j]->ltrb;
      float inters = calc_inter_area(boxa, boxb);
      distsi[j] = 1.f - inters / (areaa[i] + areab[j] - inters);
    }
  }

  return dists;
}

cv::Mat iou_distance(const TrajectoryPtrPool &a, const TrajectoryPool &b) {
  std::vector<float> areaa(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    float w = a[i]->ltrb[2] - a[i]->ltrb[0];
    float h = a[i]->ltrb[3] - a[i]->ltrb[1];
    areaa[i] = w * h;
  }

  std::vector<float> areab(b.size());
  for (size_t j = 0; j < b.size(); ++j) {
    float w = b[j].ltrb[2] - b[j].ltrb[0];
    float h = b[j].ltrb[3] - b[j].ltrb[1];
    areab[j] = w * h;
  }

  cv::Mat dists(a.size(), b.size(), CV_32F);
  for (size_t i = 0; i < a.size(); ++i) {
    const cv::Vec4f &boxa = a[i]->ltrb;
    float *distsi = dists.ptr<float>(i);
    for (size_t j = 0; j < b.size(); ++j) {
      const cv::Vec4f &boxb = b[j].ltrb;
      float inters = calc_inter_area(boxa, boxb);
      distsi[j] = 1.f - inters / (areaa[i] + areab[j] - inters);
    }
  }

  return dists;
}

}  // namespace PaddleDetection