Replace the document detection model
47 paddle_detection/benchmark/README.md (Normal file)
@@ -0,0 +1,47 @@
# General detection benchmark test scripts

```
├── benchmark
│   ├── analysis_log.py
│   ├── prepare.sh
│   ├── README.md
│   ├── run_all.sh
│   ├── run_benchmark.sh
```

## Script descriptions

### prepare.sh

Data preparation script; automatically downloads the required datasets and models.

### run_all.sh

Main entry script; runs the benchmark plan for all supported models.

### run_benchmark.sh

Single-model script; runs the benchmark plan for one specified model.

## Docker runtime environment

* docker image: registry.baidubce.com/paddlepaddle/paddle:2.1.2-gpu-cuda10.2-cudnn7
* paddle = 2.1.2
* python = 3.7
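
A container can be started from this image roughly as follows (a sketch; the container name, mount path, and GPU flag are illustrative and not part of the original instructions):

```
# Start the benchmark container (illustrative; assumes the NVIDIA container toolkit is installed)
docker run --gpus all --name paddle_det_benchmark -it \
    -v $PWD:/workspace -w /workspace \
    registry.baidubce.com/paddlepaddle/paddle:2.1.2-gpu-cuda10.2-cudnn7 /bin/bash
```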

## Running the benchmark

### Run all models

```
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection
bash benchmark/run_all.sh
```

### Run a specific model

* Usage: bash run_benchmark.sh ${run_mode} ${batch_size} ${fp_item} ${max_epoch} ${model_name}
* model_name: faster_rcnn, fcos, deformable_detr, gfl, hrnet, higherhrnet, solov2, jde, fairmot

```
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection
bash benchmark/prepare.sh

# single GPU
CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh sp 2 fp32 1 faster_rcnn
# multiple GPUs
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh mp 2 fp32 1 faster_rcnn
```
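
The fp_item argument also accepts fp16; for example (a sketch, assuming the selected model supports fp16 training in this setup):

```
# fp16 run of the same model (illustrative; mixed-precision support depends on the model)
CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh sp 2 fp16 1 faster_rcnn
```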

48 paddle_detection/benchmark/configs/faster_rcnn_r50_fpn_1x_coco.yml (Normal file)
@@ -0,0 +1,48 @@
_BASE_: [
  '../../configs/datasets/coco_detection.yml',
  '../../configs/runtime.yml',
  '../../configs/faster_rcnn/_base_/optimizer_1x.yml',
  '../../configs/faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',
]
weights: output/faster_rcnn_r50_fpn_1x_coco/model_final

worker_num: 2
TrainReader:
  sample_transforms:
  - Decode: {}
  - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
  - RandomFlip: {prob: 0.5}
  - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}
  - Permute: {}
  batch_transforms:
  - PadBatch: {pad_to_stride: 32}
  batch_size: 1
  shuffle: true
  drop_last: true
  collate_batch: false


EvalReader:
  sample_transforms:
  - Decode: {}
  - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
  - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}
  - Permute: {}
  batch_transforms:
  - PadBatch: {pad_to_stride: 32}
  batch_size: 1
  shuffle: false
  drop_last: false


TestReader:
  sample_transforms:
  - Decode: {}
  - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
  - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}
  - Permute: {}
  batch_transforms:
  - PadBatch: {pad_to_stride: 32}
  batch_size: 1
  shuffle: false
  drop_last: false
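
This is the config that run_benchmark.sh selects for the faster_rcnn item. A minimal sketch of an equivalent manual launch, reusing the same -o overrides that the script assembles (run from the PaddleDetection repo root after benchmark/prepare.sh has fetched the lite data):

```
# Manual single-GPU launch of the benchmark config above (sketch; the override values mirror the
# TrainReader.batch_size / epoch / log_iter / LearningRate.base_lr settings applied by run_benchmark.sh)
CUDA_VISIBLE_DEVICES=0 python3.7 -u tools/train.py \
    -c benchmark/configs/faster_rcnn_r50_fpn_1x_coco.yml \
    -o TrainReader.batch_size=2 epoch=1 log_iter=1 LearningRate.base_lr=0.001
```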

17 paddle_detection/benchmark/prepare.sh (Normal file)
@@ -0,0 +1,17 @@
#!/usr/bin/env bash

pip install -U pip Cython
pip install -r requirements.txt

# keep the COCO download helper, clear the rest of dataset/coco, then put the helper back
mv ./dataset/coco/download_coco.py . && rm -rf ./dataset/coco/* && mv ./download_coco.py ./dataset/coco/
# prepare lite train data
wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_benchmark.tar
cd ./dataset/coco/ && tar -xvf coco_benchmark.tar && mv -u coco_benchmark/* .
rm -rf coco_benchmark/

cd ../../
rm -rf ./dataset/mot/*
# prepare mot mini train data
wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/mot_benchmark.tar
cd ./dataset/mot/ && tar -xvf mot_benchmark.tar && mv -u mot_benchmark/* .
rm -rf mot_benchmark/
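
prepare.sh deliberately preserves the repository's download_coco.py helper while wiping dataset/coco, so the full dataset can still be fetched later if needed; a hedged sketch (the helper's exact invocation is an assumption, and the benchmark itself only needs coco_benchmark.tar):

```
# Hypothetical full-COCO download via the preserved helper script (invocation assumed)
cd PaddleDetection
python3.7 dataset/coco/download_coco.py
```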

47 paddle_detection/benchmark/run_all.sh (Normal file)
@@ -0,0 +1,47 @@
# Use docker: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7 paddle=2.1.2 python3.7
#
# Usage:
# git clone https://github.com/PaddlePaddle/PaddleDetection.git
# cd PaddleDetection
# bash benchmark/run_all.sh
log_path=${LOG_PATH_INDEX_DIR:-$(pwd)}  # set by the benchmark system; when no profiling run is needed, log_path points to the directory that stores the speed logs

# run prepare.sh
bash benchmark/prepare.sh

model_name_list=(faster_rcnn fcos deformable_detr gfl hrnet higherhrnet solov2 jde fairmot)
fp_item_list=(fp32)
max_epoch=2

for model_item in ${model_name_list[@]}; do
    for fp_item in ${fp_item_list[@]}; do
        case ${model_item} in
            faster_rcnn) bs_list=(1 8) ;;
            fcos) bs_list=(2) ;;
            deformable_detr) bs_list=(2) ;;
            gfl) bs_list=(2) ;;
            hrnet) bs_list=(64) ;;
            higherhrnet) bs_list=(20) ;;
            solov2) bs_list=(2) ;;
            jde) bs_list=(4) ;;
            fairmot) bs_list=(6) ;;
            *) echo "wrong model_name"; exit 1;
        esac
        for bs_item in ${bs_list[@]}
        do
            run_mode=sp
            log_name=detection_${model_item}_bs${bs_item}_${fp_item}  # e.g. clas_MobileNetv1_mp_bs32_fp32_8
            echo "index is speed, 1gpus, begin, ${log_name}"
            CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} \
                ${fp_item} ${max_epoch} ${model_item} | tee ${log_path}/${log_name}_speed_1gpus 2>&1
            sleep 60

            run_mode=mp
            log_name=detection_${model_item}_bs${bs_item}_${fp_item}  # e.g. clas_MobileNetv1_mp_bs32_fp32_8
            echo "index is speed, 8gpus, run_mode is multi_process, begin, ${log_name}"
            CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh ${run_mode} \
                ${bs_item} ${fp_item} ${max_epoch} ${model_item} | tee ${log_path}/${log_name}_speed_8gpus8p 2>&1
            sleep 60
        done
    done
done
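
run_all.sh writes its speed logs to LOG_PATH_INDEX_DIR when the benchmark system provides it, falling back to the current directory; a sketch of a manual run with an explicit log directory (the path is illustrative):

```
# Manual run with an explicit speed-log directory (example path only)
export LOG_PATH_INDEX_DIR=/tmp/paddle_detection_speed_logs
mkdir -p ${LOG_PATH_INDEX_DIR}
bash benchmark/run_all.sh
```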

92 paddle_detection/benchmark/run_benchmark.sh (Normal file)
@@ -0,0 +1,92 @@
#!/usr/bin/env bash
set -xe
# Usage: CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${batch_size} ${fp_item} ${max_epoch} ${model_name}
python="python3.7"
# Parameter description
function _set_params(){
    run_mode=${1:-"sp"}           # sp|mp
    batch_size=${2:-"2"}
    fp_item=${3:-"fp32"}          # fp32|fp16
    max_epoch=${4:-"1"}
    model_item=${5:-"model_item"}
    run_log_path=${TRAIN_LOG_DIR:-$(pwd)}
    # parameters required by the log-analysis tooling
    base_batch_size=${batch_size}
    mission_name="目标检测"        # i.e. "object detection"; value consumed by the benchmark reporting system
    direction_id="0"
    ips_unit="images/s"
    skip_steps=10                 # required: steps to skip when parsing the log, since the first few steps of some models are slow
    keyword="ips:"                # required: keyword that selects the log lines carrying the throughput data
    index="1"
    model_name=${model_item}_bs${batch_size}_${fp_item}

    device=${CUDA_VISIBLE_DEVICES//,/ }
    arr=(${device})
    num_gpu_devices=${#arr[*]}
    log_file=${run_log_path}/${model_item}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
}
function _train(){
    echo "Train on ${num_gpu_devices} GPUs"
    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"

    # set runtime params
    set_optimizer_lr_sp=" "
    set_optimizer_lr_mp=" "
    # parse model_item
    case ${model_item} in
        faster_rcnn) model_yml="benchmark/configs/faster_rcnn_r50_fpn_1x_coco.yml"
            set_optimizer_lr_sp="LearningRate.base_lr=0.001" ;;
        fcos) model_yml="configs/fcos/fcos_r50_fpn_1x_coco.yml"
            set_optimizer_lr_sp="LearningRate.base_lr=0.001" ;;
        deformable_detr) model_yml="configs/deformable_detr/deformable_detr_r50_1x_coco.yml" ;;
        gfl) model_yml="configs/gfl/gfl_r50_fpn_1x_coco.yml"
            set_optimizer_lr_sp="LearningRate.base_lr=0.001" ;;
        hrnet) model_yml="configs/keypoint/hrnet/hrnet_w32_256x192.yml" ;;
        higherhrnet) model_yml="configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml" ;;
        solov2) model_yml="configs/solov2/solov2_r50_fpn_1x_coco.yml" ;;
        jde) model_yml="configs/mot/jde/jde_darknet53_30e_1088x608.yml" ;;
        fairmot) model_yml="configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml" ;;
        *) echo "Undefined model_item"; exit 1;
    esac

    set_batch_size="TrainReader.batch_size=${batch_size}"
    set_max_epoch="epoch=${max_epoch}"
    set_log_iter="log_iter=1"
    if [ ${fp_item} = "fp16" ]; then
        set_fp_item="--fp16"
    else
        set_fp_item=" "
    fi

    case ${run_mode} in
        sp) train_cmd="${python} -u tools/train.py -c ${model_yml} ${set_fp_item} \
            -o ${set_batch_size} ${set_max_epoch} ${set_log_iter} ${set_optimizer_lr_sp}" ;;
        mp) rm -rf mylog
            train_cmd="${python} -m paddle.distributed.launch --log_dir=./mylog \
            --gpus=${CUDA_VISIBLE_DEVICES} tools/train.py -c ${model_yml} ${set_fp_item} \
            -o ${set_batch_size} ${set_max_epoch} ${set_log_iter} ${set_optimizer_lr_mp}"
            log_parse_file="mylog/workerlog.0" ;;
        *) echo "choose run_mode(sp or mp)"; exit 1;
    esac

    timeout 15m ${train_cmd} > ${log_file} 2>&1
    if [ $? -ne 0 ]; then
        echo -e "${train_cmd}, FAIL"
        export job_fail_flag=1
    else
        echo -e "${train_cmd}, SUCCESS"
        export job_fail_flag=0
    fi
    kill -9 `ps -ef|grep 'python'|awk '{print $2}'`

    if [ $run_mode = "mp" -a -d mylog ]; then
        rm ${log_file}
        cp mylog/workerlog.0 ${log_file}
    fi
}

source ${BENCHMARK_ROOT}/scripts/run_model.sh  # parses performance data from benchmark-compliant logs via analysis.py; during integration the script can be fetched from https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh; comment this line out if you only want the raw training log, but re-enable it before submitting
_set_params $@
# _train  # uncomment to produce only the training log without parsing
_run  # defined in run_model.sh; it calls _train internally. Comment this line out if you only want the raw training log, but re-enable it before submitting
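
The script sources ${BENCHMARK_ROOT}/scripts/run_model.sh and writes its raw log to TRAIN_LOG_DIR (defaulting to the current directory), so both need to be in place when it is run outside run_all.sh; a sketch with illustrative paths:

```
# Illustrative standalone invocation; BENCHMARK_ROOT must contain scripts/run_model.sh
# from the PaddlePaddle/benchmark repo, and the paths below are examples only.
export BENCHMARK_ROOT=/workspace/benchmark
export TRAIN_LOG_DIR=$(pwd)/train_logs
mkdir -p ${TRAIN_LOG_DIR}
CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh sp 2 fp32 1 fcos
```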