Replace the document detection model

2024-08-27 14:42:45 +08:00
parent aea6f19951
commit 1514e09c40
2072 changed files with 254336 additions and 4967 deletions


@@ -0,0 +1,139 @@
use_gpu: true
log_iter: 10
save_dir: output
snapshot_epoch: 10
weights: output/higherhrnet_hrnet_w32_512/model_final
epoch: 300
num_joints: &num_joints 17
flip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
input_size: &input_size 512
hm_size: &hm_size 128
hm_size_2x: &hm_size_2x 256
max_people: &max_people 30
metric: COCO
IouType: keypoints
num_classes: 1
#####model
architecture: HigherHRNet
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams
HigherHRNet:
  backbone: HRNet
  hrhrnet_head: HrHRNetHead
  post_process: HrHRNetPostProcess
  flip_perm: *flip_perm
  eval_flip: true

HRNet:
  width: &width 32
  freeze_at: -1
  freeze_norm: false
  return_idx: [0]

HrHRNetHead:
  num_joints: *num_joints
  width: *width
  loss: HrHRNetLoss
  swahr: false

HrHRNetLoss:
  num_joints: *num_joints
  swahr: false
#####optimizer
LearningRate:
  base_lr: 0.001
  schedulers:
  - !PiecewiseDecay
    milestones: [200, 260]
    gamma: 0.1
  - !LinearWarmup
    start_factor: 0.001
    steps: 1000

OptimizerBuilder:
  optimizer:
    type: Adam
  regularizer: None
#####data
TrainDataset:
  !KeypointBottomUpCocoDataset
    image_dir: train2017
    anno_path: annotations/person_keypoints_train2017.json
    dataset_dir: dataset/coco
    num_joints: *num_joints
    return_bbox: False
    return_area: False
    return_class: False

EvalDataset:
  !KeypointBottomUpCocoDataset
    image_dir: val2017
    anno_path: annotations/person_keypoints_val2017.json
    dataset_dir: dataset/coco
    num_joints: *num_joints
    test_mode: true
    return_bbox: False
    return_area: False
    return_class: False

TestDataset:
  !ImageFolder
    anno_path: dataset/coco/keypoint_imagelist.txt
worker_num: 8
global_mean: &global_mean [0.485, 0.456, 0.406]
global_std: &global_std [0.229, 0.224, 0.225]
TrainReader:
  sample_transforms:
    - RandomAffine:
        max_degree: 30
        scale: [0.75, 1.5]
        max_shift: 0.2
        trainsize: [*input_size, *input_size]
        hmsize: [*hm_size, *hm_size_2x]
    - KeyPointFlip:
        flip_prob: 0.5
        flip_permutation: *flip_perm
        hmsize: [*hm_size, *hm_size_2x]
    - ToHeatmaps:
        num_joints: *num_joints
        hmsize: [*hm_size, *hm_size_2x]
        sigma: 2
    - TagGenerate:
        num_joints: *num_joints
        max_people: *max_people
    - NormalizePermute:
        mean: *global_mean
        std: *global_std
  batch_size: 20
  shuffle: true
  drop_last: true
  use_shared_memory: true

EvalReader:
  sample_transforms:
    - EvalAffine:
        size: *input_size
    - NormalizeImage:
        mean: *global_mean
        std: *global_std
        is_scale: true
    - Permute: {}
  batch_size: 1

TestReader:
  sample_transforms:
    - Decode: {}
    - EvalAffine:
        size: *input_size
    - NormalizeImage:
        mean: *global_mean
        std: *global_std
        is_scale: true
    - Permute: {}
  batch_size: 1
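
A config like the one above is normally consumed through PaddleDetection's workspace API rather than parsed by hand. Below is a minimal sketch, assuming PaddleDetection (`ppdet`) is installed and the file is saved at a path such as `configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml` (the path is an assumption; the diff does not show file names):

```python
# Minimal sketch: load the YAML above with PaddleDetection's workspace and
# build the architecture it names. Assumes ppdet is installed and the config
# is saved at the path below (the path itself is not shown in this diff).
import ppdet.modeling  # noqa: F401 -- registers HigherHRNet, HRNet, heads, losses with the workspace
from ppdet.core.workspace import load_config, create

cfg = load_config('configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml')

# Top-level scalars become plain attributes on the loaded config.
print(cfg.epoch, cfg.num_joints, cfg.input_size)   # 300 17 512

# `architecture: HigherHRNet` selects which registered module to instantiate;
# its sub-components (HRNet backbone, HrHRNetHead, HrHRNetLoss) are filled in
# from the corresponding sections of the same config.
model = create(cfg.architecture)
```

In day-to-day use the same file is simply passed to PaddleDetection's `tools/train.py` or `tools/eval.py` via the `-c` flag.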


@@ -0,0 +1,140 @@
use_gpu: true
log_iter: 10
save_dir: output
snapshot_epoch: 10
weights: output/higherhrnet_hrnet_w32_512_swahr/model_final
epoch: 300
num_joints: &num_joints 17
flip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
input_size: &input_size 512
hm_size: &hm_size 128
hm_size_2x: &hm_size_2x 256
max_people: &max_people 30
metric: COCO
IouType: keypoints
num_classes: 1
#####model
architecture: HigherHRNet
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams
HigherHRNet:
  backbone: HRNet
  hrhrnet_head: HrHRNetHead
  post_process: HrHRNetPostProcess
  flip_perm: *flip_perm
  eval_flip: true

HRNet:
  width: &width 32
  freeze_at: -1
  freeze_norm: false
  return_idx: [0]

HrHRNetHead:
  num_joints: *num_joints
  width: *width
  loss: HrHRNetLoss
  swahr: true

HrHRNetLoss:
  num_joints: *num_joints
  swahr: true
#####optimizer
LearningRate:
  base_lr: 0.001
  schedulers:
  - !PiecewiseDecay
    milestones: [200, 260]
    gamma: 0.1
  - !LinearWarmup
    start_factor: 0.001
    steps: 1000

OptimizerBuilder:
  optimizer:
    type: Adam
  regularizer: None
#####data
TrainDataset:
  !KeypointBottomUpCocoDataset
    image_dir: train2017
    anno_path: annotations/person_keypoints_train2017.json
    dataset_dir: dataset/coco
    num_joints: *num_joints
    return_bbox: False
    return_area: False
    return_class: False

EvalDataset:
  !KeypointBottomUpCocoDataset
    image_dir: val2017
    anno_path: annotations/person_keypoints_val2017.json
    dataset_dir: dataset/coco
    num_joints: *num_joints
    test_mode: true
    return_bbox: False
    return_area: False
    return_class: False

TestDataset:
  !ImageFolder
    anno_path: dataset/coco/keypoint_imagelist.txt
worker_num: 8
global_mean: &global_mean [0.485, 0.456, 0.406]
global_std: &global_std [0.229, 0.224, 0.225]
TrainReader:
  sample_transforms:
    - RandomAffine:
        max_degree: 30
        scale: [0.75, 1.5]
        max_shift: 0.2
        trainsize: [*input_size, *input_size]
        hmsize: [*hm_size, *hm_size_2x]
    - KeyPointFlip:
        flip_prob: 0.5
        flip_permutation: *flip_perm
        hmsize: [*hm_size, *hm_size_2x]
    - ToHeatmaps:
        num_joints: *num_joints
        hmsize: [*hm_size, *hm_size_2x]
        sigma: 2
    - TagGenerate:
        num_joints: *num_joints
        max_people: *max_people
    - NormalizePermute:
        mean: *global_mean
        std: *global_std
  batch_size: 16
  shuffle: true
  drop_last: true
  use_shared_memory: true

EvalReader:
  sample_transforms:
    - EvalAffine:
        size: *input_size
    - NormalizeImage:
        mean: *global_mean
        std: *global_std
        is_scale: true
    - Permute: {}
  batch_size: 1

TestReader:
  sample_transforms:
    - Decode: {}
    - EvalAffine:
        size: *input_size
    - NormalizeImage:
        mean: *global_mean
        std: *global_std
        is_scale: true
    - Permute: {}
  batch_size: 1


@@ -0,0 +1,139 @@
use_gpu: true
log_iter: 10
save_dir: output
snapshot_epoch: 10
weights: output/higherhrnet_hrnet_w32_640/model_final
epoch: 300
num_joints: &num_joints 17
flip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
input_size: &input_size 640
hm_size: &hm_size 160
hm_size_2x: &hm_size_2x 320
max_people: &max_people 30
metric: COCO
IouType: keypoints
num_classes: 1
#####model
architecture: HigherHRNet
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams
HigherHRNet:
  backbone: HRNet
  hrhrnet_head: HrHRNetHead
  post_process: HrHRNetPostProcess
  flip_perm: *flip_perm
  eval_flip: true

HRNet:
  width: &width 32
  freeze_at: -1
  freeze_norm: false
  return_idx: [0]

HrHRNetHead:
  num_joints: *num_joints
  width: *width
  loss: HrHRNetLoss
  swahr: false

HrHRNetLoss:
  num_joints: *num_joints
  swahr: false
#####optimizer
LearningRate:
  base_lr: 0.001
  schedulers:
  - !PiecewiseDecay
    milestones: [200, 260]
    gamma: 0.1
  - !LinearWarmup
    start_factor: 0.001
    steps: 1000

OptimizerBuilder:
  optimizer:
    type: Adam
  regularizer: None
#####data
TrainDataset:
  !KeypointBottomUpCocoDataset
    image_dir: train2017
    anno_path: annotations/person_keypoints_train2017.json
    dataset_dir: dataset/coco
    num_joints: *num_joints
    return_bbox: False
    return_area: False
    return_class: False

EvalDataset:
  !KeypointBottomUpCocoDataset
    image_dir: val2017
    anno_path: annotations/person_keypoints_val2017.json
    dataset_dir: dataset/coco
    num_joints: *num_joints
    test_mode: true
    return_bbox: False
    return_area: False
    return_class: False

TestDataset:
  !ImageFolder
    anno_path: dataset/coco/keypoint_imagelist.txt
worker_num: 8
global_mean: &global_mean [0.485, 0.456, 0.406]
global_std: &global_std [0.229, 0.224, 0.225]
TrainReader:
  sample_transforms:
    - RandomAffine:
        max_degree: 30
        scale: [0.75, 1.5]
        max_shift: 0.2
        trainsize: [*input_size, *input_size]
        hmsize: [*hm_size, *hm_size_2x]
    - KeyPointFlip:
        flip_prob: 0.5
        flip_permutation: *flip_perm
        hmsize: [*hm_size, *hm_size_2x]
    - ToHeatmaps:
        num_joints: *num_joints
        hmsize: [*hm_size, *hm_size_2x]
        sigma: 2
    - TagGenerate:
        num_joints: *num_joints
        max_people: *max_people
    - NormalizePermute:
        mean: *global_mean
        std: *global_std
  batch_size: 20
  shuffle: true
  drop_last: true
  use_shared_memory: true

EvalReader:
  sample_transforms:
    - EvalAffine:
        size: *input_size
    - NormalizeImage:
        mean: *global_mean
        std: *global_std
        is_scale: true
    - Permute: {}
  batch_size: 1

TestReader:
  sample_transforms:
    - Decode: {}
    - EvalAffine:
        size: *input_size
    - NormalizeImage:
        mean: *global_mean
        std: *global_std
        is_scale: true
    - Permute: {}
  batch_size: 1
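
Once any of these variants has been trained, the same config drives COCO keypoint evaluation (`metric: COCO`, `IouType: keypoints`). Below is a minimal sketch using PaddleDetection's `Trainer` in eval mode, assuming the 640-input config above is saved as `higherhrnet_hrnet_w32_640.yml` and a checkpoint exists at the `weights:` path it declares (both the file name and the presence of the checkpoint are assumptions, not shown in this diff):

```python
# Minimal evaluation sketch for the config above. Assumes ppdet is installed,
# dataset/coco is laid out as the config expects, and a trained checkpoint
# exists at the config's `weights:` path.
from ppdet.core.workspace import load_config
from ppdet.engine import Trainer

cfg = load_config('higherhrnet_hrnet_w32_640.yml')  # assumed file name

trainer = Trainer(cfg, mode='eval')   # builds the model and EvalReader from cfg
trainer.load_weights(cfg.weights)     # output/higherhrnet_hrnet_w32_640/model_final
trainer.evaluate()                    # reports COCO keypoint AP
```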