Commit 26f821b

[Add] Add EvalHook inherited from MMCV EvalHook (#90)
* upgrade eval
* fix lint
* fix lint
* fix lint
* add a unit test: test eval hook
* add unit test
* fix unit test
* fix unit test: remove the requirement for cuda
* use kwargs to receive EvalHook args
* remove useless comments
* create the folder if it does not exist
* add new metric
* fix some bugs
* fix unit test
* remove joint_error metric
* fix unit test
* fix pck thresholds
* fix import error
* fix import error
* remove unused paramter
* add more unit test
* add unit test
* rename p-mpjpe to pa-mpjpe
* fix unit test
* remove `mpjpe` in `__all__`
* fix comments
* add more unit tests
* fix
* rename
* fix docsting
* fix typo
* update `getting_started.md`
* fix docstring
* add evaluation config
* fix unit test
* use mmhuman3d greater/less key
1 parent d2052c3 commit 26f821b

22 files changed: +1369 −365 lines

configs/hmr/resnet50_hmr_pw3d.py (+2)

@@ -1,6 +1,8 @@
 _base_ = ['../_base_/default_runtime.py']
 use_adversarial_train = True
 
+# evaluate
+evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])
 # optimizer
 optimizer = dict(
     backbone=dict(type='Adam', lr=2.5e-4),
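
For readers tracing where this new `evaluation` entry goes: during training it is read back out of the config and expanded into the keyword arguments of the new EvalHook, as the `mmhuman3d/apis/train.py` diff further down shows. A minimal sketch of that flow, assuming `cfg`, `runner`, `val_dataloader` and `distributed` have already been built by the surrounding training code:

```python
# Sketch only: mirrors the registration logic in the mmhuman3d/apis/train.py
# diff below. `cfg`, `runner`, `val_dataloader` and `distributed` are assumed
# to have been created by the surrounding training code.
from mmhuman3d.core.evaluation import DistEvalHook, EvalHook

eval_cfg = cfg.get('evaluation', {})  # e.g. dict(metric=['pa-mpjpe', 'mpjpe'])
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
```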

configs/hybrik/resnet34_hybrik_mixed.py (+13 −2)

@@ -1,5 +1,7 @@
 _base_ = ['../_base_/default_runtime.py']
 
+# evaluate
+evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])
 # optimizer
 optimizer = dict(type='Adam', lr=1e-3, weight_decay=0)
 optimizer_config = dict(grad_clip=None)
@@ -11,7 +13,7 @@
     interval=50,
     hooks=[
         dict(type='TextLoggerHook'),
-        # dict(type='TensorboardLoggerHook')
+        # dict(type='TensorboardLoggerHook')
     ])
 
 img_res = 256
@@ -166,7 +168,16 @@
         partition=[0.4, 0.1, 0.5]),
     test=dict(
         type=dataset_type,
+        body_model=dict(
+            type='GenderedSMPL', model_path='data/body_models/smpl'),
         dataset_name='pw3d',
         data_prefix='data',
         pipeline=test_pipeline,
-        ann_file='hybrik_pw3d_test.npz'))
+        ann_file='hybrik_pw3d_test.npz'),
+    val=dict(
+        type=dataset_type,
+        dataset_name='pw3d',
+        data_prefix='data',
+        pipeline=test_pipeline,
+        ann_file='hybrik_pw3d_test.npz'),
+)

configs/spin/resnet50_spin_pw3d.py (+3)

@@ -1,6 +1,9 @@
 _base_ = ['../_base_/default_runtime.py']
 use_adversarial_train = True
 
+# evaluate
+evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])
+
 img_res = 224
 
 body_model = dict(

configs/vibe/resnet50_vibe_pw3d.py (+3)

@@ -1,6 +1,9 @@
 _base_ = ['../_base_/default_runtime.py']
 use_adversarial_train = True
 
+# evaluate
+evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])
+
 # optimizer
 optimizer = dict(
     neck=dict(type='Adam', lr=2.5e-4), head=dict(type='Adam', lr=2.5e-4))

docs/getting_started.md (+4 −4)

@@ -110,23 +110,23 @@ We provide pretrained models in the respective method folders in [config](https:
 ### Evaluate with a single GPU / multiple GPUs
 
 ```shell
-python tools/test.py ${CONFIG} --work-dir=${WORK_DIR} ${CHECKPOINT}
+python tools/test.py ${CONFIG} --work-dir=${WORK_DIR} ${CHECKPOINT} --metrics=${METRICS}
 ```
 Example:
 ```shell
-python tools/test.py configs/hmr/resnet50_hmr_pw3d.py --work-dir=work_dirs/hmr work_dirs/hmr/latest.pth
+python tools/test.py configs/hmr/resnet50_hmr_pw3d.py --work-dir=work_dirs/hmr work_dirs/hmr/latest.pth --metrics pa-mpjpe mpjpe
 ```
 
 ### Evaluate with slurm
 
 If you can run MMHuman3D on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_test.sh`.
 
 ```shell
-./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG} ${WORK_DIR} ${CHECKPOINT}
+./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG} ${WORK_DIR} ${CHECKPOINT} --metrics ${METRICS}
 ```
 Example:
 ```shell
-./tools/slurm_test.sh my_partition test_hmr configs/hmr/resnet50_hmr_pw3d.py work_dirs/hmr work_dirs/hmr/latest.pth 8
+./tools/slurm_test.sh my_partition test_hmr configs/hmr/resnet50_hmr_pw3d.py work_dirs/hmr work_dirs/hmr/latest.pth 8 --metrics pa-mpjpe mpjpe
 ```
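
Note that the example passes two metric names after a single `--metrics` flag, which implies the test script collects them as a multi-value argument. A purely hypothetical parsing sketch (the real `tools/test.py` is not part of this diff and may differ):

```python
# Hypothetical sketch of how a multi-value --metrics flag could be parsed;
# the actual tools/test.py is not shown in this commit and may differ.
import argparse

parser = argparse.ArgumentParser(description='Test (and evaluate) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--work-dir', help='directory to save evaluation results')
parser.add_argument(
    '--metrics',
    type=str,
    nargs='+',
    help='evaluation metrics, e.g. pa-mpjpe mpjpe')

args = parser.parse_args(
    ['configs/hmr/resnet50_hmr_pw3d.py', 'work_dirs/hmr/latest.pth',
     '--work-dir=work_dirs/hmr', '--metrics', 'pa-mpjpe', 'mpjpe'])
assert args.metrics == ['pa-mpjpe', 'mpjpe']
```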

mmhuman3d/apis/test.py (+1 −41)

@@ -5,18 +5,12 @@
 import time
 
 import mmcv
-import numpy as np
 import torch
 import torch.distributed as dist
-from mmcv.image import tensor2imgs
 from mmcv.runner import get_dist_info
 
 
-def single_gpu_test(model,
-                    data_loader,
-                    show=False,
-                    out_dir=None,
-                    **show_kwargs):
+def single_gpu_test(model, data_loader):
     """Test with single gpu."""
     model.eval()
     results = []
@@ -32,40 +26,6 @@ def single_gpu_test(model,
         else:
             results.append(result)
 
-        if show or out_dir:
-            scores = np.vstack(result)
-            pred_score = np.max(scores, axis=1)
-            pred_label = np.argmax(scores, axis=1)
-            pred_class = [model.CLASSES[lb] for lb in pred_label]
-
-            img_metas = data['img_metas'].data[0]
-            imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg'])
-            assert len(imgs) == len(img_metas)
-
-            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
-                h, w, _ = img_meta['img_shape']
-                img_show = img[:h, :w, :]
-
-                ori_h, ori_w = img_meta['ori_shape'][:-1]
-                img_show = mmcv.imresize(img_show, (ori_w, ori_h))
-
-                if out_dir:
-                    out_file = osp.join(out_dir, img_meta['ori_filename'])
-                else:
-                    out_file = None
-
-                result_show = {
-                    'pred_score': pred_score[i],
-                    'pred_label': pred_label[i],
-                    'pred_class': pred_class[i]
-                }
-                model.module.show_result(
-                    img_show,
-                    result_show,
-                    show=show,
-                    out_file=out_file,
-                    **show_kwargs)
-
         if 'img' in data.keys():
             batch_size = data['img'].size(0)
         else:
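
With the visualisation branch gone, `single_gpu_test` only collects raw results; evaluation happens in the dataset's `evaluate()`, which is the same entry point the new EvalHook calls. A rough offline-evaluation sketch, assuming `model` is already built with weights loaded and `cfg` is a loaded config (the `build_dataloader` keyword arguments are assumptions, not taken from this diff):

```python
# Sketch of offline evaluation with the slimmed-down single_gpu_test.
# `model` and `cfg` are assumed to exist; the build_dataloader kwargs are
# assumptions rather than values taken from this commit.
from mmcv.parallel import MMDataParallel

from mmhuman3d.apis import single_gpu_test
from mmhuman3d.data.datasets import build_dataloader, build_dataset

dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
    dataset, samples_per_gpu=1, workers_per_gpu=1, dist=False, shuffle=False)

results = single_gpu_test(MMDataParallel(model, device_ids=[0]), data_loader)

# The dataset's evaluate() is what the new EvalHook calls internally.
metrics = dataset.evaluate(
    results, res_folder='work_dirs/hmr', metric=['pa-mpjpe', 'mpjpe'])
print(metrics)  # dict mapping metric names to values
```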

mmhuman3d/apis/train.py (+1 −2)

@@ -10,9 +10,9 @@
     OptimizerHook,
     build_runner,
 )
-from mmcv.runner.hooks import DistEvalHook, EvalHook
 
 from mmhuman3d.core.distributed_wrapper import DistributedDataParallelWrapper
+from mmhuman3d.core.evaluation import DistEvalHook, EvalHook
 from mmhuman3d.core.optimizer import build_optimizers
 from mmhuman3d.data.datasets import build_dataloader, build_dataset
 from mmhuman3d.utils import get_root_logger
@@ -156,7 +156,6 @@ def train_model(model,
             round_up=True)
         eval_cfg = cfg.get('evaluation', {})
         eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
-        eval_cfg['work_dir'] = cfg.work_dir
         eval_hook = DistEvalHook if distributed else EvalHook
         runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

mmhuman3d/core/evaluation/__init__.py (+12 −3)

@@ -1,7 +1,16 @@
-from mmhuman3d.core.evaluation import mesh_eval, mpjpe
+from mmhuman3d.core.evaluation import mesh_eval
+from mmhuman3d.core.evaluation.eval_hooks import DistEvalHook, EvalHook
+from mmhuman3d.core.evaluation.eval_utils import (
+    keypoint_3d_auc,
+    keypoint_3d_pck,
+    keypoint_accel_error,
+    keypoint_mpjpe,
+    vertice_pve,
+)
 from mmhuman3d.core.evaluation.mesh_eval import compute_similarity_transform
-from mmhuman3d.core.evaluation.mpjpe import keypoint_mpjpe
 
 __all__ = [
-    'compute_similarity_transform', 'keypoint_mpjpe', 'mesh_eval', 'mpjpe'
+    'compute_similarity_transform', 'keypoint_mpjpe', 'mesh_eval',
+    'DistEvalHook', 'EvalHook', 'vertice_pve', 'keypoint_3d_pck',
+    'keypoint_3d_auc', 'keypoint_accel_error'
 ]
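
For reference, the two metrics enabled in the configs above differ only in an alignment step: `mpjpe` is the mean per-joint Euclidean distance, while `pa-mpjpe` first Procrustes-aligns the prediction to the ground truth (the repository exposes `keypoint_mpjpe` and `compute_similarity_transform` for this). A standalone numpy illustration of the math, not the repository's implementation:

```python
# Standalone illustration of what mpjpe / pa-mpjpe compute; this is not the
# repository's implementation, only the underlying math.
import numpy as np


def mpjpe(pred, gt):
    """Mean per-joint position error: mean Euclidean distance over joints."""
    return np.linalg.norm(pred - gt, axis=-1).mean()


def pa_mpjpe(pred, gt):
    """MPJPE after Procrustes alignment (optimal rotation, translation, scale)."""
    mu_pred, mu_gt = pred.mean(axis=0), gt.mean(axis=0)
    x, y = pred - mu_pred, gt - mu_gt
    # optimal rotation from the SVD of the cross-covariance, with a sign fix
    u, s, vt = np.linalg.svd(x.T @ y)
    d = np.sign(np.linalg.det(u @ vt))
    rot = u @ np.diag([1.0, 1.0, d]) @ vt
    scale = (s[0] + s[1] + d * s[2]) / (x ** 2).sum()
    aligned = scale * (x @ rot) + mu_gt
    return mpjpe(aligned, gt)


pred_joints = np.random.rand(17, 3)  # hypothetical 17-joint prediction
gt_joints = np.random.rand(17, 3)
print('MPJPE:', mpjpe(pred_joints, gt_joints))
print('PA-MPJPE:', pa_mpjpe(pred_joints, gt_joints))
```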

mmhuman3d/core/evaluation/eval_hooks.py (+139, new file)
@@ -0,0 +1,139 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import tempfile
+import warnings
+
+from mmcv.runner import DistEvalHook as BaseDistEvalHook
+from mmcv.runner import EvalHook as BaseEvalHook
+
+MMHUMAN3D_GREATER_KEYS = ['3dpck', 'pa-3dpck', '3dauc', 'pa-3dauc']
+MMHUMAN3D_LESS_KEYS = ['mpjpe', 'pa-mpjpe', 'pve']
+
+
+class EvalHook(BaseEvalHook):
+
+    def __init__(self,
+                 dataloader,
+                 start=None,
+                 interval=1,
+                 by_epoch=True,
+                 save_best=None,
+                 rule=None,
+                 test_fn=None,
+                 greater_keys=MMHUMAN3D_GREATER_KEYS,
+                 less_keys=MMHUMAN3D_LESS_KEYS,
+                 **eval_kwargs):
+        if test_fn is None:
+            from mmhuman3d.apis import single_gpu_test
+            test_fn = single_gpu_test
+
+        # remove "gpu_collect" from eval_kwargs
+        if 'gpu_collect' in eval_kwargs:
+            warnings.warn(
+                '"gpu_collect" will be deprecated in EvalHook.'
+                'Please remove it from the config.', DeprecationWarning)
+            _ = eval_kwargs.pop('gpu_collect')
+
+        # update "save_best" according to "key_indicator" and remove the
+        # latter from eval_kwargs
+        if 'key_indicator' in eval_kwargs or isinstance(save_best, bool):
+            warnings.warn(
+                '"key_indicator" will be deprecated in EvalHook.'
+                'Please use "save_best" to specify the metric key,'
+                'e.g., save_best="pa-mpjpe".', DeprecationWarning)
+
+            key_indicator = eval_kwargs.pop('key_indicator', None)
+            if save_best is True and key_indicator is None:
+                raise ValueError('key_indicator should not be None, when '
+                                 'save_best is set to True.')
+            save_best = key_indicator
+
+        super().__init__(dataloader, start, interval, by_epoch, save_best,
+                         rule, test_fn, greater_keys, less_keys, **eval_kwargs)
+
+    def evaluate(self, runner, results):
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            eval_res = self.dataloader.dataset.evaluate(
+                results,
+                res_folder=tmp_dir,
+                logger=runner.logger,
+                **self.eval_kwargs)
+
+        for name, val in eval_res.items():
+            runner.log_buffer.output[name] = val
+        runner.log_buffer.ready = True
+
+        if self.save_best is not None:
+            if self.key_indicator == 'auto':
+                self._init_rule(self.rule, list(eval_res.keys())[0])
+
+            return eval_res[self.key_indicator]
+
+        return None
+
+
+class DistEvalHook(BaseDistEvalHook):
+
+    def __init__(self,
+                 dataloader,
+                 start=None,
+                 interval=1,
+                 by_epoch=True,
+                 save_best=None,
+                 rule=None,
+                 test_fn=None,
+                 greater_keys=MMHUMAN3D_GREATER_KEYS,
+                 less_keys=MMHUMAN3D_LESS_KEYS,
+                 broadcast_bn_buffer=True,
+                 tmpdir=None,
+                 gpu_collect=False,
+                 **eval_kwargs):
+
+        if test_fn is None:
+            from mmhuman3d.apis import multi_gpu_test
+            test_fn = multi_gpu_test
+
+        # update "save_best" according to "key_indicator" and remove the
+        # latter from eval_kwargs
+        if 'key_indicator' in eval_kwargs or isinstance(save_best, bool):
+            warnings.warn(
+                '"key_indicator" will be deprecated in EvalHook.'
+                'Please use "save_best" to specify the metric key,'
+                'e.g., save_best="pa-mpjpe".', DeprecationWarning)
+
+            key_indicator = eval_kwargs.pop('key_indicator', None)
+            if save_best is True and key_indicator is None:
+                raise ValueError('key_indicator should not be None, when '
+                                 'save_best is set to True.')
+            save_best = key_indicator
+
+        super().__init__(dataloader, start, interval, by_epoch, save_best,
+                         rule, test_fn, greater_keys, less_keys,
+                         broadcast_bn_buffer, tmpdir, gpu_collect,
+                         **eval_kwargs)
+
+    def evaluate(self, runner, results):
+        """Evaluate the results.
+
+        Args:
+            runner (:obj:`mmcv.Runner`): The underlined training runner.
+            results (list): Output results.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            eval_res = self.dataloader.dataset.evaluate(
+                results,
+                res_folder=tmp_dir,
+                logger=runner.logger,
+                **self.eval_kwargs)
+
+        for name, val in eval_res.items():
+            runner.log_buffer.output[name] = val
+        runner.log_buffer.ready = True
+
+        if self.save_best is not None:
+            if self.key_indicator == 'auto':
+                # infer from eval_results
+                self._init_rule(self.rule, list(eval_res.keys())[0])
+            return eval_res[self.key_indicator]
+
+        return None
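
Because 'pa-mpjpe' appears in MMHUMAN3D_LESS_KEYS, the hook can infer that lower is better when asked to keep the best checkpoint. A hedged config sketch building on the `evaluation` entries added above; only `metric` is taken verbatim from this commit, while `interval` and `save_best` map onto the constructor arguments shown in the hook:

```python
# Sketch of a fuller evaluation entry for a config file. `metric` is forwarded
# to the dataset's evaluate() via **eval_kwargs; `interval` and `save_best`
# map onto the EvalHook arguments above. With save_best='pa-mpjpe' (listed in
# MMHUMAN3D_LESS_KEYS), rule='less' is inferred and the checkpoint with the
# lowest PA-MPJPE is kept.
evaluation = dict(
    interval=1,
    metric=['pa-mpjpe', 'mpjpe'],
    save_best='pa-mpjpe')
```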
