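# CTR-GCN, joint ('j') stream, trained on 2D skeletons of NTU RGB+D 60
# under the cross-subject (xsub) split. Inherits the default runtime from
# the installed MMAction2 package via the `mmaction::` scope prefix.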
_base_ = 'mmaction::_base_/default_runtime.py'
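# Register the project-local `models` package with the registry before the
# config is parsed (assumed to provide the `CTRGCN` backbone used below).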
custom_imports = dict(imports='models')
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='CTRGCN', graph_cfg=dict(layout='coco', mode='spatial')),
    cls_head=dict(type='GCNHead', num_classes=60, in_channels=256))
dataset_type = 'PoseDataset'
ann_file = 'data/skeleton/ntu60_2d.pkl'
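# Training pipeline: normalize the raw 2D keypoints, build the joint ('j')
# feature from the COCO-layout skeleton, uniformly sample a 100-frame clip,
# and pack at most two persons per sample into GCN input format.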
train_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
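# Validation uses the same features but samples a single deterministic clip
# per sequence (num_clips=1, test_mode=True).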
val_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
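# Testing samples 10 clips per sequence (num_clips=10); their scores are
# averaged by the recognizer for the final prediction.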
test_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=10,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
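# RepeatDataset replays the annotation list 5 times per epoch, so the
# 16 training epochs below amount to 80 effective passes over the data.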
train_dataloader = dict(
    batch_size=16,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
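# Validation runs on the held-out xsub_val split after every epoch.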
val_dataloader = dict(
    batch_size=16,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_val',
        test_mode=True))
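# Test loader: same split as validation, but with the 10-clip pipeline and
# batch_size=1.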
test_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_val',
        test_mode=True))
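# AccMetric reports top-k and mean class accuracy; the same evaluator is
# reused for testing.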
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
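# Cosine-anneal the LR from its initial value to 0 over all 16 epochs
# (T_max matches max_epochs); convert_to_iter_based steps the schedule
# every iteration rather than once per epoch.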
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]
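# SGD with Nesterov momentum; lr=0.1 corresponds to the 128-sample global
# batch assumed by `auto_scale_lr` below.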
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))
# Default setting for automatically scaling the LR with batch size.
# - `enable`: whether automatic LR scaling is on (disabled by default).
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
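# A minimal sketch of how a project-style config like this is typically
# launched with MIM (the config path and work dir are placeholders, not
# part of this repository):
#
#   mim train mmaction2 configs/ctrgcn_ntu60_xsub_2d.py \
#       --work-dir work_dirs/ctrgcn
#   mim test mmaction2 configs/ctrgcn_ntu60_xsub_2d.py \
#       --checkpoint work_dirs/ctrgcn/epoch_16.pth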