{
  "architecture": "resnet34",
  "num_classes": 0,
  "num_features": 512,
  "pretrained_cfg": {
    "tag": "a1_in1k",
    "custom_load": false,
    "input_size": [
      3,
      224,
      224
    ],
    "test_input_size": [
      3,
      288,
      288
    ],
    "fixed_input_size": false,
    "interpolation": "bicubic",
    "crop_pct": 0.95,
    "test_crop_pct": 1.0,
    "crop_mode": "center",
    "mean": [
      0.485,
      0.456,
      0.406
    ],
    "std": [
      0.229,
      0.224,
      0.225
    ],
    "num_classes": 1000,
    "pool_size": [
      7,
      7
    ],
    "first_conv": "conv1",
    "classifier": "fc",
    "origin_url": "https://github.com/huggingface/pytorch-image-models",
    "paper_ids": "arXiv:2110.00476"
  },
  "hflip": 0.0,
  "vflip": 0.0,
  "gaussblr": 0.15,
  "grayscale": 0.15,
  "scale": [
    1.0,
    1.0
  ],
  "crop_pct": 0.875,
  "input_size": [
    3,
    256,
    256
  ],
  "test_input_size": [
    3,
    256,
    256
  ]
}