Raghavan committed
Commit a270951 · 1 Parent(s): b3891dc

Upload 3 files

Files changed (3):
  1. config.json +278 -0
  2. preprocessor_config.json +31 -0
  3. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,278 @@
+{
+  "architectures": [
+    "FASTForImageCaptioning"
+  ],
+  "backbone_act_func": "relu",
+  "backbone_bias": false,
+  "backbone_dilation": 1,
+  "backbone_dropout_rate": 0,
+  "backbone_groups": 1,
+  "backbone_has_shuffle": false,
+  "backbone_in_channels": 3,
+  "backbone_kernel_size": 3,
+  "backbone_ops_order": "weight_bn_act",
+  "backbone_out_channels": 64,
+  "backbone_stage1_dilation": [
+    1,
+    1,
+    1
+  ],
+  "backbone_stage1_groups": [
+    1,
+    1,
+    1
+  ],
+  "backbone_stage1_in_channels": [
+    64,
+    64,
+    64
+  ],
+  "backbone_stage1_kernel_size": [
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      3
+    ]
+  ],
+  "backbone_stage1_out_channels": [
+    64,
+    64,
+    64
+  ],
+  "backbone_stage1_stride": [
+    1,
+    2,
+    1
+  ],
+  "backbone_stage2_dilation": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage2_groups": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage2_in_channels": [
+    64,
+    128,
+    128,
+    128
+  ],
+  "backbone_stage2_kernel_size": [
+    [
+      3,
+      3
+    ],
+    [
+      1,
+      3
+    ],
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      1
+    ]
+  ],
+  "backbone_stage2_out_channels": [
+    128,
+    128,
+    128,
+    128
+  ],
+  "backbone_stage2_stride": [
+    2,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage3_dilation": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage3_groups": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage3_in_channels": [
+    128,
+    256,
+    256,
+    256
+  ],
+  "backbone_stage3_kernel_size": [
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      1
+    ],
+    [
+      1,
+      3
+    ]
+  ],
+  "backbone_stage3_out_channels": [
+    256,
+    256,
+    256,
+    256
+  ],
+  "backbone_stage3_stride": [
+    2,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage4_dilation": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage4_groups": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "backbone_stage4_in_channels": [
+    256,
+    512,
+    512,
+    512
+  ],
+  "backbone_stage4_kernel_size": [
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      1
+    ],
+    [
+      1,
+      3
+    ],
+    [
+      3,
+      3
+    ]
+  ],
+  "backbone_stage4_out_channels": [
+    512,
+    512,
+    512,
+    512
+  ],
+  "backbone_stage4_stride": [
+    2,
+    1,
+    1,
+    1
+  ],
+  "backbone_stride": 2,
+  "backbone_use_bn": true,
+  "bbox_type": "rect",
+  "head_conv_dilation": 1,
+  "head_conv_groups": 1,
+  "head_conv_in_channels": 512,
+  "head_conv_kernel_size": [
+    3,
+    3
+  ],
+  "head_conv_out_channels": 128,
+  "head_conv_stride": 1,
+  "head_dropout_ratio": 0.1,
+  "head_final_act_func": null,
+  "head_final_bias": false,
+  "head_final_dilation": 1,
+  "head_final_dropout_rate": 0,
+  "head_final_groups": 1,
+  "head_final_has_shuffle": false,
+  "head_final_in_channels": 128,
+  "head_final_kernel_size": 1,
+  "head_final_ops_order": "weight",
+  "head_final_out_channels": 5,
+  "head_final_stride": 1,
+  "head_final_use_bn": false,
+  "head_pooling_size": 9,
+  "initializer_range": 0.02,
+  "loss_bg": false,
+  "min_area": 250,
+  "min_score": 0.88,
+  "neck_dilation": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "neck_groups": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "neck_in_channels": [
+    64,
+    128,
+    256,
+    512
+  ],
+  "neck_kernel_size": [
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      3
+    ],
+    [
+      3,
+      3
+    ]
+  ],
+  "neck_out_channels": [
+    128,
+    128,
+    128,
+    128
+  ],
+  "neck_stride": [
+    1,
+    1,
+    1,
+    1
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.0.dev0"
+}
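
The config describes the model end to end: a four-stage convolutional backbone (a 3-to-64-channel stem, then stages widening to 512 channels, each containing exactly one stride-2 block), a neck that projects all four stage outputs to 128 channels, and a final 1x1 head emitting 5 output channels; min_area and min_score read as detection post-processing thresholds. As a quick way to read that layout back out of the file, here is a minimal sketch using only plain JSON parsing; it assumes config.json has been saved locally and touches no model code:

import json

# Print the backbone layout encoded in config.json (saved locally).
with open("config.json") as f:
    cfg = json.load(f)

print("stem: {} -> {} channels, stride {}".format(
    cfg["backbone_in_channels"], cfg["backbone_out_channels"],
    cfg["backbone_stride"]))
for stage in (1, 2, 3, 4):
    p = "backbone_stage{}".format(stage)
    print("stage {}:".format(stage))
    for cin, cout, k, s in zip(cfg[p + "_in_channels"], cfg[p + "_out_channels"],
                               cfg[p + "_kernel_size"], cfg[p + "_stride"]):
        print("  {:>3} -> {:>3}  kernel {}  stride {}".format(cin, cout, k, s))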
preprocessor_config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "bbox_type": "rect",
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": false,
+  "do_normalize": true,
+  "do_reduce_labels": false,
+  "do_rescale": false,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "FastImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "min_area": 250,
+  "min_score": 0.88,
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 640,
+    "width": 640
+  }
+}
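
Of the toggles above, only do_resize and do_normalize are active: images are resized to 640x640 with resample 3 (PIL bicubic) and normalized with per-channel mean and std of 0.5; center crop is disabled, so crop_size goes unused. Below is a minimal sketch of the equivalent transform. Note that it maps pixels to [0, 1] floats before normalizing, which the 0.5 mean/std and the 1/255 rescale_factor suggest even though do_rescale is false, so treat that scaling as an assumption; the input file name is hypothetical:

from PIL import Image
import torchvision.transforms.functional as TF

# The two enabled steps from preprocessor_config.json:
# resize to 640x640 (resample=3 is PIL bicubic), then
# normalize with mean=std=0.5 per channel.
image = Image.open("sample.jpg").convert("RGB")   # hypothetical input
image = image.resize((640, 640), resample=3)
pixels = TF.pil_to_tensor(image).float() / 255.0  # assumption: scale to [0, 1]
pixels = TF.normalize(pixels, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
batch = pixels.unsqueeze(0)                       # (1, 3, 640, 640) float tensor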
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6690b4f4bcebae7687ea11c242e0096b9067cf689ea69cd25999cdef39024580
+size 54381673
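
pytorch_model.bin is committed as a Git LFS pointer rather than raw weights: the actual checkpoint is a 54,381,673-byte blob whose SHA-256 is recorded in the oid line, and it is fetched by git lfs pull or the Hub's download endpoints. A short sketch for verifying a downloaded copy against this pointer:

import hashlib
import os

# Verify a downloaded pytorch_model.bin against the LFS pointer:
# the expected size and sha256 oid come straight from the pointer above.
EXPECTED_OID = "6690b4f4bcebae7687ea11c242e0096b9067cf689ea69cd25999cdef39024580"
EXPECTED_SIZE = 54381673

path = "pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")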