tensorpro committed on
Commit
a6cdc12
·
1 Parent(s): 40ec06f

Add b16 clip

Browse files
Files changed (2) hide show
  1. config.json +22 -7
  2. pytorch_model.bin +3 -0
config.json CHANGED
@@ -1,4 +1,5 @@
1
  {
 
2
  "architectures": [
3
  "CLIPViPModel"
4
  ],
@@ -12,8 +13,10 @@
12
  "architectures": null,
13
  "attention_dropout": 0.0,
14
  "bad_words_ids": null,
 
15
  "bos_token_id": 0,
16
  "chunk_size_feed_forward": 0,
 
17
  "decoder_start_token_id": null,
18
  "diversity_penalty": 0.0,
19
  "do_sample": false,
@@ -21,6 +24,7 @@
21
  "early_stopping": false,
22
  "encoder_no_repeat_ngram_size": 0,
23
  "eos_token_id": 2,
 
24
  "finetuning_task": null,
25
  "forced_bos_token_id": null,
26
  "forced_eos_token_id": null,
@@ -57,15 +61,17 @@
57
  "pad_token_id": 1,
58
  "prefix": null,
59
  "problem_type": null,
60
- "projection_dim" : 512,
61
  "pruned_heads": {},
62
  "remove_invalid_values": false,
63
  "repetition_penalty": 1.0,
64
  "return_dict": true,
65
  "return_dict_in_generate": false,
66
  "sep_token_id": null,
 
67
  "task_specific_params": null,
68
  "temperature": 1.0,
 
69
  "tie_encoder_decoder": false,
70
  "tie_word_embeddings": true,
71
  "tokenizer_class": null,
@@ -73,7 +79,8 @@
73
  "top_p": 1.0,
74
  "torch_dtype": null,
75
  "torchscript": false,
76
- "transformers_version": "4.12.0.dev0",
 
77
  "use_bfloat16": false,
78
  "vocab_size": 49408
79
  },
@@ -87,8 +94,10 @@
87
  "architectures": null,
88
  "attention_dropout": 0.0,
89
  "bad_words_ids": null,
 
90
  "bos_token_id": null,
91
  "chunk_size_feed_forward": 0,
 
92
  "decoder_start_token_id": null,
93
  "diversity_penalty": 0.0,
94
  "do_sample": false,
@@ -96,6 +105,7 @@
96
  "early_stopping": false,
97
  "encoder_no_repeat_ngram_size": 0,
98
  "eos_token_id": null,
 
99
  "finetuning_task": null,
100
  "forced_bos_token_id": null,
101
  "forced_eos_token_id": null,
@@ -117,7 +127,7 @@
117
  },
118
  "layer_norm_eps": 1e-05,
119
  "length_penalty": 1.0,
120
- "logit_scale_init_value": 4.60,
121
  "max_length": 20,
122
  "min_length": 0,
123
  "model_type": "clip_vip_vision_model",
@@ -125,6 +135,7 @@
125
  "num_attention_heads": 12,
126
  "num_beam_groups": 1,
127
  "num_beams": 1,
 
128
  "num_hidden_layers": 12,
129
  "num_return_sequences": 1,
130
  "output_attentions": false,
@@ -134,16 +145,18 @@
134
  "patch_size": 16,
135
  "prefix": null,
136
  "problem_type": null,
137
- "projection_dim" : 512,
138
  "pruned_heads": {},
139
  "remove_invalid_values": false,
140
  "repetition_penalty": 1.0,
141
  "return_dict": true,
142
  "return_dict_in_generate": false,
143
  "sep_token_id": null,
 
144
  "task_specific_params": null,
145
  "temperature": 1.0,
146
  "temporal_size": 12,
 
147
  "tie_encoder_decoder": false,
148
  "tie_word_embeddings": true,
149
  "tokenizer_class": null,
@@ -151,10 +164,12 @@
151
  "top_p": 1.0,
152
  "torch_dtype": null,
153
  "torchscript": false,
154
- "transformers_version": "4.12.0.dev0",
155
- "use_bfloat16": false
 
 
156
  },
157
  "vision_config_dict": {
158
  "patch_size": 16
159
  }
160
- }
 
1
  {
2
+ "_commit_hash": "40ec06f4e5e6642fcb394ba4211c01a75ec17d7c",
3
  "architectures": [
4
  "CLIPViPModel"
5
  ],
 
13
  "architectures": null,
14
  "attention_dropout": 0.0,
15
  "bad_words_ids": null,
16
+ "begin_suppress_tokens": null,
17
  "bos_token_id": 0,
18
  "chunk_size_feed_forward": 0,
19
+ "cross_attention_hidden_size": null,
20
  "decoder_start_token_id": null,
21
  "diversity_penalty": 0.0,
22
  "do_sample": false,
 
24
  "early_stopping": false,
25
  "encoder_no_repeat_ngram_size": 0,
26
  "eos_token_id": 2,
27
+ "exponential_decay_length_penalty": null,
28
  "finetuning_task": null,
29
  "forced_bos_token_id": null,
30
  "forced_eos_token_id": null,
 
61
  "pad_token_id": 1,
62
  "prefix": null,
63
  "problem_type": null,
64
+ "projection_dim": 512,
65
  "pruned_heads": {},
66
  "remove_invalid_values": false,
67
  "repetition_penalty": 1.0,
68
  "return_dict": true,
69
  "return_dict_in_generate": false,
70
  "sep_token_id": null,
71
+ "suppress_tokens": null,
72
  "task_specific_params": null,
73
  "temperature": 1.0,
74
+ "tf_legacy_loss": false,
75
  "tie_encoder_decoder": false,
76
  "tie_word_embeddings": true,
77
  "tokenizer_class": null,
 
79
  "top_p": 1.0,
80
  "torch_dtype": null,
81
  "torchscript": false,
82
+ "transformers_version": "4.30.0.dev0",
83
+ "typical_p": 1.0,
84
  "use_bfloat16": false,
85
  "vocab_size": 49408
86
  },
 
94
  "architectures": null,
95
  "attention_dropout": 0.0,
96
  "bad_words_ids": null,
97
+ "begin_suppress_tokens": null,
98
  "bos_token_id": null,
99
  "chunk_size_feed_forward": 0,
100
+ "cross_attention_hidden_size": null,
101
  "decoder_start_token_id": null,
102
  "diversity_penalty": 0.0,
103
  "do_sample": false,
 
105
  "early_stopping": false,
106
  "encoder_no_repeat_ngram_size": 0,
107
  "eos_token_id": null,
108
+ "exponential_decay_length_penalty": null,
109
  "finetuning_task": null,
110
  "forced_bos_token_id": null,
111
  "forced_eos_token_id": null,
 
127
  },
128
  "layer_norm_eps": 1e-05,
129
  "length_penalty": 1.0,
130
+ "logit_scale_init_value": 4.6,
131
  "max_length": 20,
132
  "min_length": 0,
133
  "model_type": "clip_vip_vision_model",
 
135
  "num_attention_heads": 12,
136
  "num_beam_groups": 1,
137
  "num_beams": 1,
138
+ "num_channels": 3,
139
  "num_hidden_layers": 12,
140
  "num_return_sequences": 1,
141
  "output_attentions": false,
 
145
  "patch_size": 16,
146
  "prefix": null,
147
  "problem_type": null,
148
+ "projection_dim": 512,
149
  "pruned_heads": {},
150
  "remove_invalid_values": false,
151
  "repetition_penalty": 1.0,
152
  "return_dict": true,
153
  "return_dict_in_generate": false,
154
  "sep_token_id": null,
155
+ "suppress_tokens": null,
156
  "task_specific_params": null,
157
  "temperature": 1.0,
158
  "temporal_size": 12,
159
+ "tf_legacy_loss": false,
160
  "tie_encoder_decoder": false,
161
  "tie_word_embeddings": true,
162
  "tokenizer_class": null,
 
164
  "top_p": 1.0,
165
  "torch_dtype": null,
166
  "torchscript": false,
167
+ "transformers_version": "4.30.0.dev0",
168
+ "typical_p": 1.0,
169
+ "use_bfloat16": false,
170
+ "use_temporal_embed": true
171
  },
172
  "vision_config_dict": {
173
  "patch_size": 16
174
  }
175
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05148d0605a6977d9ee1b48e282a78e48f1b9e8ec84f5f3d34735962496554c7
3
+ size 598663509