hermeschen-ezcon committed
Commit 432f90b · verified · 1 Parent(s): 2d69412

Upload folder using huggingface_hub
chat_template.jinja ADDED
@@ -0,0 +1,7 @@
+ {% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
+ You are a helpful assistant.<|im_end|>
+ {% endif %}<|im_start|>{{ message['role'] }}
+ {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
+ {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
+ {% endif %}
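
The new chat_template.jinja renders Qwen2.5-VL-style conversations: an implicit "You are a helpful assistant." system turn when none is supplied, <|vision_start|><|image_pad|><|vision_end|> placeholders per image (and the video equivalent), optional "Picture N:" / "Video N:" labels when add_vision_id is set, and a trailing assistant header when add_generation_prompt is true. A minimal sketch of how the template would be exercised, assuming the repo files are loaded with a recent transformers release that picks up the standalone chat_template.jinja; the repo id and message payload below are illustrative placeholders, not part of the commit:

from transformers import AutoProcessor

repo_id = "hermeschen-ezcon/qwen2.5-vl-model"  # hypothetical repo id; replace with the actual path
processor = AutoProcessor.from_pretrained(repo_id)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "photo.jpg"},
            {"type": "text", "text": "Describe this picture."},
        ],
    }
]

# With no system message, the template injects the default system turn, emits
# <|vision_start|><|image_pad|><|vision_end|> for the image entry, then the text,
# and (with add_generation_prompt=True) a trailing "<|im_start|>assistant\n".
prompt = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)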
config.json CHANGED
@@ -79,6 +79,100 @@
  "suppress_tokens": null,
  "task_specific_params": null,
  "temperature": 1.0,
+ "text_config": {
+ "vocab_size": 152064,
+ "max_position_embeddings": 128000,
+ "hidden_size": 3584,
+ "intermediate_size": 18944,
+ "num_hidden_layers": 28,
+ "num_attention_heads": 28,
+ "use_sliding_window": false,
+ "sliding_window": 32768,
+ "max_window_layers": 28,
+ "num_key_value_heads": 4,
+ "hidden_act": "silu",
+ "initializer_range": 0.02,
+ "rms_norm_eps": 1e-06,
+ "use_cache": false,
+ "rope_theta": 1000000.0,
+ "attention_dropout": 0.0,
+ "rope_scaling": {
+ "mrope_section": [
+ 16,
+ 24,
+ 24
+ ],
+ "rope_type": "default",
+ "type": "default"
+ },
+ "return_dict": true,
+ "output_hidden_states": false,
+ "output_attentions": false,
+ "torchscript": false,
+ "torch_dtype": "bfloat16",
+ "use_bfloat16": false,
+ "tf_legacy_loss": false,
+ "pruned_heads": {},
+ "tie_word_embeddings": false,
+ "chunk_size_feed_forward": 0,
+ "is_encoder_decoder": false,
+ "is_decoder": false,
+ "cross_attention_hidden_size": null,
+ "add_cross_attention": false,
+ "tie_encoder_decoder": false,
+ "max_length": 20,
+ "min_length": 0,
+ "do_sample": false,
+ "early_stopping": false,
+ "num_beams": 1,
+ "num_beam_groups": 1,
+ "diversity_penalty": 0.0,
+ "temperature": 1.0,
+ "top_k": 50,
+ "top_p": 1.0,
+ "typical_p": 1.0,
+ "repetition_penalty": 1.0,
+ "length_penalty": 1.0,
+ "no_repeat_ngram_size": 0,
+ "encoder_no_repeat_ngram_size": 0,
+ "bad_words_ids": null,
+ "num_return_sequences": 1,
+ "output_scores": false,
+ "return_dict_in_generate": false,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "remove_invalid_values": false,
+ "exponential_decay_length_penalty": null,
+ "suppress_tokens": null,
+ "begin_suppress_tokens": null,
+ "architectures": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "finetuning_task": null,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "tokenizer_class": null,
+ "prefix": null,
+ "bos_token_id": 151643,
+ "pad_token_id": null,
+ "eos_token_id": 151645,
+ "sep_token_id": null,
+ "decoder_start_token_id": null,
+ "task_specific_params": null,
+ "problem_type": null,
+ "_name_or_path": "",
+ "_attn_implementation_autoset": false,
+ "model_type": "qwen2_5_vl_text",
+ "vision_end_token_id": 151653,
+ "vision_start_token_id": 151652,
+ "vision_token_id": 151654
+ },
  "tf_legacy_loss": false,
  "tie_encoder_decoder": false,
  "tie_word_embeddings": false,
@@ -175,7 +269,8 @@
  23,
  31
  ],
- "out_hidden_size": 3584
+ "out_hidden_size": 3584,
+ "initializer_range": 0.02
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3306f9a3496e0b6042f43d50db081352631bab06e03a3dcab61c2da9951661f1
- size 4869266620
+ oid sha256:ce8590b9132b87b897c1827a79120754d0f047d4034778076ddcc2eaf185b817
+ size 4869266642
tokenizer_config.json CHANGED
@@ -195,7 +195,6 @@
195
  "<|video_pad|>"
196
  ],
197
  "bos_token": null,
198
- "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
199
  "clean_up_tokenization_spaces": false,
200
  "eos_token": "<|im_end|>",
201
  "errors": "replace",
 
195
  "<|video_pad|>"
196
  ],
197
  "bos_token": null,
 
198
  "clean_up_tokenization_spaces": false,
199
  "eos_token": "<|im_end|>",
200
  "errors": "replace",