Upload folder using huggingface_hub

Files changed:
- README.md (+2 -2)
- special_tokens_map.json (+1 -1)
- tokenizer_config.json (+1 -9)
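The commit title matches the standard folder-upload flow in the huggingface_hub client. A minimal sketch of that flow is below; the local folder path and repo_id are placeholders for illustration, not values recorded in this commit.

```python
# Minimal sketch: pushing a local folder to the Hub with huggingface_hub.
# folder_path and repo_id are placeholders (assumptions), not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./llama-4-checkpoint",    # local directory to push (placeholder)
    repo_id="your-org/your-llama-4-repo",  # target model repo (placeholder)
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```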
README.md
CHANGED

@@ -20,7 +20,7 @@ tags:
 - meta
 - pytorch
 - llama
--
+- llama4
 extra_gated_prompt: >-
   **LLAMA 4 COMMUNITY LICENSE AGREEMENT**

@@ -269,7 +269,7 @@ In this section, we report the results for Llama 4 relative to our previous mode
 | Image Understanding | ChartQA | 0 | relaxed\_accuracy | | | 88.8 | 90.0 |
 | | DocVQA (test) | 0 | anls | | | 94.4 | 94.4 |
 | Coding | LiveCodeBench (10/01/2024-02/01/2025) | 0 | pass@1 | 33.3 | 27.7 | 32.8 | 43.4 |
-| Reasoning & Knowledge | MMLU Pro | 0 | macro\_avg/
+| Reasoning & Knowledge | MMLU Pro | 0 | macro\_avg/acc | 68.9 | 73.4 | 74.3 | 80.5 |
 | | GPQA Diamond | 0 | accuracy | 50.5 | 49.0 | 57.2 | 69.8 |
 | Multilingual | MGSM | 0 | average/em | 91.1 | 91.6 | 90.6 | 92.3 |
 | Long context | MTOB (half book) eng-\>kgv/kgv-\>eng | \- | chrF | Context window is 128K | | 42.2/36.6 | 54.0/46.4 |
special_tokens_map.json
CHANGED

@@ -14,7 +14,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "<|
+    "content": "<|finetune_right_pad|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
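The only substantive change in this file is the pad-token string itself. A quick way to confirm what a downloaded copy of special_tokens_map.json now declares is to read it directly; the path below assumes the file sits in the current directory.

```python
# Sketch: inspect the pad token declared in special_tokens_map.json.
# The path is a placeholder for wherever the repo snapshot lives locally.
import json

with open("special_tokens_map.json") as f:  # assumed local copy
    special_tokens = json.load(f)

pad = special_tokens["pad_token"]
print(pad["content"])  # expected after this commit: <|finetune_right_pad|>
print(pad["lstrip"], pad["rstrip"], pad["normalized"], pad["single_word"])
```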
tokenizer_config.json
CHANGED

@@ -9071,14 +9071,6 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "201134": {
-      "content": "<|finetune_right_pad_id|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "bos_token": "<|begin_of_text|>",

@@ -9091,7 +9083,7 @@
     "attention_mask"
   ],
   "model_max_length": 1048576,
-  "pad_token": "<|
+  "pad_token": "<|finetune_right_pad|>",
   "processor_class": "Llama4Processor",
   "tokenizer_class": "PreTrainedTokenizer"
 }
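After these two hunks, the separately registered <|finetune_right_pad_id|> entry (token id 201134) is gone from added_tokens_decoder and pad_token points at <|finetune_right_pad|>, in agreement with special_tokens_map.json. A sketch of how the resolved pad token could be verified with transformers, assuming you substitute the local snapshot directory or the Hub repo id this commit belongs to:

```python
# Sketch: check the pad token the tokenizer resolves after this commit.
# "path-or-repo-id" is a placeholder; substitute the local snapshot directory
# or the repo id yourself.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path-or-repo-id")

print(tokenizer.pad_token)     # expected: <|finetune_right_pad|>
print(tokenizer.pad_token_id)  # the vocabulary id that string maps to

# The old name is no longer a registered special token; depending on the vocab,
# converting it may return the unk token id or None rather than a real id.
print(tokenizer.convert_tokens_to_ids("<|finetune_right_pad_id|>"))
```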