Update README.md
README.md
CHANGED
@@ -40,6 +40,39 @@ Total 201,981 samples.
 - TokenBender/python_eval_instruct_51k: “python” in output. 40,309 samples
 - Spider: 8,659 samples
 
+## HumanEval
+
+| Metric | Value |
+| --- | --- |
+| humaneval-python | 46.341 |
+
+[Big Code Models Leaderboard](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard)
+
+CodeLlama-34B-Python: 53.29
+
+CodeLlama-34B-Instruct: 50.79
+
+CodeLlama-13B-Instruct: 50.6
+
+CodeLlama-34B: 45.11
+
+CodeLlama-13B-Python: 42.89
+
+CodeLlama-13B: 35.07
+
+## lm-evaluation-harness
+
+[Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+| Metric | Value |
+| --- | --- |
+| ARC | |
+| HellaSwag | |
+| MMLU | |
+| TruthfulQA | |
+| Average | |
+
+## Parameters
+
 
 | | |
 |------ | ------ |
@@ -76,31 +109,3 @@ A100-40G x 4
 | eval_samples_per_second | 19.385 |
 | eval_steps_per_second | 4.846 |
 
-| Metric | Value |
-| --- | --- |
-| humaneval-python | 46.341 |
-
-[Big Code Models Leaderboard](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard)
-
-CodeLlama-34B-Python: 53.29
-
-CodeLlama-34B-Instruct: 50.79
-
-CodeLlama-13B-Instruct: 50.6
-
-CodeLlama-34B: 45.11
-
-CodeLlama-13B-Python: 42.89
-
-CodeLlama-13B: 35.07
-
-[Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
-| Metric | Value |
-| --- | --- |
-| ARC | |
-| HellaSwag | |
-| MMLU | |
-| TruthfulQA | |
-| Average | |
-
-
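
The dataset list in the first hunk keeps only the TokenBender/python_eval_instruct_51k rows whose output mentions "python" (40,309 samples). A minimal sketch of that filter with the Hugging Face datasets library; the `train` split, the `output` column name, and the case-sensitive match are assumptions, since the README only states the condition and the resulting count:

```python
from datasets import load_dataset

# Assumed split and column names; the README only says “python” in output -> 40,309 samples.
ds = load_dataset("TokenBender/python_eval_instruct_51k", split="train")
python_only = ds.filter(lambda row: "python" in row["output"])
print(len(python_only))  # expected to land near the 40,309 reported above
```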
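
The new ## HumanEval section reports the model's humaneval-python score alongside the CodeLlama entries from the Big Code Models Leaderboard. One way to produce a comparable number locally is OpenAI's human-eval package; this is only a sketch, and `generate_completion` is a placeholder for the project's actual generation code, not part of it:

```python
from human_eval.data import read_problems, write_jsonl

def generate_completion(prompt: str) -> str:
    # Placeholder so the script runs end to end; swap in the model's
    # own generation code to reproduce a real score.
    return "    pass\n"

problems = read_problems()  # the 164 HumanEval problems
samples = [
    {"task_id": task_id, "completion": generate_completion(problems[task_id]["prompt"])}
    for task_id in problems
]
write_jsonl("samples.jsonl", samples)
# Scoring is then done with the package's checker:
#   evaluate_functional_correctness samples.jsonl
```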
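
The still-empty ## lm-evaluation-harness table mirrors the Open LLM Leaderboard columns, which that leaderboard computes with EleutherAI's lm-evaluation-harness. A rough sketch with the harness's Python API, assuming a recent 0.4.x release and a placeholder model path; note the leaderboard itself ran ARC at 25-shot, HellaSwag at 10-shot, MMLU at 5-shot, and TruthfulQA at 0-shot, in separate per-task runs:

```python
import lm_eval

# Placeholder model path and settings; a faithful leaderboard reproduction
# would evaluate each task with its own few-shot count.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=path/to/merged-model,dtype=bfloat16",
    tasks=["arc_challenge", "hellaswag", "mmlu", "truthfulqa_mc2"],
    batch_size=8,
)
for task, metrics in results["results"].items():
    print(task, metrics)
```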