internlm2 best results
llm_toolkit/eval_logical_reasoning.py CHANGED

@@ -91,10 +91,10 @@ print(f"(3) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
 print(f"{start_gpu_memory} GB of memory reserved.")
 
 if adapter_name_or_path is not None:
-    model_name += "
+    model_name += "/" + adapter_name_or_path.split("/")[-1]
 
 save_results(
-    model_name,
+    f"{model_name}_{dtype}{'_4bit' if load_in_4bit else ''}",
     results_path,
     datasets["test"],
     predictions,
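Taken together, the two hunks key each saved result by adapter, dtype, and quantization mode, so the repeated evaluation passes added to scripts/eval-mgtv-best.sh below no longer overwrite one another. A minimal sketch of the resulting naming scheme follows; only the two `+` lines above come from this commit, and the way `model_name`, `dtype`, and `load_in_4bit` are populated here is an assumption for illustration:

import os

# Assumed inputs, mirroring the env vars in scripts/eval-mgtv-best.sh; the
# real values are read from the environment inside llm_toolkit, not this diff.
model_name = "internlm/internlm2_5-7b-chat-1m"
adapter_name_or_path = "inflaton-ai/InternLM_2_5-7b_LoRA-Adapter"
load_in_4bit = os.getenv("LOAD_IN_4BIT", "false") == "true"
dtype = "bf16"  # placeholder; stands in for whatever dtype label the toolkit uses

if adapter_name_or_path is not None:
    # keep only the last path segment of the adapter repo id
    model_name += "/" + adapter_name_or_path.split("/")[-1]

results_key = f"{model_name}_{dtype}{'_4bit' if load_in_4bit else ''}"
print(results_key)
# -> internlm/internlm2_5-7b-chat-1m/InternLM_2_5-7b_LoRA-Adapter_bf16
# -> ..._bf16_4bit when LOAD_IN_4BIT=true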
results/mgtv-results_internlm_best.csv ADDED

The diff for this file is too large to render. See raw diff.
scripts/eval-mgtv-best.sh CHANGED

@@ -17,14 +17,30 @@ grep MemTotal /proc/meminfo
 #cd ../LLaMA-Factory && pip install -e .[torch,bitsandbytes] && cd $BASEDIR
 #pip install transformers==4.41.2
 
-export USING_LLAMA_FACTORY=
+export USING_LLAMA_FACTORY=false
+export LOAD_IN_4BIT=false
 
 export MODEL_NAME=internlm/internlm2_5-7b-chat-1m
-export ADAPTER_NAME_OR_PATH=
-#export ADAPTER_NAME_OR_PATH=inflaton-ai/InternLM_2_5-7b_LoRA-Adapter
+export ADAPTER_NAME_OR_PATH=inflaton-ai/InternLM_2_5-7b_LoRA-Adapter
 export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
 export LOGICAL_REASONING_RESULTS_PATH=results/mgtv-results_internlm_best.csv
+
 export USE_FLOAT32_FOR_INFERENCE=true
+echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
+python llm_toolkit/eval_logical_reasoning.py
+
+export USE_FLOAT32_FOR_INFERENCE=false
+export USE_BF16_FOR_INFERENCE=true
+echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
+python llm_toolkit/eval_logical_reasoning.py
+
+export USE_FLOAT32_FOR_INFERENCE=false
+export USE_BF16_FOR_INFERENCE=false
+echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
+python llm_toolkit/eval_logical_reasoning.py
 
+export LOAD_IN_4BIT=true
+export USE_FLOAT32_FOR_INFERENCE=false
+export USE_BF16_FOR_INFERENCE=true
 echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
 python llm_toolkit/eval_logical_reasoning.py
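The updated script now evaluates the same model/adapter pair four times: float32, bfloat16, float16 (both flags false), and bfloat16 with 4-bit loading. The flag-to-dtype mapping below is an assumption about how llm_toolkit might interpret these variables, not code from this commit, and `resolve_inference_dtype` is a hypothetical name:

import os
import torch

def resolve_inference_dtype() -> torch.dtype:
    # Hypothetical reconstruction of how the toolkit could interpret the
    # flags exported in scripts/eval-mgtv-best.sh; not taken from the repo.
    if os.getenv("USE_FLOAT32_FOR_INFERENCE", "false") == "true":
        return torch.float32
    if os.getenv("USE_BF16_FOR_INFERENCE", "false") == "true":
        return torch.bfloat16
    return torch.float16  # fall-through when both flags are false

# Under this assumption, the four passes in the script evaluate with:
#   1. torch.float32
#   2. torch.bfloat16
#   3. torch.float16
#   4. torch.bfloat16, quantized via LOAD_IN_4BIT=true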
scripts/eval-mgtv.sh CHANGED

@@ -1 +1 @@
-eval-mgtv-
+eval-mgtv-best.sh