Update README.md
Browse files
README.md
CHANGED
@@ -100,6 +100,14 @@ import torch
|
|
100 |
model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ"
|
101 |
adapter = "nmarafo/Mistral-7B-Instruct-v0.2-TrueFalse-Feedback-GPTQ"
|
102 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
103 |
def generate_prompt(data_point):
|
104 |
system_message = "Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. An answer will be considered correct if it accurately identifies the key information requested in the question, even if expressed differently. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect."
|
105 |
question = data_point["question"][0]
|
|
|
100 |
model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ"
|
101 |
adapter = "nmarafo/Mistral-7B-Instruct-v0.2-TrueFalse-Feedback-GPTQ"
|
102 |
|
103 |
+
# To perform inference on the test dataset example, load the model from the checkpoint
|
104 |
+
persisted_model = AutoPeftModelForCausalLM.from_pretrained(
|
105 |
+
adapter,
|
106 |
+
low_cpu_mem_usage=True,
|
107 |
+
return_dict=True,
|
108 |
+
torch_dtype=torch.float16,
|
109 |
+
device_map="cuda")
|
110 |
+
|
111 |
def generate_prompt(data_point):
|
112 |
system_message = "Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. An answer will be considered correct if it accurately identifies the key information requested in the question, even if expressed differently. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect."
|
113 |
question = data_point["question"][0]
|