burtenshaw committed
Commit · 5ba66b9
Parent(s): 95114d7
improve questions for SFT
Changed files:
- example.json +383 -20
- images/1.png +0 -0
- images/2.png +0 -0
- images/3.png +0 -0
- images/4.png +0 -0
example.json
CHANGED
@@ -1,32 +1,395 @@
1 |
[
|
2 |
{
|
3 |
-
"challenge": "
|
4 |
-
"solution": "
|
5 |
-
"placeholder": "
|
6 |
-
"context": "
|
7 |
},
|
8 |
{
|
9 |
-
"challenge": "
|
10 |
-
"solution": "
|
11 |
-
"placeholder": "
|
12 |
-
"context": "
|
13 |
},
|
14 |
{
|
15 |
-
"challenge": "
|
16 |
-
"solution": "
|
17 |
-
"placeholder": "
|
18 |
-
"context": "
|
19 |
},
|
20 |
{
|
21 |
-
"challenge": "
|
22 |
-
"solution": "
|
23 |
-
"placeholder": "
|
24 |
-
"context": "
|
25 |
},
|
26 |
{
|
27 |
-
"challenge": "
|
28 |
-
"solution": "
|
29 |
-
"placeholder": "
|
30 |
-
"context": "
|
31 |
}
|
32 |
]
|
1 |
[
|
2 |
{
|
3 |
+
"challenge": "Integrate Early Stopping Callback to Prevent Over-training",
|
4 |
+
"solution": "early_stopping = EarlyStoppingCallback(\n early_stopping_patience=3,\n early_stopping_threshold=0.01\n)\ntrainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(),\n callbacks=[early_stopping]\n)",
|
5 |
+
"placeholder": "# The current early stopping settings allow training to proceed too long. Adjust to stop training promptly upon divergence:\nearly_stopping = EarlyStoppingCallback(\n early_stopping_patience=10, # Patience value is too high\n early_stopping_threshold=0.1 # Threshold is too loose\n)\ntrainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(),\n callbacks=[] # Add the early stopping callback here\n)",
|
6 |
+
"context": "Validation loss starts diverging around step 400 and increases by approximately 0.02 per step for 3 consecutive steps. The early stopping mechanism should be sensitive enough (patience between 2-4 steps and a threshold between 0.005-0.02) to halt training when overfitting begins.",
|
7 |
+
"assessment_criteria": [
|
8 |
+
"Ensure the early_stopping_patience is within 2 to 4 steps.",
|
9 |
+
"Verify that the early_stopping_threshold is between 0.005 and 0.02.",
|
10 |
+
"Confirm that EarlyStoppingCallback is added to the callbacks list.",
|
11 |
+
"Make sure EarlyStoppingCallback is correctly imported."
|
12 |
+
],
|
13 |
+
"image": "/Users/ben/code/code_assignment_app/images/2.png"
|
14 |
},
|
15 |
{
|
16 |
+
"challenge": "Set Up a Linear Learning Rate Scheduler Reflecting Gradual Loss Reduction",
|
17 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n learning_rate=3e-4,\n lr_scheduler_type='linear',\n num_train_epochs=3\n )\n)",
|
18 |
+
"placeholder": "# The current configuration uses an inappropriate scheduler and parameter values. Update to match a linear decay:\ntrainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n learning_rate=1e-4, # Learning rate is too low\n lr_scheduler_type='cosine', # Incorrect scheduler type\n num_train_epochs=5 # Too many epochs\n )\n)",
|
19 |
+
"context": "The image shows a linear decrease in loss from 0.8 to 0.2 over approximately 3 epochs. The learning rate scheduler should follow a linear decay pattern, so parameters must be adjusted to reflect this behavior.",
|
20 |
+
"assessment_criteria": [
|
21 |
+
"Ensure lr_scheduler_type is explicitly set to 'linear'.",
|
22 |
+
"Verify that learning_rate is within the range of 2e-4 to 4e-4.",
|
23 |
+
"Confirm that num_train_epochs is set between 2 and 4 to match the convergence pattern."
|
24 |
+
],
|
25 |
+
"image": "/Users/ben/code/code_assignment_app/images/3.png"
|
26 |
},
|
27 |
{
|
28 |
+
"challenge": "Tune TRL Training Arguments for Stable Convergence",
|
29 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n args=TrainingArguments(\n max_steps=2000,\n learning_rate=5e-5,\n gradient_accumulation_steps=4,\n logging_steps=10\n )\n)",
|
30 |
+
"placeholder": "trainer = SFTTrainer(\n model=model,\n args=TrainingArguments(\n max_steps=____, # Pick a value between 1000-3000 steps\n learning_rate=____, # Set a learning rate between 1e-5 and 1e-4\n gradient_accumulation_steps=____, # Choose between 1 and 8\n logging_steps=____ # Choose a value between 5 and 50\n )\n)",
|
31 |
+
"context": "The provided image suggests a smooth and stable convergence over about 2000 steps, with a final loss near 0.1 and logs generated roughly every 10 steps. The training arguments must mirror this stability and reporting frequency.",
|
32 |
+
"assessment_criteria": [
|
33 |
+
"Confirm that max_steps is set between 1800 and 2200.",
|
34 |
+
"Ensure learning_rate lies between 4e-5 and 6e-5.",
|
35 |
+
"Verify that gradient_accumulation_steps is within 2 to 6.",
|
36 |
+
"Check that logging_steps is between 8 and 12."
|
37 |
+
],
|
38 |
+
"image": "/Users/ben/code/code_assignment_app/images/4.png"
|
39 |
},
|
40 |
{
|
41 |
+
"challenge": "Optimize PEFT and Enable 4-bit Quantization for Memory-Efficient Training",
|
42 |
+
"solution": "peft_config = LoraConfig(r=16, lora_alpha=32)\nquant_config = BitsAndBytesConfig(load_in_4bit=True)\n\ntrainer = SFTTrainer(\n model=model,\n peft_config=peft_config,\n quantization_config=quant_config\n)",
|
43 |
+
"placeholder": "peft_config = LoraConfig(\n r=____, # Select a value between 4 and 32\n lora_alpha=____ # Set to 4 times the chosen r\n)\nquant_config = BitsAndBytesConfig(\n load_in_4bit=____ # Set to True or False\n)\n\ntrainer = SFTTrainer(\n model=model,\n peft_config=____,\n quantization_config=____\n)",
|
44 |
+
"context": "For a 7B parameter model running on 24GB GPU, efficient training is critical. Adjust the PEFT settings with a LoRA adapter—choose r within 8 and 24 and set lora_alpha to 4 times the chosen r—to ensure low memory usage and effective regularization. Additionally, enable 4-bit quantization to further reduce resource consumption.",
|
45 |
+
"assessment_criteria": [
|
46 |
+
"Verify that r is set between 8 and 24.",
|
47 |
+
"Confirm that lora_alpha is exactly 4 times the r value.",
|
48 |
+
"Ensure that 4-bit quantization (load_in_4bit) is enabled (set to True).",
|
49 |
+
"Check that both peft_config and quantization_config are properly passed to the trainer."
|
50 |
+
]
|
51 |
},
|
52 |
{
|
53 |
+
"challenge": "Format Multi-turn Chat Conversation for Llama 2 Inference",
|
54 |
+
"solution": "tokenizer.apply_chat_template(\n conversation=[\n {\"role\": \"user\", \"content\": \"Hello!\"},\n {\"role\": \"assistant\", \"content\": \"Hi there!\"},\n {\"role\": \"user\", \"content\": \"How are you?\"}\n ],\n tokenize=False,\n add_generation_prompt=True\n)",
|
55 |
+
"placeholder": "tokenizer.apply_chat_template(\n conversation=____, # Provide a list of message dictionaries with 'role' and 'content'\n tokenize=____, # Set to False to return a formatted string\n add_generation_prompt=____ # Set to True to include the generation prompt\n)",
|
56 |
+
"context": "For proper inference with Llama 2, the conversation must be formatted as a multi-turn dialogue with clearly defined roles. The tokenizer should output a concatenated string (not tokenized) while also including a generation prompt to initiate the response.",
|
57 |
+
"assessment_criteria": [
|
58 |
+
"Ensure the conversation is formatted as a list of dictionaries each containing 'role' and 'content'.",
|
59 |
+
"Check that tokenize is explicitly set to False.",
|
60 |
+
"Confirm that add_generation_prompt is set to True."
|
61 |
+
]
|
62 |
+
},
|
63 |
+
{
|
64 |
+
"challenge": "Set Up a LoRA Adapter Configuration for Efficient Model Fine-tuning",
|
65 |
+
"solution": "config = LoraConfig(\n r=8,\n lora_alpha=32,\n target_modules=[\"q_proj\", \"v_proj\"],\n lora_dropout=0.05,\n bias=\"none\"\n)",
|
66 |
+
"placeholder": "config = LoraConfig(\n r=____, # Choose rank within 4 to 16\n lora_alpha=____, # Should be 4 times the chosen rank\n target_modules=____, # Specify the attention modules (e.g., ['q_proj', 'v_proj'])\n lora_dropout=____, # Set dropout between 0.01 and 0.1\n bias=____ # Choose from 'none', 'all', or 'lora_only'\n)",
|
67 |
+
"context": "When fine-tuning a large (7B) model on limited GPU resources, a LoRA adapter helps reduce memory consumption and computational overhead.",
|
68 |
+
"assessment_criteria": [
|
69 |
+
"Confirm that r is within the range of 4 to 16.",
|
70 |
+
"Verify that lora_alpha is exactly 4 times the r value.",
|
71 |
+
"Ensure that target_modules is set to an appropriate list",
|
72 |
+
"Check that lora_dropout lies between 0.01 and 0.1."
|
73 |
+
]
|
74 |
+
},
|
75 |
+
{
|
76 |
+
"challenge": "Combine Multiple LoRA Adapters Using Weighted Sum",
|
77 |
+
"solution": "merged_model = merge_lora_weights(\n base_model=model,\n adapters=[\n (adapter1, 0.7),\n (adapter2, 0.3)\n ],\n merge_strategy=\"weighted_sum\"\n)",
|
78 |
+
"placeholder": "merged_model = merge_lora_weights(\n base_model=model,\n adapters=[\n (____, ____), # Add the first adapter and its weight\n (____, ____) # Add the second adapter and its weight\n ],\n merge_strategy=____ # Specify the merge strategy (e.g., 'weighted_sum')\n)",
|
79 |
+
"context": "For enhanced performance, you may need to merge different LoRA adapters (for example, one tuned for general instruction and another for task-specific nuances). The weighted sum should reflect the relative contribution of each adapter, with the weights summing to 1.0.",
|
80 |
+
"assessment_criteria": [
|
81 |
+
"Ensure that the weights for the adapters sum up to 1.0 (or very close, accounting for rounding).",
|
82 |
+
"Confirm that an appropriate merge_strategy (such as 'weighted_sum') is specified.",
|
83 |
+
"Verify that the adapters being merged have compatible architectures."
|
84 |
+
]
|
85 |
+
},
|
86 |
+
{
|
87 |
+
"challenge": "Load Base Causal LM and Integrate a Pre-trained LoRA Adapter for Inference",
|
88 |
+
"solution": "model = AutoModelForCausalLM.from_pretrained(\n \"base_model\",\n device_map=\"auto\"\n)\nmodel = PeftModel.from_pretrained(\n model,\n \"lora_adapter\",\n adapter_name=\"default\"\n).merge_and_unload()",
|
89 |
+
"placeholder": "model = AutoModelForCausalLM.from_pretrained(\n ____, # Specify the base model identifier\n device_map=____ # Configure device mapping, e.g., 'auto'\n)\nmodel = PeftModel.from_pretrained(\n ____, # Provide the loaded base model\n ____, # Provide the LoRA adapter path\n adapter_name=____ # Use the correct adapter name\n).____() # Call the method to merge and unload adapter weights (e.g., merge_and_unload)\n",
|
90 |
+
"context": "For inference, first load the base model with device mapping, then incorporate the LoRA adapter",
|
91 |
+
"assessment_criteria": [
|
92 |
+
"Verify correct base model loading with device mapping",
|
93 |
+
"Ensure correct adapter loading",
|
94 |
+
"Confirm proper merging for inference"
|
95 |
+
]
|
96 |
+
},
|
97 |
+
{
|
98 |
+
"challenge": "Configure SFTTrainer Learning Rate",
|
99 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n learning_rate=2e-5\n )\n)",
|
100 |
+
"placeholder": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n learning_rate=5e-5 # TODO: Lower this value to prevent overfitting (should be < 3e-5)\n )\n)",
|
101 |
+
"context": "The model is showing signs of overfitting with the current learning rate of 5e-5. A lower learning rate is needed for more stable training.",
|
102 |
+
"assessment_criteria": [
|
103 |
+
"Verify that learning_rate is below 3e-5"
|
104 |
+
]
|
105 |
+
},
|
106 |
+
{
|
107 |
+
"challenge": "Configure LoRA Adapter Rank",
|
108 |
+
"solution": "config = LoraConfig(\n r=16\n)",
|
109 |
+
"placeholder": "config = LoraConfig(\n r=4 # TODO: Increase rank for better adaptation (should be between 8-24)\n)",
|
110 |
+
"context": "The current LoRA rank is too low for effective model adaptation. A higher rank will improve model capacity while keeping memory usage reasonable.",
|
111 |
+
"assessment_criteria": [
|
112 |
+
"Verify that r is set between 8 and 24"
|
113 |
+
]
|
114 |
+
},
|
115 |
+
{
|
116 |
+
"challenge": "Configure SFTTrainer: Set max_steps for training duration",
|
117 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n max_steps=1000\n )\n)",
|
118 |
+
"placeholder": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n max_steps=____ # Choose between 800-1200 steps\n )\n)",
|
119 |
+
"context": "Based on the training curves, setting an appropriate number of steps is crucial to avoid overfitting while allowing sufficient training progress.",
|
120 |
+
"assessment_criteria": [
|
121 |
+
"Verify that max_steps is set between 800 and 1200 steps."
|
122 |
+
]
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"challenge": "Refine SFTTrainer: Adjust learning_rate to prevent overfitting",
|
126 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n learning_rate=2e-5\n )\n)",
|
127 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
128 |
+
"context": "A cautious learning rate is essential to prevent overly aggressive updates that can lead to overfitting.",
|
129 |
+
"assessment_criteria": [
|
130 |
+
"Verify that learning_rate is below 3e-5."
|
131 |
+
]
|
132 |
+
},
|
133 |
+
{
|
134 |
+
"challenge": "Refine SFTTrainer: Increase weight_decay for stronger regularization",
|
135 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n weight_decay=0.02\n )\n)",
|
136 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
137 |
+
"context": "Increasing weight decay enhances regularization, which helps mitigate overfitting issues.",
|
138 |
+
"assessment_criteria": [
|
139 |
+
"Confirm that weight_decay is increased (greater than 0.01)."
|
140 |
+
]
|
141 |
+
},
|
142 |
+
{
|
143 |
+
"challenge": "Refine SFTTrainer: Set appropriate warmup_steps relative to max_steps",
|
144 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n warmup_steps=100\n )\n)",
|
145 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
146 |
+
"context": "A sufficient warmup period helps the optimizer gradually adjust and avoids sudden gradient spikes.",
|
147 |
+
"assessment_criteria": [
|
148 |
+
"Check that warmup_steps is at least 10% of max_steps."
|
149 |
+
]
|
150 |
+
},
|
151 |
+
{
|
152 |
+
"challenge": "Integrate Early Stopping: Set Callback Patience Correctly",
|
153 |
+
"solution": "early_stopping = EarlyStoppingCallback(\n early_stopping_patience=3\n)",
|
154 |
+
"placeholder": "early_stopping = EarlyStoppingCallback(\n early_stopping_patience=____ # Choose between 2-4 steps\n)",
|
155 |
+
"context": "An appropriate patience value helps stop training promptly when validation loss begins to increase.",
|
156 |
+
"assessment_criteria": [
|
157 |
+
"Confirm that early_stopping_patience is set between 2 and 4 steps."
|
158 |
+
]
|
159 |
+
},
|
160 |
+
{
|
161 |
+
"challenge": "Integrate Early Stopping: Define Threshold for Early Stopping Trigger",
|
162 |
+
"solution": "early_stopping = EarlyStoppingCallback(\n early_stopping_threshold=0.01\n)",
|
163 |
+
"placeholder": "early_stopping = EarlyStoppingCallback(\n early_stopping_threshold=____\n)",
|
164 |
+
"context": "The threshold determines how sensitive the early stopping callback is when detecting divergence in validation loss.",
|
165 |
+
"assessment_criteria": [
|
166 |
+
"Verify that early_stopping_threshold is between 0.005 and 0.02."
|
167 |
+
]
|
168 |
+
},
|
169 |
+
{
|
170 |
+
"challenge": "Configure Linear LR Scheduler: Set Correct Learning Rate",
|
171 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n learning_rate=3e-4\n )\n)",
|
172 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
173 |
+
"context": "A proper learning rate within the recommended range ensures a smooth linear decay as observed in training curves.",
|
174 |
+
"assessment_criteria": [
|
175 |
+
"Verify that learning_rate is within the range of 2e-4 to 4e-4."
|
176 |
+
],
|
177 |
+
"image": "/Users/ben/code/code_assignment_app/images/3.png"
|
178 |
+
},
|
179 |
+
{
|
180 |
+
"challenge": "Configure Linear LR Scheduler: Set Proper Scheduler Type",
|
181 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n lr_scheduler_type='linear'\n )\n)",
|
182 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
183 |
+
"context": "Specifying a 'linear' scheduler type ensures that the learning rate decays uniformly.",
|
184 |
+
"assessment_criteria": [
|
185 |
+
"Ensure lr_scheduler_type is explicitly set to 'linear'."
|
186 |
+
],
|
187 |
+
"image": "/Users/ben/code/code_assignment_app/images/3.png"
|
188 |
+
},
|
189 |
+
{
|
190 |
+
"challenge": "Configure Linear LR Scheduler: Adjust Number of Training Epochs",
|
191 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n config=SFTConfig(\n num_train_epochs=3\n )\n)",
|
192 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
193 |
+
"context": "Setting the proper number of epochs helps the model converge in line with the observed linear loss reduction.",
|
194 |
+
"assessment_criteria": [
|
195 |
+
"Confirm that num_train_epochs is set between 2 and 4."
|
196 |
+
],
|
197 |
+
"image": "/Users/ben/code/code_assignment_app/images/3.png"
|
198 |
+
},
|
199 |
+
{
|
200 |
+
"challenge": "Set TRL Training Args: Choose appropriate max_steps",
|
201 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n args=TrainingArguments(\n max_steps=2000\n )\n)",
|
202 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
203 |
+
"context": "Choosing an optimal value for max_steps ensures the training process is neither too short nor unnecessarily long.",
|
204 |
+
"assessment_criteria": [
|
205 |
+
"Confirm that max_steps is set between 1800 and 2200."
|
206 |
+
],
|
207 |
+
"image": "/Users/ben/code/code_assignment_app/images/4.png"
|
208 |
+
},
|
209 |
+
{
|
210 |
+
"challenge": "Set TRL Training Args: Adjust learning_rate for stability",
|
211 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n args=TrainingArguments(\n learning_rate=5e-5\n )\n)",
|
212 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
213 |
+
"context": "A stable learning rate helps maintain smooth and consistent training progress.",
|
214 |
+
"assessment_criteria": [
|
215 |
+
"Ensure learning_rate lies between 4e-5 and 6e-5."
|
216 |
+
],
|
217 |
+
"image": "/Users/ben/code/code_assignment_app/images/4.png"
|
218 |
+
},
|
219 |
+
{
|
220 |
+
"challenge": "Set TRL Training Args: Optimize gradient_accumulation_steps",
|
221 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n args=TrainingArguments(\n gradient_accumulation_steps=4\n )\n)",
|
222 |
+
"placeholder": "sft_config = SFTConfig(\n max_steps=____, \n learning_rate=____, \n weight_decay=____, \n warmup_steps=____\n)",
|
223 |
+
"context": "Optimizing gradient accumulation helps smooth updates and is key for training stability.",
|
224 |
+
"assessment_criteria": [
|
225 |
+
"Verify that gradient_accumulation_steps is within 2 to 6."
|
226 |
+
],
|
227 |
+
"image": "/Users/ben/code/code_assignment_app/images/4.png"
|
228 |
+
},
|
229 |
+
{
|
230 |
+
"challenge": "Set TRL Training Args: Define proper logging_steps frequency",
|
231 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n args=SFTConfig(\n logging_steps=10\n )\n)",
|
232 |
+
"placeholder": "trainer = SFTTrainer(\n model=model,\n args=SFTConfig(\n logging_steps=____ # Choose between 8 and 12\n )\n)",
|
233 |
+
"context": "Logging at the correct frequency provides clear insights into training without excessive output.",
|
234 |
+
"assessment_criteria": [
|
235 |
+
"Check that logging_steps is between 8 and 12."
|
236 |
+
],
|
237 |
+
"image": "/Users/ben/code/code_assignment_app/images/4.png"
|
238 |
+
},
|
239 |
+
{
|
240 |
+
"challenge": "Optimize PEFT: Select appropriate LoRA rank (r)",
|
241 |
+
"solution": "peft_config = LoraConfig(\n r=16, \n lora_alpha=32, \n target_modules=[\"q_proj\", \"v_proj\"]\n)",
|
242 |
+
"placeholder": "peft_config = LoraConfig(\n r=____ # Choose r value\n ora_alpha=32, \n target_modules=[\"q_proj\", \"v_proj\"]\n)",
|
243 |
+
"context": "The LoRA rank (r) directly affects model complexity and resource usage, so it should fall within an optimal range.",
|
244 |
+
"assessment_criteria": [
|
245 |
+
"Verify that r is set between 8 and 24."
|
246 |
+
]
|
247 |
+
},
|
248 |
+
{
|
249 |
+
"challenge": "Optimize PEFT: Choose correct lora_alpha based on r",
|
250 |
+
"solution": "peft_config = LoraConfig(\n r=16, \n lora_alpha=32, \n target_modules=[\"q_proj\", \"v_proj\"]\n)",
|
251 |
+
"placeholder": "peft_config = LoraConfig(\n r=____, # Choose r value\n lora_alpha=____, # Should be 4 times the chosen r (e.g., if r=8, then lora_alpha=32)\n target_modules=[\"q_proj\", \"v_proj\"]\n)",
|
252 |
+
"context": "Setting lora_alpha proportionally (4× the rank, r) ensures balanced adaptive scaling as recommended in TRL examples.",
|
253 |
+
"assessment_criteria": [
|
254 |
+
"Confirm that lora_alpha is exactly 4 times the r value."
|
255 |
+
]
|
256 |
+
},
|
257 |
+
{
|
258 |
+
"challenge": "Enable 4-bit Quantization for Efficient Training",
|
259 |
+
"solution": "quant_config = BitsAndBytesConfig(\n load_in_4bit=True\n)",
|
260 |
+
"placeholder": "quant_config = BitsAndBytesConfig(\n load_in_4bit=____\n)",
|
261 |
+
"context": "4-bit quantization significantly reduces memory requirements while maintaining acceptable performance.",
|
262 |
+
"assessment_criteria": [
|
263 |
+
"Ensure that load_in_4bit is set to True."
|
264 |
+
]
|
265 |
+
},
|
266 |
+
{
|
267 |
+
"challenge": "Format Chat Conversation: Provide proper conversation list",
|
268 |
+
"solution": "tokenizer.apply_chat_template(\n conversation=[\n {\"role\": \"user\", \"content\": \"Hello!\"},\n {\"role\": \"assistant\", \"content\": \"Hi there!\"}\n ]\n)",
|
269 |
+
"placeholder": "tokenizer.apply_chat_template(\n conversation=____ # Provide a list of dictionaries with 'role' and 'content'\n)",
|
270 |
+
"context": "A correctly formatted conversation list is essential to initiate multi-turn chat inference.",
|
271 |
+
"assessment_criteria": [
|
272 |
+
"Ensure the conversation is formatted as a list of dictionaries with 'role' and 'content'."
|
273 |
+
]
|
274 |
+
},
|
275 |
+
{
|
276 |
+
"challenge": "Format Chat Conversation: Set tokenize option appropriately",
|
277 |
+
"solution": "tokenizer.apply_chat_template(\n tokenize=False\n)",
|
278 |
+
"placeholder": "tokenizer.apply_chat_template(_____)",
|
279 |
+
"context": "Setting tokenize to False makes sure that the output remains a fully formatted string.",
|
280 |
+
"assessment_criteria": [
|
281 |
+
"Check that tokenize is explicitly set to False."
|
282 |
+
]
|
283 |
+
},
|
284 |
+
{
|
285 |
+
"challenge": "Format Chat Conversation: Enable Generation Prompt",
|
286 |
+
"solution": "tokenizer.apply_chat_template(\n add_generation_prompt=True\n)",
|
287 |
+
"placeholder": "tokenizer.apply_chat_template(_____)",
|
288 |
+
"context": "Enabling the generation prompt helps trigger the model's response generation effectively.",
|
289 |
+
"assessment_criteria": [
|
290 |
+
"Confirm that add_generation_prompt is set to True."
|
291 |
+
]
|
292 |
+
},
|
293 |
+
{
|
294 |
+
"challenge": "Configure LoRA Adapter: Set rank parameter for efficient adaptation",
|
295 |
+
"solution": "config = LoraConfig(\n r=8\n)",
|
296 |
+
"placeholder": "config = LoraConfig(\n r=____, # Choose r value\n lora_alpha=16,\n)",
|
297 |
+
"context": "Choosing a proper rank for the LoRA adapter is key for efficient fine-tuning with limited resources.",
|
298 |
+
"assessment_criteria": [
|
299 |
+
"Confirm that r is within the range of 4 to 16."
|
300 |
+
]
|
301 |
+
},
|
302 |
+
{
|
303 |
+
"challenge": "Configure LoRA Adapter: Set lora_alpha as 4 times r",
|
304 |
+
"solution": "config = LoraConfig(\n lora_alpha=32\n)",
|
305 |
+
"placeholder": "config = LoraConfig(\n lora_alpha=____, # Should be 4 times the chosen r\n r=4\n)",
|
306 |
+
"context": "Maintaining the ratio between lora_alpha and r is important for balanced adapter scaling.",
|
307 |
+
"assessment_criteria": [
|
308 |
+
"Verify that lora_alpha is exactly 4 times the r value."
|
309 |
+
]
|
310 |
+
},
|
311 |
+
{
|
312 |
+
"challenge": "Configure LoRA Adapter: Specify target attention modules",
|
313 |
+
"solution": "config = LoraConfig(\n target_modules=[\"q_proj\", \"v_proj\"]\n)",
|
314 |
+
"placeholder": "config = LoraConfig(\n target_modules=____, # Specify a list of attention modules\n r=4\n)",
|
315 |
+
"context": "Identifying and targeting the relevant attention modules helps focus the adapter's adjustments.",
|
316 |
+
"assessment_criteria": [
|
317 |
+
"Ensure that target_modules is set to an appropriate list (e.g., ['q_proj', 'v_proj'])."
|
318 |
+
]
|
319 |
+
},
|
320 |
+
{
|
321 |
+
"challenge": "Configure LoRA Adapter: Define dropout rate",
|
322 |
+
"solution": "config = LoraConfig(\n lora_dropout=0.05\n)",
|
323 |
+
"placeholder": "config = LoraConfig(\n lora_dropout=____, # Set value between 0.01 and 0.1\n r=4\n)",
|
324 |
+
"context": "An optimal dropout rate helps prevent overfitting during fine-tuning.",
|
325 |
+
"assessment_criteria": [
|
326 |
+
"Check that lora_dropout is between 0.01 and 0.1."
|
327 |
+
]
|
328 |
+
},
|
329 |
+
{
|
330 |
+
"challenge": "Combine LoRA Adapters: Verify adapter weight sum",
|
331 |
+
"solution": "merged_model = merge_lora_weights(\n base_model=model,\n adapters=[(adapter1, 0.7), (adapter2, 0.3)],\n merge_strategy=\"weighted_sum\"\n)",
|
332 |
+
"placeholder": "merged_model = merge_lora_weights(\n base_model=model,\n adapters=[(adapter1, 0.7), (adapter2, 0.3)],\n merge_strategy=____\n)",
|
333 |
+
"context": "For a balanced merge of multiple adapters, their weights must sum to 1.0 (or very close, accounting for rounding).",
|
334 |
+
"assessment_criteria": [
|
335 |
+
"Ensure that the weights for the adapters sum up to 1.0 (or very close, accounting for rounding)."
|
336 |
+
]
|
337 |
+
},
|
338 |
+
{
|
339 |
+
"challenge": "Combine LoRA Adapters: Specify a valid merge strategy",
|
340 |
+
"solution": "merged_model = merge_lora_weights(\n base_model=model,\n adapters=[(adapter1, 0.7), (adapter2, 0.3)],\n merge_strategy=\"weighted_sum\"\n)",
|
341 |
+
"placeholder": "merged_model = merge_lora_weights(\n base_model=model,\n adapters=[(adapter1, 0.7), (adapter2, 0.3)],\n merge_strategy=____\n)",
|
342 |
+
"context": "A valid merge strategy must be specified to correctly combine the contributions of each adapter.",
|
343 |
+
"assessment_criteria": [
|
344 |
+
"Confirm that an appropriate merge_strategy is specified (e.g., 'weighted_sum')."
|
345 |
+
]
|
346 |
+
},
|
347 |
+
{
|
348 |
+
"challenge": "Load Base Model: Provide correct model identifier and device mapping",
|
349 |
+
"solution": "model = AutoModelForCausalLM.from_pretrained(\n \"base_model\",\n device_map=\"auto\"\n)",
|
350 |
+
"placeholder": "model = AutoModelForCausalLM.from_pretrained(____)\npeft_model = PeftModel.from_pretrained(____, ____)\n# Merge weights\nmodel = peft_model.____",
|
351 |
+
"context": "The base model must be loaded correctly with its device mapping before integrating adapters.",
|
352 |
+
"assessment_criteria": [
|
353 |
+
"Verify that the base model is loaded correctly with the proper device mapping."
|
354 |
+
]
|
355 |
+
},
|
356 |
+
{
|
357 |
+
"challenge": "Load Pre-trained LoRA Adapter: Use correct adapter identifier",
|
358 |
+
"solution": "model = PeftModel.from_pretrained(\n model,\n \"lora_adapter\",\n adapter_name=\"default\"\n)\n # Merge LoRA weights into base model\nmodel = peft_model.merge_and_unload()",
|
359 |
+
"placeholder": "Ensure to provide the correct adapter identifier",
|
360 |
+
"context": "model = PeftModel.from_pretrained(model, \"lora_adapter\")\n# Merge LoRA weights into base model",
|
361 |
+
"assessment_criteria": [
|
362 |
+
"Ensure that the correct adapter identifier is used to load the LoRA adapter."
|
363 |
+
]
|
364 |
+
},
|
365 |
+
{
|
366 |
+
"challenge": "Merge LoRA Adapter: Successfully merge and unload adapter weights",
|
367 |
+
"solution": "model = PeftModel.from_pretrained(\n model,\n \"lora_adapter\",\n adapter_name=\"default\"\n).merge_and_unload()",
|
368 |
+
"placeholder": "model = PeftModel.from_pretrained(____, ____)\n# Merge weights\nmodel = peft_model.____",
|
369 |
+
"context": "Merging and unloading the adapter weights prepares the model for efficient inference.",
|
370 |
+
"assessment_criteria": [
|
371 |
+
"Confirm that the adapter is merged with the base model and unloaded appropriately to optimize inference performance."
|
372 |
+
]
|
373 |
+
},
|
374 |
+
{
|
375 |
+
"challenge": "Merge a LoRA adapter into the base model for inference",
|
376 |
+
"solution": "model = AutoModelForCausalLM.from_pretrained(\"base_model\")\npeft_model = PeftModel.from_pretrained(model, \"lora_adapter\")\n# Merge LoRA weights into base model\nmodel = peft_model.merge_and_unload()",
|
377 |
+
"placeholder": "model = AutoModelForCausalLM.from_pretrained(____)\npeft_model = PeftModel.from_pretrained(____, ____)\n# Merge weights\nmodel = peft_model.____",
|
378 |
+
"context": "You need to merge a trained LoRA adapter back into the base model for efficient inference",
|
379 |
+
"assessment_criteria": [
|
380 |
+
"Is base model loaded correctly?",
|
381 |
+
"Is LoRA adapter loaded with PeftModel?",
|
382 |
+
"Is merge_and_unload() used to combine weights?"
|
383 |
+
]
|
384 |
+
},
|
385 |
+
{
|
386 |
+
"challenge": "Configure Training Duration for Fine-tuning",
|
387 |
+
"solution": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n num_train_epochs=3,\n max_steps=None\n)",
|
388 |
+
"placeholder": "trainer = SFTTrainer(\n model=model,\n train_dataset=dataset,\n num_train_epochs=10, \n max_steps=None\n)",
|
389 |
+
"context": "The model is showing signs of overfitting after epoch 5. Configure the trainer to use fewer epochs (2-4) to prevent this.",
|
390 |
+
"assessment_criteria": [
|
391 |
+
"Is num_train_epochs set between 2 and 4?",
|
392 |
+
"Is max_steps left as None to use epoch-based training?"
|
393 |
+
]
|
394 |
}
|
395 |
]
|
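The new entries collectively describe one SFT recipe: load the base model in 4-bit, attach a LoRA adapter, and train with early stopping on a linearly decaying schedule. A minimal end-to-end sketch of that recipe, assuming recent transformers/TRL/PEFT APIs, is shown below; the model and dataset identifiers are placeholders, the trainer receives its configuration via args=SFTConfig(...), and the 4-bit BitsAndBytesConfig is applied when the base model is loaded rather than passed to the trainer.

from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, EarlyStoppingCallback
from trl import SFTConfig, SFTTrainer

# Placeholder checkpoints and data for the sketch; substitute your own.
model_id = "HuggingFaceTB/SmolLM2-135M"
train_ds = load_dataset("trl-lib/Capybara", split="train[:1%]")
eval_ds = load_dataset("trl-lib/Capybara", split="train[1%:2%]")

# 4-bit quantization is applied while loading the base model.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)

# LoRA adapter in the ranges the entries check (r between 8 and 24, lora_alpha = 4 * r).
peft_config = LoraConfig(
    r=16,
    lora_alpha=64,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05,
)

# Stop promptly once eval loss stops improving (patience 2-4, threshold 0.005-0.02).
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=3,
    early_stopping_threshold=0.01,
)

trainer = SFTTrainer(
    model=model,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    peft_config=peft_config,
    args=SFTConfig(
        output_dir="sft-demo",
        max_steps=1000,
        learning_rate=2e-5,
        lr_scheduler_type="linear",
        warmup_steps=100,
        weight_decay=0.02,
        gradient_accumulation_steps=4,
        logging_steps=10,
        eval_strategy="steps",
        eval_steps=50,
        load_best_model_at_end=True,  # required for EarlyStoppingCallback to act
        metric_for_best_model="eval_loss",
    ),
    callbacks=[early_stopping],
)
trainer.train()

Early stopping only triggers when periodic evaluation is enabled and load_best_model_at_end is True, which is why those fields are set alongside the callback.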
images/1.png
ADDED
images/2.png
ADDED
images/3.png
ADDED
images/4.png
ADDED
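Several entries also exercise tokenizer.apply_chat_template and merging a LoRA adapter for inference. A short sketch of both follows, assuming standard transformers/PEFT calls; the base model and adapter paths are placeholders for illustration.

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder identifiers; substitute a real chat model and trained adapter.
base_id = "meta-llama/Llama-2-7b-chat-hf"
adapter_id = "path/to/lora_adapter"

tokenizer = AutoTokenizer.from_pretrained(base_id)

# Render a multi-turn conversation as a prompt string and append the
# assistant generation prompt, as the chat-template entries require.
prompt = tokenizer.apply_chat_template(
    [
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there!"},
        {"role": "user", "content": "How are you?"},
    ],
    tokenize=False,
    add_generation_prompt=True,
)

# Load the base model, attach the trained LoRA adapter, then fold the adapter
# weights into the base weights so inference runs on a plain model.
model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id, adapter_name="default")
model = model.merge_and_unload()

merge_and_unload() removes the PEFT wrapper after merging, so the result can be used or saved like any other causal LM.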