tpoisonooo
committed on
docs(config.qmd): add loraplus example (#1577)
* Update qwen2-moe-lora.yaml
* feat(project): update
docs/config.qmd CHANGED (+6 -0)

@@ -227,6 +227,12 @@ lora_modules_to_save:
 
 lora_fan_in_fan_out: false
 
+# LoRA+ hyperparameters
+# For more details about the following options, see:
+# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`
+loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.
+loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6.
+
 peft:
   # Configuration options for loftq initialization for LoRA
   # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization
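For context, a minimal sketch of how the two new options might sit in a complete axolotl config. Only the `loraplus_lr_ratio` and `loraplus_lr_embedding` keys come from this commit; the surrounding keys (`base_model`, `adapter`, `lora_r`, `lora_alpha`, etc.) are an assumed LoRA setup for illustration, not part of this change.

```yaml
# Hypothetical excerpt from an axolotl YAML config.
# Assumed base LoRA setup; adjust to your own model and adapter settings.
base_model: Qwen/Qwen1.5-MoE-A2.7B  # assumption, echoing the qwen2-moe-lora.yaml mentioned above
adapter: lora
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out: false

# LoRA+ hyperparameters (the keys added in this commit)
loraplus_lr_ratio: 16        # lr_B / lr_A; 2^4 is the recommended value
loraplus_lr_embedding: 1e-6  # learning rate for LoRA embedding layers (default 1e-6)
```

Per the LoRA+ paper (https://arxiv.org/abs/2402.12354), setting a higher learning rate for the B matrices than for the A matrices can speed up adapter convergence; leaving `loraplus_lr_ratio` unset keeps standard LoRA behavior.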