add support for gradient accumulation steps
- scripts/finetune.py +3 -3
- src/axolotl/utils/validation.py +4 -0
- tests/test_validation.py +27 -0
scripts/finetune.py CHANGED

@@ -149,8 +149,10 @@ def train(
         else:
             cfg[k] = kwargs[k]
 
+    validate_config(cfg)
+
     # setup some derived config / hyperparams
-    cfg.gradient_accumulation_steps = cfg.batch_size // cfg.micro_batch_size
+    cfg.gradient_accumulation_steps = cfg.gradient_accumulation_steps or (cfg.batch_size // cfg.micro_batch_size)
     cfg.world_size = int(os.environ.get("WORLD_SIZE", 1))
     cfg.local_rank = int(os.environ.get("LOCAL_RANK", 0))
     choose_device(cfg)
@@ -168,8 +170,6 @@ def train(
     cfg.fp16 = True
     cfg.bf16 = False
 
-    validate_config(cfg)
-
     # load the tokenizer first
     logging.info("loading tokenizer...")
     tokenizer = load_tokenizer(cfg.base_model_config, cfg.tokenizer_type, cfg)
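For reference, a minimal sketch of the fallback behavior introduced above: an explicitly configured gradient_accumulation_steps is kept as-is, otherwise the old derivation from batch_size // micro_batch_size still applies. SimpleNamespace is used here purely for illustration as a stand-in for the repo's DictDefault-based cfg object.

from types import SimpleNamespace

def derive_gas(cfg):
    # Prefer an explicitly configured value; fall back to the old derivation.
    cfg.gradient_accumulation_steps = cfg.gradient_accumulation_steps or (
        cfg.batch_size // cfg.micro_batch_size
    )
    return cfg.gradient_accumulation_steps

# Explicit value wins (the `or` short-circuits before touching batch_size).
explicit = SimpleNamespace(gradient_accumulation_steps=8, batch_size=None, micro_batch_size=None)
assert derive_gas(explicit) == 8

# Legacy style: derived from batch_size // micro_batch_size, e.g. 64 // 4 == 16.
legacy = SimpleNamespace(gradient_accumulation_steps=None, batch_size=64, micro_batch_size=4)
assert derive_gas(legacy) == 16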
src/axolotl/utils/validation.py CHANGED

@@ -4,6 +4,10 @@ import logging
 
 
 def validate_config(cfg):
+    if cfg.gradient_accumulation_steps and cfg.batch_size:
+        raise ValueError(
+            "please set only one of gradient_accumulation_steps or batch_size"
+        )
     if cfg.load_4bit:
         raise ValueError(
             "cfg.load_4bit parameter has been deprecated and replaced by cfg.gptq"
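A stand-alone sketch of how the new guard behaves, assuming missing cfg keys read as falsy (as with the repo's DictDefault); a plain dict with .get() is substituted here only so the snippet runs on its own.

def check_exclusive(cfg: dict) -> None:
    # Mirror of the guard above: reject configs that set both knobs.
    if cfg.get("gradient_accumulation_steps") and cfg.get("batch_size"):
        raise ValueError(
            "please set only one of gradient_accumulation_steps or batch_size"
        )

check_exclusive({"batch_size": 64})                    # fine: steps derived later
check_exclusive({"gradient_accumulation_steps": 16})   # fine: used directly
try:
    check_exclusive({"gradient_accumulation_steps": 16, "batch_size": 64})
except ValueError as err:
    print(err)  # both set -> rejected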
tests/test_validation.py CHANGED

@@ -117,3 +117,30 @@ class ValidationTest(unittest.TestCase):
             }
         )
         validate_config(cfg)
+
+    def test_gradient_accumulations_or_batch_size(self):
+        cfg = DictDefault(
+            {
+                "gradient_accumulation_steps": 1,
+                "batch_size": 1,
+            }
+        )
+
+        with pytest.raises(ValueError, match=r".*gradient_accumulation_steps or batch_size.*"):
+            validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "batch_size": 1,
+            }
+        )
+
+        validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "gradient_accumulation_steps": 1,
+            }
+        )
+
+        validate_config(cfg)
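The new case uses pytest.raises inside a unittest.TestCase, so it presumably relies on pytest and DictDefault already being imported at the top of tests/test_validation.py. Assuming the repo's usual test setup, it could be run on its own with something like "pytest tests/test_validation.py -k gradient_accumulations" (hypothetical invocation; adjust to the local environment).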