{
  "dataset.debug": false,
  "dataset.eval_path": "./output/datasets/self_ds_Llama-2-7b-chat-hf/",
  "dataset.git_diff": "",
  "dataset.git_sha1": "unknown",
  "dataset.manual_sample_ids": [],
  "dataset.max_read_items": null,
  "dataset.output_dir": "output",
  "dataset.path": "output/datasets/ds_Llama-2-7b-chat-hf",
  "dataset.read_eagle_format": false,
  "dataset.run_name": "temp_run",
  "dataset.seed": 42,
  "dataset_generation.batch_size": 1,
  "dataset_generation.debug": false,
  "dataset_generation.debug_target": null,
  "dataset_generation.ds_prefix": "ds_",
  "dataset_generation.git_diff": "",
  "dataset_generation.git_sha1": "unknown",
  "dataset_generation.max_length": 4096,
  "dataset_generation.output_dir": "output",
  "dataset_generation.run_name": "temp_run",
  "dataset_generation.save_every": 1000,
  "dataset_generation.seed": 42,
  "dataset_generation.sharegpt_path": "Aeala/ShareGPT_Vicuna_unfiltered",
  "device_names": [
    "NVIDIA L40S"
  ],
  "inference.alpha_stats": false,
  "inference.debug": false,
  "inference.draft_growing": false,
  "inference.draft_tree_shape": "mc_sim_7b_64",
  "inference.dynamic_draft": true,
  "inference.dynamic_draft_all_top_k": 59,
  "inference.dynamic_draft_max_depth": 5,
  "inference.dynamic_draft_top_k": 10,
  "inference.git_diff": "",
  "inference.git_sha1": "unknown",
  "inference.interactive": false,
  "inference.max_draft_growing_depth": 100,
  "inference.max_new_tokens": 512,
  "inference.mode": "speculative",
  "inference.output_dir": "output",
  "inference.run_name": "temp_run",
  "inference.seed": 42,
  "inference.timer": false,
  "modeling.add_noise": true,
  "modeling.attention_wind": "3",
  "modeling.ckpt_path": null,
  "modeling.debug": false,
  "modeling.decoder_key_remap": {},
  "modeling.dtype": "torch.float32",
  "modeling.frozen_targets": [],
  "modeling.git_diff": "",
  "modeling.git_sha1": "unknown",
  "modeling.layer_path": "model.layers",
  "modeling.lmhead_path": "lm_head",
  "modeling.load_config_from_model_path": false,
  "modeling.mask_token": "\u2581blank",
  "modeling.mask_token_rate": 0,
  "modeling.model_path": "beagle/models/llama/Llama-2-7b-chat-hf/",
  "modeling.norm_path": "model.norm",
  "modeling.only_first_state_distill": false,
  "modeling.output_dir": "output",
  "modeling.reuse_layer": null,
  "modeling.rotary_path": "model.rotary_emb",
  "modeling.run_name": "temp_run",
  "modeling.save_loading": true,
  "modeling.seed": 42,
  "modeling.strictly_follow_eagle_decoder": false,
  "modeling.tokenizer_path": "meta-llama/Llama-2-7b-chat-hf",
  "modeling.use_dyt": false,
  "modeling.use_fc_eagle": false,
  "modeling.use_lower_layers": 0,
  "modeling.use_moe": false,
  "modeling.use_state_distill": true,
  "training.adam_beta1": 0.9,
  "training.adam_beta2": 0.95,
  "training.average_tokens_across_devices": false,
  "training.bf16": true,
  "training.ddp_find_unused_parameters": false,
  "training.debug": false,
  "training.disable_sampled_print": false,
  "training.eval_max_tti_wind": null,
  "training.eval_steps": 100,
  "training.eval_strategy": "steps",
  "training.fast_dry_run": false,
  "training.filter_out_shorts": true,
  "training.force_model_parallel": false,
  "training.git_diff": "diff --git a/beagle/configs.ini b/beagle/configs.ini\nindex 7e119f9..6c1545a 100644\n--- a/beagle/configs.ini\n+++ b/beagle/configs.ini\n@@ -157,6 +157,11 @@ per_device_train_batch_size = 1\n gradient_accumulation_steps = 8\n max_length = 4096\n \n+[training.@l40s_bs8_ctx2048_sm]\n+per_device_train_batch_size = 2\n+gradient_accumulation_steps = 4\n+max_length = 2048\n+\n [dataset]\n #manual_sample_ids = ['v4PzAY8_0', 'oM7QCY2_0', 'efVCaLN_0']\n manual_sample_ids = []",
  "training.git_sha1": "0fd5ab514678b181c0602502811571cb9ffb5944",
  "training.gradient_accumulation_steps": 4,
  "training.learning_rate": 3e-05,
  "training.logging_first_step": true,
  "training.logging_steps": 20,
  "training.loss_pivot_only": false,
  "training.loss_reweighting": false,
  "training.loss_reweighting_use_surrogate": false,
  "training.lr_scheduler_type": "constant_with_warmup",
  "training.max_grad_norm": 0.5,
  "training.max_length": 2048,
  "training.max_steps": -1,
  "training.max_tti_wind": 1,
  "training.model_init_ckpt": null,
  "training.num_train_epochs": 10,
  "training.number_sampled_print": "index",
  "training.optim": "adamw_torch_fused",
  "training.output_dir": "output/old-force-560",
  "training.overwrite_output_dir": true,
  "training.per_device_eval_batch_size": 1,
  "training.per_device_train_batch_size": 2,
  "training.project": "beagle",
  "training.ranking_distill_topk": null,
  "training.report_to": "wandb",
  "training.resume_from_checkpoint": false,
  "training.resume_wandb_runid": null,
  "training.run_name": "temp_run",
  "training.save_steps": 500,
  "training.save_strategy": "steps",
  "training.save_total_limit": 2,
  "training.save_vram": true,
  "training.seed": 42,
  "training.slow_dry_run": false,
  "training.tf32": false,
  "training.topk_w": 1.0,
  "training.use_eagle_pipeline": false,
  "training.warmup_steps": 0,
  "training.world_size": 1
}
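
Note: the keys above are flat, with dotted section prefixes (dataset.*, dataset_generation.*, inference.*, modeling.*, training.*). As an illustrative sketch only, not part of this file or its originating tooling, the Python snippet below shows one way such a flat dump could be regrouped into nested sections for easier access; the file name "args.json" is an assumption standing in for wherever this dump is saved.

    import json
    from collections import defaultdict

    def nest_config(flat: dict) -> dict:
        """Group flat dotted keys (e.g. 'training.seed') into nested dicts.
        Keys without a dot (e.g. 'device_names') stay at the top level."""
        nested = defaultdict(dict)
        for key, value in flat.items():
            if "." in key:
                section, name = key.split(".", 1)
                nested[section][name] = value
            else:
                nested[key] = value
        return dict(nested)

    # Hypothetical usage: load the dump shown above from "args.json".
    with open("args.json") as f:
        cfg = nest_config(json.load(f))

    print(cfg["training"]["learning_rate"])   # 3e-05
    print(cfg["modeling"]["tokenizer_path"])  # meta-llama/Llama-2-7b-chat-hf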