rbelanec committed
Commit 3f9e84e (verified)
1 Parent(s): 6da535f

Training in progress, step 200

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
adapter_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "google/gemma-3-1b-it",
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "feedforward_modules": "down_proj",
+   "inference_mode": true,
+   "init_ia3_weights": true,
+   "modules_to_save": null,
+   "peft_type": "IA3",
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "k_proj",
+     "down_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
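The file above is a PEFT (IA)³ adapter configuration: it records the base model (google/gemma-3-1b-it), the projections the adapter scales (v_proj, k_proj, down_proj), and the task type. A minimal loading sketch follows; it is not part of this commit, and the local directory name is a hypothetical placeholder for wherever this repository is downloaded.

# Minimal sketch (assumption, not part of this commit): attaching the IA3
# adapter described by adapter_config.json to its base model with PEFT.
# "./ia3-adapter" is a hypothetical placeholder for a local copy of this repo.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")
tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it")

# PeftModel reads adapter_config.json and adapter_model.safetensors
# from the adapter directory and wraps the base model.
model = PeftModel.from_pretrained(base, "./ia3-adapter")
model.eval()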
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8472b89f90aab80dc490a46b386f3b02ee13d84020c2bc0e94c0a40df5c55f28
+ size 182352
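adapter_model.safetensors is stored through Git LFS, so the repository holds only a pointer file: a spec version, the SHA-256 of the real object (oid), and its size in bytes. A small sketch of checking a downloaded file against that pointer is below; it is illustrative only, with the oid and size copied from the pointer above.

# Minimal sketch (assumption, not part of this commit): verifying a downloaded
# LFS object against the oid/size recorded in its pointer file.
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

print(verify_lfs_object(
    "adapter_model.safetensors",
    "8472b89f90aab80dc490a46b386f3b02ee13d84020c2bc0e94c0a40df5c55f28",
    182352,
))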
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<end_of_turn>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
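special_tokens_map.json and added_tokens.json together declare the tokenizer's control tokens, including the <image_soft_token> added at id 262144. A minimal inspection sketch follows; the local path is a hypothetical placeholder and the code is not part of this commit.

# Minimal sketch (assumption, not part of this commit): loading the tokenizer
# files from a local copy of this repo and inspecting the declared tokens.
# "./ia3-adapter" is a hypothetical placeholder path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./ia3-adapter")

print(tokenizer.special_tokens_map)                            # bos/eos/pad/unk + <end_of_turn>
print(tokenizer.convert_tokens_to_ids("<image_soft_token>"))   # 262144 per added_tokens.json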
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
trainer_log.jsonl ADDED
@@ -0,0 +1,41 @@
+ {"current_steps": 5, "total_steps": 20000, "loss": 8.5731, "lr": 0.29999997039118775, "epoch": 0.0008148631029986962, "percentage": 0.03, "elapsed_time": "0:00:07", "remaining_time": "8:47:49", "throughput": 3475.04, "total_tokens": 27520}
+ {"current_steps": 10, "total_steps": 20000, "loss": 0.8412, "lr": 0.29999985010540814, "epoch": 0.0016297262059973925, "percentage": 0.05, "elapsed_time": "0:00:15", "remaining_time": "8:20:19", "throughput": 3592.61, "total_tokens": 53952}
+ {"current_steps": 15, "total_steps": 20000, "loss": 0.5005, "lr": 0.29999963729218443, "epoch": 0.0024445893089960887, "percentage": 0.07, "elapsed_time": "0:00:22", "remaining_time": "8:13:13", "throughput": 3690.97, "total_tokens": 81984}
+ {"current_steps": 20, "total_steps": 20000, "loss": 0.343, "lr": 0.29999933195164796, "epoch": 0.003259452411994785, "percentage": 0.1, "elapsed_time": "0:00:29", "remaining_time": "8:08:37", "throughput": 3746.58, "total_tokens": 109952}
+ {"current_steps": 25, "total_steps": 20000, "loss": 0.2982, "lr": 0.2999989340839871, "epoch": 0.004074315514993481, "percentage": 0.12, "elapsed_time": "0:00:36", "remaining_time": "8:07:23", "throughput": 3764.73, "total_tokens": 137792}
+ {"current_steps": 30, "total_steps": 20000, "loss": 0.2922, "lr": 0.2999984436894472, "epoch": 0.0048891786179921775, "percentage": 0.15, "elapsed_time": "0:00:44", "remaining_time": "8:09:12", "throughput": 3766.42, "total_tokens": 166080}
+ {"current_steps": 35, "total_steps": 20000, "loss": 0.2898, "lr": 0.29999786076833085, "epoch": 0.005704041720990874, "percentage": 0.18, "elapsed_time": "0:00:51", "remaining_time": "8:06:22", "throughput": 3732.96, "total_tokens": 190976}
+ {"current_steps": 40, "total_steps": 20000, "loss": 0.2943, "lr": 0.29999718532099756, "epoch": 0.00651890482398957, "percentage": 0.2, "elapsed_time": "0:00:58", "remaining_time": "8:04:00", "throughput": 3718.07, "total_tokens": 216384}
+ {"current_steps": 45, "total_steps": 20000, "loss": 0.2885, "lr": 0.29999641734786403, "epoch": 0.007333767926988266, "percentage": 0.22, "elapsed_time": "0:01:05", "remaining_time": "8:03:09", "throughput": 3727.0, "total_tokens": 243648}
+ {"current_steps": 50, "total_steps": 20000, "loss": 0.2964, "lr": 0.29999555684940393, "epoch": 0.008148631029986962, "percentage": 0.25, "elapsed_time": "0:01:12", "remaining_time": "8:04:57", "throughput": 3745.59, "total_tokens": 273152}
+ {"current_steps": 55, "total_steps": 20000, "loss": 0.3276, "lr": 0.2999946038261481, "epoch": 0.008963494132985658, "percentage": 0.27, "elapsed_time": "0:01:19", "remaining_time": "8:03:19", "throughput": 3756.7, "total_tokens": 300416}
+ {"current_steps": 60, "total_steps": 20000, "loss": 0.3155, "lr": 0.2999935582786844, "epoch": 0.009778357235984355, "percentage": 0.3, "elapsed_time": "0:01:26", "remaining_time": "8:01:20", "throughput": 3761.11, "total_tokens": 326848}
+ {"current_steps": 65, "total_steps": 20000, "loss": 0.3214, "lr": 0.29999242020765776, "epoch": 0.01059322033898305, "percentage": 0.33, "elapsed_time": "0:01:33", "remaining_time": "8:00:13", "throughput": 3755.52, "total_tokens": 352832}
+ {"current_steps": 70, "total_steps": 20000, "loss": 0.3108, "lr": 0.2999911896137702, "epoch": 0.011408083441981747, "percentage": 0.35, "elapsed_time": "0:01:41", "remaining_time": "8:00:24", "throughput": 3772.08, "total_tokens": 381888}
+ {"current_steps": 75, "total_steps": 20000, "loss": 0.3121, "lr": 0.29998986649778087, "epoch": 0.012222946544980443, "percentage": 0.38, "elapsed_time": "0:01:48", "remaining_time": "8:00:07", "throughput": 3772.72, "total_tokens": 409088}
+ {"current_steps": 80, "total_steps": 20000, "loss": 0.293, "lr": 0.29998845086050585, "epoch": 0.01303780964797914, "percentage": 0.4, "elapsed_time": "0:01:55", "remaining_time": "7:59:50", "throughput": 3776.06, "total_tokens": 436608}
+ {"current_steps": 85, "total_steps": 20000, "loss": 0.3045, "lr": 0.29998694270281845, "epoch": 0.013852672750977835, "percentage": 0.43, "elapsed_time": "0:02:02", "remaining_time": "8:00:01", "throughput": 3789.6, "total_tokens": 465856}
+ {"current_steps": 90, "total_steps": 20000, "loss": 0.2926, "lr": 0.2999853420256489, "epoch": 0.014667535853976532, "percentage": 0.45, "elapsed_time": "0:02:09", "remaining_time": "7:59:18", "throughput": 3783.49, "total_tokens": 491840}
+ {"current_steps": 95, "total_steps": 20000, "loss": 0.2852, "lr": 0.29998364882998463, "epoch": 0.015482398956975228, "percentage": 0.47, "elapsed_time": "0:02:17", "remaining_time": "7:59:05", "throughput": 3784.26, "total_tokens": 519168}
+ {"current_steps": 100, "total_steps": 20000, "loss": 0.2832, "lr": 0.2999818631168701, "epoch": 0.016297262059973925, "percentage": 0.5, "elapsed_time": "0:02:24", "remaining_time": "7:59:05", "throughput": 3792.2, "total_tokens": 547776}
+ {"current_steps": 105, "total_steps": 20000, "loss": 0.2865, "lr": 0.2999799848874068, "epoch": 0.01711212516297262, "percentage": 0.53, "elapsed_time": "0:02:31", "remaining_time": "7:58:19", "throughput": 3785.49, "total_tokens": 573376}
+ {"current_steps": 110, "total_steps": 20000, "loss": 0.2856, "lr": 0.2999780141427533, "epoch": 0.017926988265971316, "percentage": 0.55, "elapsed_time": "0:02:38", "remaining_time": "7:57:59", "throughput": 3782.03, "total_tokens": 599872}
+ {"current_steps": 115, "total_steps": 20000, "loss": 0.2857, "lr": 0.2999759508841253, "epoch": 0.018741851368970015, "percentage": 0.57, "elapsed_time": "0:02:45", "remaining_time": "7:57:36", "throughput": 3776.4, "total_tokens": 625856}
+ {"current_steps": 120, "total_steps": 20000, "loss": 0.2729, "lr": 0.2999737951127955, "epoch": 0.01955671447196871, "percentage": 0.6, "elapsed_time": "0:02:53", "remaining_time": "7:57:43", "throughput": 3791.12, "total_tokens": 655936}
+ {"current_steps": 125, "total_steps": 20000, "loss": 0.2799, "lr": 0.2999715468300937, "epoch": 0.020371577574967405, "percentage": 0.62, "elapsed_time": "0:03:00", "remaining_time": "7:57:59", "throughput": 3781.67, "total_tokens": 682112}
+ {"current_steps": 130, "total_steps": 20000, "loss": 0.2919, "lr": 0.29996920603740673, "epoch": 0.0211864406779661, "percentage": 0.65, "elapsed_time": "0:03:07", "remaining_time": "7:57:45", "throughput": 3772.17, "total_tokens": 707456}
+ {"current_steps": 135, "total_steps": 20000, "loss": 0.3096, "lr": 0.2999667727361785, "epoch": 0.0220013037809648, "percentage": 0.68, "elapsed_time": "0:03:14", "remaining_time": "7:57:31", "throughput": 3766.43, "total_tokens": 733376}
+ {"current_steps": 140, "total_steps": 20000, "loss": 0.3114, "lr": 0.29996424692791, "epoch": 0.022816166883963495, "percentage": 0.7, "elapsed_time": "0:03:22", "remaining_time": "7:57:35", "throughput": 3767.74, "total_tokens": 761088}
+ {"current_steps": 145, "total_steps": 20000, "loss": 0.2823, "lr": 0.2999616286141593, "epoch": 0.02363102998696219, "percentage": 0.73, "elapsed_time": "0:03:29", "remaining_time": "7:57:33", "throughput": 3767.1, "total_tokens": 788288}
+ {"current_steps": 150, "total_steps": 20000, "loss": 0.2843, "lr": 0.29995891779654155, "epoch": 0.024445893089960886, "percentage": 0.75, "elapsed_time": "0:03:36", "remaining_time": "7:57:54", "throughput": 3769.68, "total_tokens": 816832}
+ {"current_steps": 155, "total_steps": 20000, "loss": 0.2787, "lr": 0.2999561144767288, "epoch": 0.02526075619295958, "percentage": 0.78, "elapsed_time": "0:03:43", "remaining_time": "7:57:53", "throughput": 3776.41, "total_tokens": 845760}
+ {"current_steps": 160, "total_steps": 20000, "loss": 0.2788, "lr": 0.29995321865645036, "epoch": 0.02607561929595828, "percentage": 0.8, "elapsed_time": "0:03:51", "remaining_time": "7:57:43", "throughput": 3778.19, "total_tokens": 873344}
+ {"current_steps": 165, "total_steps": 20000, "loss": 0.2758, "lr": 0.2999502303374925, "epoch": 0.026890482398956975, "percentage": 0.83, "elapsed_time": "0:03:58", "remaining_time": "7:57:26", "throughput": 3785.47, "total_tokens": 902080}
+ {"current_steps": 170, "total_steps": 20000, "loss": 0.2761, "lr": 0.2999471495216986, "epoch": 0.02770534550195567, "percentage": 0.85, "elapsed_time": "0:04:05", "remaining_time": "7:57:14", "throughput": 3778.53, "total_tokens": 927552}
+ {"current_steps": 175, "total_steps": 20000, "loss": 0.2664, "lr": 0.29994397621096897, "epoch": 0.028520208604954366, "percentage": 0.88, "elapsed_time": "0:04:12", "remaining_time": "7:56:55", "throughput": 3778.81, "total_tokens": 954496}
+ {"current_steps": 180, "total_steps": 20000, "loss": 0.2604, "lr": 0.29994071040726117, "epoch": 0.029335071707953065, "percentage": 0.9, "elapsed_time": "0:04:19", "remaining_time": "7:57:04", "throughput": 3778.51, "total_tokens": 982272}
+ {"current_steps": 185, "total_steps": 20000, "loss": 0.2752, "lr": 0.29993735211258965, "epoch": 0.03014993481095176, "percentage": 0.92, "elapsed_time": "0:04:27", "remaining_time": "7:56:53", "throughput": 3776.3, "total_tokens": 1008832}
+ {"current_steps": 190, "total_steps": 20000, "loss": 0.2686, "lr": 0.299933901329026, "epoch": 0.030964797913950456, "percentage": 0.95, "elapsed_time": "0:04:34", "remaining_time": "7:57:04", "throughput": 3768.08, "total_tokens": 1034496}
+ {"current_steps": 195, "total_steps": 20000, "loss": 0.2749, "lr": 0.2999303580586988, "epoch": 0.03177966101694915, "percentage": 0.97, "elapsed_time": "0:04:41", "remaining_time": "7:56:47", "throughput": 3765.4, "total_tokens": 1060608}
+ {"current_steps": 200, "total_steps": 20000, "loss": 0.2729, "lr": 0.2999267223037937, "epoch": 0.03259452411994785, "percentage": 1.0, "elapsed_time": "0:04:48", "remaining_time": "7:56:44", "throughput": 3765.81, "total_tokens": 1088064}
+ {"current_steps": 200, "total_steps": 20000, "epoch": 0.03259452411994785, "percentage": 1.0, "elapsed_time": "0:06:01", "remaining_time": "9:56:29", "throughput": 3009.78, "total_tokens": 1088064}
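trainer_log.jsonl holds one JSON record per logging step (loss, learning rate, throughput, token counts); the final record for step 200 carries timing only and has no loss field. A short parsing sketch, not part of this commit:

# Minimal sketch (assumption, not part of this commit): reading the JSONL
# training log and printing the loss curve recorded so far.
import json

with open("trainer_log.jsonl") as f:
    records = [json.loads(line) for line in f if line.strip()]

for r in records:
    if "loss" in r:  # the final step-200 record has no loss field
        print(f"step {r['current_steps']:>5}  loss {r['loss']:.4f}  lr {r['lr']:.3e}")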
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be4bab4b3afaef83a72136c5eddc797690b609936a6e0f1a8b1e7b8bbd3273f4
+ size 5752