lmytimagedata committed verified commit 9f2f8d3 · 1 parent: 848b6a2

Upload checkpoint-5200

checkpoint-5200/config.json ADDED
@@ -0,0 +1,150 @@
+ {
+   "batch_size": 32,
+   "buffer_size": 64,
+   "eval_mix": "chris_aubo",
+   "frozen_keys": [
+     "*hf_model*"
+   ],
+   "lora": false,
+   "lora_config": {
+     "bias": "none",
+     "lora_alpha": 16,
+     "lora_dropout": 0.05,
+     "r": 8
+   },
+   "model": {
+     "heads": {
+       "action": {
+         "args": [],
+         "kwargs": {
+           "action_dim": 7,
+           "action_horizon": 4,
+           "dropout_rate": 0.0,
+           "n_diffusion_samples": 1,
+           "readout_key": "readout_action",
+           "token_embedding_size": 768,
+           "use_map": false
+         },
+         "module": "octo.model.components.action_heads",
+         "name": "DiffusionActionHead"
+       }
+     },
+     "max_horizon": 10,
+     "observation_tokenizers": {
+       "primary": {
+         "args": [],
+         "kwargs": {
+           "encoder": {
+             "args": [],
+             "kwargs": {
+               "in_features": 6
+             },
+             "module": "octo.model.components.vit_encoders",
+             "name": "SmallStem16"
+           },
+           "obs_stack_keys": [
+             "image_primary"
+           ],
+           "task_stack_keys": [
+             "image_primary"
+           ]
+         },
+         "module": "octo.model.components.tokenizers",
+         "name": "ImageTokenizer"
+       }
+     },
+     "readouts": {
+       "action": 1
+     },
+     "repeat_task_tokens": true,
+     "task_tokenizers": {
+       "language": {
+         "args": [],
+         "kwargs": {
+           "encoder": "t5-base",
+           "finetune_encoder": false
+         },
+         "module": "octo.model.components.tokenizers",
+         "name": "LanguageTokenizer"
+       }
+     },
+     "token_embedding_size": 768,
+     "transformer_kwargs": {
+       "add_position_embedding": false,
+       "attention_dropout_rate": 0.0,
+       "dropout_rate": 0.0,
+       "mlp_dim": 3072,
+       "num_attention_heads": 12,
+       "num_layers": 12
+     },
+     "use_correct_attention": true
+   },
+   "normalize_method": "sign",
+   "obs_token_nums": {
+     "primary": 256
+   },
+   "output_dir": "ljp_aubo_20250610_test",
+   "pretrained": {
+     "kwargs": {
+       "subpath": "oxe-g2-checkpoint-300000"
+     },
+     "model": "hf://chuanmew/octo_torch"
+   },
+   "run_name": "aubo",
+   "sampler_num_samples": 2560000,
+   "seed": 42,
+   "subsample_length": 99999,
+   "text_processor": {
+     "args": [],
+     "kwargs": {
+       "encode_with_model": false,
+       "tokenizer_kwargs": {
+         "max_length": 16,
+         "padding": "max_length",
+         "return_tensors": "np",
+         "truncation": true
+       },
+       "tokenizer_name": "t5-base"
+     },
+     "module": "octo.components.text_processing",
+     "name": "HFTokenizer"
+   },
+   "train_mix": "ljp_aubo_merged_20250519to20250529_20250604",
+   "training_arguments": {
+     "bf16": true,
+     "dataloader_pin_memory": true,
+     "ddp_find_unused_parameters": true,
+     "eval_steps": 1000,
+     "gradient_accumulation_steps": 1,
+     "learning_rate": 3e-05,
+     "logging_nan_inf_filter": false,
+     "logging_steps": 1000,
+     "lr_scheduler_kwargs": {
+       "decay_type": "cosine",
+       "min_lr_ratio": 0.05,
+       "num_decay_steps": 0
+     },
+     "lr_scheduler_type": "warmup_stable_decay",
+     "max_grad_norm": 1.0,
+     "max_steps": 5200,
+     "optim": "adamw_torch_fused",
+     "per_device_eval_batch_size": 1,
+     "per_device_train_batch_size": 1,
+     "report_to": "wandb",
+     "save_safetensors": true,
+     "save_steps": 5200,
+     "torch_compile": true,
+     "warmup_steps": 100,
+     "weight_decay": 0.1
+   },
+   "training_keys": [
+     "*action*",
+     "*observation_tokenizers*",
+     "*transformer.transformer*",
+     "*task_projections*",
+     "*obs_projections*",
+     "*readout_embeddings*",
+     "*task_pos_embeddings*",
+     "*obs_pos_embeddings*"
+   ]
+ }
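The config above is plain JSON, so it can be inspected directly once the checkpoint directory is downloaded. A minimal sketch (the local path is an assumption) that pulls out the training hyperparameters and the glob patterns controlling which parameters are frozen versus fine-tuned:

```python
import json
from pathlib import Path

# Assumed local path after downloading the checkpoint directory.
cfg = json.loads(Path("checkpoint-5200/config.json").read_text())

# Hyperparameters forwarded to the training loop.
print(cfg["training_arguments"]["learning_rate"])  # 3e-05
print(cfg["training_arguments"]["max_steps"])      # 5200

# Glob-style patterns: parameters matching frozen_keys stay frozen,
# parameters matching training_keys are updated during fine-tuning.
print(cfg["frozen_keys"])       # ["*hf_model*"]
print(cfg["training_keys"][:3])
```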
checkpoint-5200/dataset_statistics.json ADDED
@@ -0,0 +1,98 @@
+ [
+   {
+     "action": {
+       "mean": [
+         -0.00010803448094520718,
+         -0.00029065669514238834,
+         -0.0002709113177843392,
+         -2.0056504581589252e-05,
+         2.6392210656922543e-06,
+         -0.0002713081194087863,
+         0.532845139503479
+       ],
+       "std": [
+         0.006096565164625645,
+         0.006544822361320257,
+         0.007622068747878075,
+         0.0045709386467933655,
+         0.0024091359227895737,
+         0.011421299539506435,
+         0.4991016685962677
+       ],
+       "min": [
+         -0.009999999776482582,
+         -0.009999999776482582,
+         -0.009999999776482582,
+         -0.06283185631036758,
+         -0.06283185631036758,
+         -0.06283185631036758,
+         0.0
+       ],
+       "max": [
+         0.009999999776482582,
+         0.009999999776482582,
+         0.009999999776482582,
+         0.06283185631036758,
+         0.06283185631036758,
+         0.06283185631036758,
+         1.0
+       ],
+       "mask": [
+         true,
+         true,
+         true,
+         true,
+         true,
+         true,
+         false
+       ]
+     },
+     "state": {
+       "mean": [
+         -0.5312747955322266,
+         -0.18303827941417694,
+         0.00013026571832597256,
+         -1.1791322231292725,
+         0.008879062719643116,
+         -0.04497119411826134,
+         0.5303144454956055
+       ],
+       "std": [
+         0.0928654745221138,
+         0.11658772081136703,
+         0.04497113078832626,
+         2.8499042987823486,
+         0.053659822791814804,
+         0.9866101145744324,
+         0.49907076358795166
+       ],
+       "min": [
+         -0.7517302632331848,
+         -0.5310887694358826,
+         -0.11154260486364365,
+         -3.1415765285491943,
+         -0.6673082113265991,
+         -3.099342107772827,
+         0.0
+       ],
+       "max": [
+         -0.25762036442756653,
+         0.1041782796382904,
+         0.1655297726392746,
+         3.141587018966675,
+         0.3089899718761444,
+         3.1375341415405273,
+         1.0
+       ],
+       "mask": [
+         true,
+         true,
+         true,
+         true,
+         true,
+         true,
+         false
+       ]
+     }
+   }
+ ]
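These per-dimension statistics are the kind typically used to normalize actions and states during training and to de-normalize predicted actions at inference; the final (gripper) dimension is excluded via the mask. A minimal de-normalization sketch under that assumption (the exact scheme the training code applies, e.g. the "sign" normalize_method in the config, may differ):

```python
import json
import numpy as np

stats = json.load(open("checkpoint-5200/dataset_statistics.json"))[0]["action"]
mean, std = np.asarray(stats["mean"]), np.asarray(stats["std"])
mask = np.asarray(stats["mask"])  # last (gripper) dim is False: left unscaled

def unnormalize(a: np.ndarray) -> np.ndarray:
    """Map a normalized 7-D action back to raw units, skipping masked-out dims."""
    return np.where(mask, a * std + mean, a)

print(unnormalize(np.zeros(7)))  # masked dim stays 0.0, others revert to the mean
```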
checkpoint-5200/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0888b1128263528119416f410a46000f6ad96cf426ac7315d40d9cb356da0012
+ size 401078544
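What the diff shows is only the Git LFS pointer; after pulling the ~400 MB object, the raw weights can be inspected with the safetensors library. A sketch, independent of the octo_torch model code:

```python
from safetensors.torch import load_file

# Requires the actual LFS payload, not the pointer file shown above.
state_dict = load_file("checkpoint-5200/model.safetensors")
n_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {n_params / 1e6:.1f}M parameters")
```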
checkpoint-5200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdf8a8e17e7e057b7258aca5f96c6e56a1e45950b48d435e5c8c1d14ca182ebb
+ size 363714362
checkpoint-5200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4086c8bca4da6f79892494c895b36c82784fd90616a8ff70d0acb0be57d6988
+ size 15958
checkpoint-5200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfcbd8326c1d981dae3ac84fb07d1afe2482644aa7866ceef8b55cd8846798d5
+ size 1064
checkpoint-5200/trainer_state.json ADDED
@@ -0,0 +1,109 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.065,
+   "eval_steps": 1000,
+   "global_step": 5200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0125,
+       "grad_norm": 19.25,
+       "learning_rate": 3e-05,
+       "loss": 0.6663,
+       "step": 1000
+     },
+     {
+       "epoch": 0.0125,
+       "eval_loss": 0.5592955350875854,
+       "eval_runtime": 10.3881,
+       "eval_samples_per_second": 364.457,
+       "eval_steps_per_second": 364.457,
+       "step": 1000
+     },
+     {
+       "epoch": 0.025,
+       "grad_norm": 14.4375,
+       "learning_rate": 3e-05,
+       "loss": 0.521,
+       "step": 2000
+     },
+     {
+       "epoch": 0.025,
+       "eval_loss": 0.5978671908378601,
+       "eval_runtime": 9.3435,
+       "eval_samples_per_second": 405.202,
+       "eval_steps_per_second": 405.202,
+       "step": 2000
+     },
+     {
+       "epoch": 0.0375,
+       "grad_norm": 14.3125,
+       "learning_rate": 3e-05,
+       "loss": 0.48,
+       "step": 3000
+     },
+     {
+       "epoch": 0.0375,
+       "eval_loss": 0.5614296793937683,
+       "eval_runtime": 9.222,
+       "eval_samples_per_second": 410.542,
+       "eval_steps_per_second": 410.542,
+       "step": 3000
+     },
+     {
+       "epoch": 0.05,
+       "grad_norm": 8.75,
+       "learning_rate": 3e-05,
+       "loss": 0.4596,
+       "step": 4000
+     },
+     {
+       "epoch": 0.05,
+       "eval_loss": 0.5450547337532043,
+       "eval_runtime": 9.3144,
+       "eval_samples_per_second": 406.469,
+       "eval_steps_per_second": 406.469,
+       "step": 4000
+     },
+     {
+       "epoch": 0.0625,
+       "grad_norm": 8.625,
+       "learning_rate": 3e-05,
+       "loss": 0.4418,
+       "step": 5000
+     },
+     {
+       "epoch": 0.0625,
+       "eval_loss": 0.5600486993789673,
+       "eval_runtime": 9.2291,
+       "eval_samples_per_second": 410.225,
+       "eval_steps_per_second": 410.225,
+       "step": 5000
+     }
+   ],
+   "logging_steps": 1000,
+   "max_steps": 5200,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 5200,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
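trainer_state.json follows the standard Hugging Face Trainer layout, so the train and eval loss curves can be read straight out of log_history. A minimal sketch:

```python
import json

state = json.load(open("checkpoint-5200/trainer_state.json"))
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(train)  # training loss drops from 0.6663 (step 1000) to 0.4418 (step 5000)
print(evals)  # eval_loss stays roughly in the 0.55-0.60 range
```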
checkpoint-5200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:094ed2b645a2be2ced00774718dbfb2861c458e427f38f3a2f7f1b5120e2274b
+ size 5432
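training_args.bin is the pickled TrainingArguments object that the Hugging Face Trainer saves alongside each checkpoint. A sketch of deserializing it (recent PyTorch versions require weights_only=False to unpickle arbitrary objects):

```python
import torch

# Assumes the actual LFS payload has been fetched.
args = torch.load("checkpoint-5200/training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps, args.lr_scheduler_type)
```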