satvikahuja committed
Commit 27dfd60 · verified · 1 Parent(s): 7be1aff

Upload folder using huggingface_hub

Files changed (3):
  1. config.json +72 -0
  2. model.safetensors +3 -0
  3. train_config.json +178 -0
config.json ADDED
@@ -0,0 +1,72 @@
+ {
+     "type": "act",
+     "n_obs_steps": 1,
+     "normalization_mapping": {
+         "VISUAL": "MEAN_STD",
+         "STATE": "MEAN_STD",
+         "ACTION": "MEAN_STD"
+     },
+     "input_features": {
+         "observation.state": {
+             "type": "STATE",
+             "shape": [
+                 7
+             ]
+         },
+         "observation.images.front": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         },
+         "observation.images.gripper": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         },
+         "observation.images.top": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         }
+     },
+     "output_features": {
+         "action": {
+             "type": "ACTION",
+             "shape": [
+                 7
+             ]
+         }
+     },
+     "device": "cuda",
+     "use_amp": false,
+     "chunk_size": 100,
+     "n_action_steps": 100,
+     "vision_backbone": "resnet18",
+     "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+     "replace_final_stride_with_dilation": false,
+     "pre_norm": false,
+     "dim_model": 512,
+     "n_heads": 8,
+     "dim_feedforward": 3200,
+     "feedforward_activation": "relu",
+     "n_encoder_layers": 4,
+     "n_decoder_layers": 1,
+     "use_vae": true,
+     "latent_dim": 32,
+     "n_vae_encoder_layers": 4,
+     "temporal_ensemble_coeff": null,
+     "dropout": 0.1,
+     "kl_weight": 10.0,
+     "optimizer_lr": 1e-05,
+     "optimizer_weight_decay": 0.0001,
+     "optimizer_lr_backbone": 1e-05
+ }
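This config.json describes a LeRobot ACT policy: three 480×640 RGB cameras plus a 7-dimensional robot state in, a 7-dimensional action out, predicted in chunks of 100 steps over a ResNet-18 backbone with a 32-dim VAE latent. Below is a minimal sketch of loading and querying the checkpoint, assuming the lerobot package is installed; the import path, `from_pretrained`, `reset`, and `select_action` calls follow recent lerobot releases and may differ in other versions, and the repo id is a placeholder for this model repository (the config also pins `device` to cuda).

```python
import torch
from lerobot.common.policies.act.modeling_act import ACTPolicy  # path in recent releases; may have moved

# Placeholder repo id: substitute the id of this model repository.
policy = ACTPolicy.from_pretrained("satvikahuja/<model-repo-id>")
policy.eval()
policy.reset()  # clear the internal action queue

# Dummy observation matching the input_features above: a 7-dim state plus
# three 3x480x640 float images (front, gripper, top), batch size 1.
batch = {
    "observation.state": torch.zeros(1, 7),
    "observation.images.front": torch.zeros(1, 3, 480, 640),
    "observation.images.gripper": torch.zeros(1, 3, 480, 640),
    "observation.images.top": torch.zeros(1, 3, 480, 640),
}
with torch.no_grad():
    action = policy.select_action(batch)
print(action.shape)  # expected: torch.Size([1, 7])
```

Because `temporal_ensemble_coeff` is null and `n_action_steps` equals `chunk_size` (100), repeated `select_action` calls pop actions from the predicted 100-step chunk before the model is queried again.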
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03a586983e845f45b734ebde57d660a31d3b219f274b5dcbc845cfa7b3c66707
+ size 206709564
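model.safetensors is stored through Git LFS, so the commit itself only adds this three-line pointer (the SHA-256 of the blob and its size, about 207 MB); the weights are fetched on download. A small sketch, assuming a downloaded local copy at `model.safetensors`, that checks the file against the pointer and counts parameters using the standard library plus the `safetensors` package:

```python
import hashlib
import os

from safetensors import safe_open

path = "model.safetensors"  # assumed local download location
expected_oid = "03a586983e845f45b734ebde57d660a31d3b219f274b5dcbc845cfa7b3c66707"
expected_size = 206709564

# The Git LFS object id is a plain SHA-256 over the file contents.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid

# Count parameters without materialising the whole state dict at once.
with safe_open(path, framework="pt", device="cpu") as f:
    n_params = sum(f.get_tensor(name).numel() for name in f.keys())
print(f"{n_params:,} parameters")  # roughly expected_size / 4 for float32 weights
```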
train_config.json ADDED
@@ -0,0 +1,178 @@
+ {
+     "dataset": {
+         "repo_id": "satvikahuja/picknplace_vegetables_pan_new_11",
+         "root": null,
+         "episodes": null,
+         "image_transforms": {
+             "enable": false,
+             "max_num_transforms": 3,
+             "random_order": false,
+             "tfs": {
+                 "brightness": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "brightness": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "contrast": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "contrast": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "saturation": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "saturation": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 },
+                 "hue": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "hue": [
+                             -0.05,
+                             0.05
+                         ]
+                     }
+                 },
+                 "sharpness": {
+                     "weight": 1.0,
+                     "type": "SharpnessJitter",
+                     "kwargs": {
+                         "sharpness": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 }
+             }
+         },
+         "revision": null,
+         "use_imagenet_stats": true,
+         "video_backend": "torchcodec"
+     },
+     "env": null,
+     "policy": {
+         "type": "act",
+         "n_obs_steps": 1,
+         "normalization_mapping": {
+             "VISUAL": "MEAN_STD",
+             "STATE": "MEAN_STD",
+             "ACTION": "MEAN_STD"
+         },
+         "input_features": {
+             "observation.state": {
+                 "type": "STATE",
+                 "shape": [
+                     7
+                 ]
+             },
+             "observation.images.front": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             },
+             "observation.images.gripper": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             },
+             "observation.images.top": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             }
+         },
+         "output_features": {
+             "action": {
+                 "type": "ACTION",
+                 "shape": [
+                     7
+                 ]
+             }
+         },
+         "device": "cuda",
+         "use_amp": false,
+         "chunk_size": 100,
+         "n_action_steps": 100,
+         "vision_backbone": "resnet18",
+         "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+         "replace_final_stride_with_dilation": false,
+         "pre_norm": false,
+         "dim_model": 512,
+         "n_heads": 8,
+         "dim_feedforward": 3200,
+         "feedforward_activation": "relu",
+         "n_encoder_layers": 4,
+         "n_decoder_layers": 1,
+         "use_vae": true,
+         "latent_dim": 32,
+         "n_vae_encoder_layers": 4,
+         "temporal_ensemble_coeff": null,
+         "dropout": 0.1,
+         "kl_weight": 10.0,
+         "optimizer_lr": 1e-05,
+         "optimizer_weight_decay": 0.0001,
+         "optimizer_lr_backbone": 1e-05
+     },
+     "output_dir": "outputs/train/ACT_veggies_latest_new",
+     "job_name": "act",
+     "resume": false,
+     "seed": 1000,
+     "num_workers": 4,
+     "batch_size": 32,
+     "steps": 200000,
+     "eval_freq": 5000,
+     "log_freq": 200,
+     "save_checkpoint": true,
+     "save_freq": 5000,
+     "use_policy_training_preset": true,
+     "optimizer": {
+         "type": "adamw",
+         "lr": 1e-05,
+         "weight_decay": 0.0001,
+         "grad_clip_norm": 10.0,
+         "betas": [
+             0.9,
+             0.999
+         ],
+         "eps": 1e-08
+     },
+     "scheduler": null,
+     "eval": {
+         "n_episodes": 50,
+         "batch_size": 50,
+         "use_async_envs": false
+     },
+     "wandb": {
+         "enable": true,
+         "disable_artifact": false,
+         "project": "lerobot",
+         "entity": null,
+         "notes": null,
+         "run_id": "vfndpntj",
+         "mode": null
+     }
+ }
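train_config.json captures the full training recipe: the satvikahuja/picknplace_vegetables_pan_new_11 dataset (image transforms defined but disabled), the same ACT policy block as config.json, AdamW at lr 1e-05 with no scheduler for 200,000 steps at batch size 32, checkpoints and eval every 5,000 steps into outputs/train/ACT_veggies_latest_new, and logging to the lerobot Weights & Biases project. A standard-library sketch that reads the file and summarises the run; recent lerobot releases can consume a config of this shape directly, though the exact training entry point and flags vary by version:

```python
import json

# Assumes train_config.json has been downloaded to the working directory.
with open("train_config.json") as f:
    cfg = json.load(f)

print("dataset:      ", cfg["dataset"]["repo_id"])
print("policy:       ", cfg["policy"]["type"])
print("steps:        ", cfg["steps"])
print("batch size:   ", cfg["batch_size"])
print("optimizer:    ", cfg["optimizer"]["type"], "lr =", cfg["optimizer"]["lr"])
print("checkpoints:   every", cfg["save_freq"], "steps ->", cfg["output_dir"])

# Roughly how many samples the policy sees over the whole run.
print("total samples:", cfg["steps"] * cfg["batch_size"])  # 200000 * 32 = 6,400,000
```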