Robotics
LeRobot
Safetensors
smolvla
jayyucippg committed (verified)
Commit 5f663c1 · 1 Parent(s): 17b8758

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +63 -0
  2. config.json +80 -0
  3. model.safetensors +3 -0
  4. train_config.json +233 -0
README.md ADDED
---
base_model: lerobot/smolvla_base
datasets: jayyucippg/record-side-views-stack3
library_name: lerobot
license: apache-2.0
model_name: smolvla
pipeline_tag: robotics
tags:
- lerobot
- robotics
- smolvla
---

# Model Card for smolvla

<!-- Provide a quick summary of what the model is/does. -->

[SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware.

This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).

---

## How to Get Started with the Model

For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
Below is the short version of how to train and run inference/evaluation:

### Train from scratch

```bash
python -m lerobot.scripts.train \
  --dataset.repo_id=${HF_USER}/<dataset> \
  --policy.type=smolvla \
  --output_dir=outputs/train/<desired_policy_repo_id> \
  --job_name=lerobot_training \
  --policy.device=cuda \
  --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
  --wandb.enable=true
```

*Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`.*

### Evaluate the policy/run inference

```bash
python -m lerobot.record \
  --robot.type=so100_follower \
  --dataset.repo_id=<hf_user>/eval_<dataset> \
  --policy.path=<hf_user>/<desired_policy_repo_id> \
  --episodes=10
```

Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or Hub checkpoint.
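
### Load the policy in Python (optional)

For quick offline checks, the policy can also be loaded directly with the LeRobot Python API. The snippet below is a minimal sketch rather than an official recipe: it assumes a recent `lerobot` install where `SmolVLAPolicy` is importable as shown (the module path has moved between releases), the observation keys and shapes are taken from this repo's `config.json`, and the task string is a hypothetical placeholder.

```python
import torch
from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy  # path may differ per lerobot version

# Load the fine-tuned policy from this repo.
policy = SmolVLAPolicy.from_pretrained("jayyucippg/smolvla-record-side-views-stack3")
policy.eval()
policy.to("cuda")
policy.reset()  # clear any internal action queue before an episode

# Dummy observation matching the input features in config.json:
# a 6-dim state vector and one 3x480x640 side-camera image, plus a language instruction.
batch = {
    "observation.state": torch.zeros(1, 6, device="cuda"),
    "observation.images.side": torch.zeros(1, 3, 480, 640, device="cuda"),
    "task": "stack the blocks",  # hypothetical instruction, for illustration only
}

with torch.no_grad():
    action = policy.select_action(batch)  # expected shape: (1, 6)
print(action.shape)
```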

---

## Model Details

* **License:** apache-2.0
config.json ADDED
{
    "type": "smolvla",
    "n_obs_steps": 1,
    "normalization_mapping": {
        "VISUAL": "IDENTITY",
        "STATE": "MEAN_STD",
        "ACTION": "MEAN_STD"
    },
    "input_features": {
        "observation.state": {
            "type": "STATE",
            "shape": [6]
        },
        "observation.images.side": {
            "type": "VISUAL",
            "shape": [3, 480, 640]
        }
    },
    "output_features": {
        "action": {
            "type": "ACTION",
            "shape": [6]
        }
    },
    "device": "cuda",
    "use_amp": false,
    "push_to_hub": true,
    "repo_id": "jayyucippg/smolvla-record-side-views-stack3",
    "private": null,
    "tags": null,
    "license": null,
    "chunk_size": 50,
    "n_action_steps": 50,
    "max_state_dim": 32,
    "max_action_dim": 32,
    "resize_imgs_with_padding": [512, 512],
    "empty_cameras": 0,
    "adapt_to_pi_aloha": false,
    "use_delta_joint_actions_aloha": false,
    "tokenizer_max_length": 48,
    "num_steps": 10,
    "use_cache": true,
    "freeze_vision_encoder": true,
    "train_expert_only": true,
    "train_state_proj": true,
    "optimizer_lr": 0.0001,
    "optimizer_betas": [0.9, 0.95],
    "optimizer_eps": 1e-08,
    "optimizer_weight_decay": 1e-10,
    "optimizer_grad_clip_norm": 10.0,
    "scheduler_warmup_steps": 1000,
    "scheduler_decay_steps": 30000,
    "scheduler_decay_lr": 2.5e-06,
    "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
    "load_vlm_weights": true,
    "add_image_special_tokens": false,
    "attention_mode": "cross_attn",
    "prefix_length": 0,
    "pad_language_to": "max_length",
    "num_expert_layers": 0,
    "num_vlm_layers": 16,
    "self_attn_every_n_layers": 2,
    "expert_width_multiplier": 0.75,
    "min_period": 0.004,
    "max_period": 4.0
}
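
The `chunk_size` and `n_action_steps` entries above control action chunking: each forward pass predicts a chunk of 50 future actions, and 50 of them are executed before the policy is queried again. The sketch below is purely conceptual, not LeRobot's implementation; `predict_action_chunk` is a hypothetical stand-in for the policy's chunk prediction.

```python
from collections import deque

import torch

CHUNK_SIZE = 50      # "chunk_size": actions predicted per forward pass
N_ACTION_STEPS = 50  # "n_action_steps": actions executed from each chunk
ACTION_DIM = 6       # from "output_features.action.shape"

def predict_action_chunk(observation: dict) -> torch.Tensor:
    """Hypothetical stand-in for the policy's chunk prediction."""
    return torch.zeros(CHUNK_SIZE, ACTION_DIM)

action_queue: deque = deque()

def next_action(observation: dict) -> torch.Tensor:
    """Pop the next low-level action, re-planning only when the queue is empty."""
    if not action_queue:
        chunk = predict_action_chunk(observation)
        action_queue.extend(chunk[:N_ACTION_STEPS])
    return action_queue.popleft()

# With n_action_steps == chunk_size, the policy is queried once every 50 control steps.
for step in range(120):
    action = next_action({"dummy_observation": None})
```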
model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:517945c48660b91fd001a16b105914b283814709154bf7e66918bb4ff626593a
size 906713296
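
The entry above is a Git LFS pointer; the actual weights (~907 MB) are fetched on clone or download. To sanity-check the file locally, the `safetensors` library can open it lazily and list the stored tensors (a minimal sketch, run next to the downloaded `model.safetensors`):

```python
from safetensors import safe_open

# Open the checkpoint without loading every tensor into memory.
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    names = list(f.keys())
    print(f"{len(names)} tensors stored")
    for name in names[:5]:  # peek at the first few entries
        print(name, tuple(f.get_tensor(name).shape))
```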
train_config.json ADDED
{
    "dataset": {
        "repo_id": "jayyucippg/record-side-views-stack3",
        "root": null,
        "episodes": [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
            20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
            30, 31, 32, 33, 34, 35, 36, 37, 38, 39
        ],
        "image_transforms": {
            "enable": false,
            "max_num_transforms": 3,
            "random_order": false,
            "tfs": {
                "brightness": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "brightness": [0.8, 1.2]
                    }
                },
                "contrast": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "contrast": [0.8, 1.2]
                    }
                },
                "saturation": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "saturation": [0.5, 1.5]
                    }
                },
                "hue": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {
                        "hue": [-0.05, 0.05]
                    }
                },
                "sharpness": {
                    "weight": 1.0,
                    "type": "SharpnessJitter",
                    "kwargs": {
                        "sharpness": [0.5, 1.5]
                    }
                }
            }
        },
        "revision": null,
        "use_imagenet_stats": true,
        "video_backend": "torchcodec"
    },
    "env": null,
    "policy": {
        "type": "smolvla",
        "n_obs_steps": 1,
        "normalization_mapping": {
            "VISUAL": "IDENTITY",
            "STATE": "MEAN_STD",
            "ACTION": "MEAN_STD"
        },
        "input_features": {
            "observation.state": {
                "type": "STATE",
                "shape": [6]
            },
            "observation.images.side": {
                "type": "VISUAL",
                "shape": [3, 480, 640]
            }
        },
        "output_features": {
            "action": {
                "type": "ACTION",
                "shape": [6]
            }
        },
        "device": "cuda",
        "use_amp": false,
        "push_to_hub": true,
        "repo_id": "jayyucippg/smolvla-record-side-views-stack3",
        "private": null,
        "tags": null,
        "license": null,
        "chunk_size": 50,
        "n_action_steps": 50,
        "max_state_dim": 32,
        "max_action_dim": 32,
        "resize_imgs_with_padding": [512, 512],
        "empty_cameras": 0,
        "adapt_to_pi_aloha": false,
        "use_delta_joint_actions_aloha": false,
        "tokenizer_max_length": 48,
        "num_steps": 10,
        "use_cache": true,
        "freeze_vision_encoder": true,
        "train_expert_only": true,
        "train_state_proj": true,
        "optimizer_lr": 0.0001,
        "optimizer_betas": [0.9, 0.95],
        "optimizer_eps": 1e-08,
        "optimizer_weight_decay": 1e-10,
        "optimizer_grad_clip_norm": 10.0,
        "scheduler_warmup_steps": 1000,
        "scheduler_decay_steps": 30000,
        "scheduler_decay_lr": 2.5e-06,
        "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
        "load_vlm_weights": true,
        "add_image_special_tokens": false,
        "attention_mode": "cross_attn",
        "prefix_length": 0,
        "pad_language_to": "max_length",
        "num_expert_layers": 0,
        "num_vlm_layers": 16,
        "self_attn_every_n_layers": 2,
        "expert_width_multiplier": 0.75,
        "min_period": 0.004,
        "max_period": 4.0
    },
    "output_dir": "/mnt/hdd/jyq/tmp/train/record-side-views-stack3-smolvla-2",
    "job_name": "record-side-views-stack3",
    "resume": false,
    "seed": 1000,
    "num_workers": 4,
    "batch_size": 8,
    "steps": 100000,
    "eval_freq": 20000,
    "log_freq": 200,
    "save_checkpoint": true,
    "save_freq": 20000,
    "use_policy_training_preset": true,
    "optimizer": {
        "type": "adamw",
        "lr": 0.0001,
        "weight_decay": 1e-10,
        "grad_clip_norm": 10.0,
        "betas": [0.9, 0.95],
        "eps": 1e-08
    },
    "scheduler": {
        "type": "cosine_decay_with_warmup",
        "num_warmup_steps": 1000,
        "num_decay_steps": 30000,
        "peak_lr": 0.0001,
        "decay_lr": 2.5e-06
    },
    "eval": {
        "n_episodes": 50,
        "batch_size": 50,
        "use_async_envs": false
    },
    "wandb": {
        "enable": true,
        "disable_artifact": false,
        "project": "lerobot",
        "entity": null,
        "notes": null,
        "run_id": "2mze8ux5",
        "mode": null
    }
}
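
The `scheduler` block above describes a cosine decay with linear warmup: the learning rate ramps to `peak_lr` over 1,000 steps, decays toward `decay_lr` over 30,000 steps, and then holds at the floor for the remaining training steps. Below is a standalone sketch of that shape, under the assumption that the decay phase cosine-interpolates between `peak_lr` and `decay_lr`; it is illustrative, not the exact LeRobot scheduler code.

```python
import math

def lr_at(step: int,
          warmup_steps: int = 1_000,
          decay_steps: int = 30_000,
          peak_lr: float = 1e-4,
          decay_lr: float = 2.5e-6) -> float:
    """Cosine decay with linear warmup, mirroring the scheduler block above (sketch)."""
    if step < warmup_steps:
        return peak_lr * step / warmup_steps              # linear warmup to peak_lr
    if step >= decay_steps:
        return decay_lr                                   # constant floor after decay
    progress = (step - warmup_steps) / (decay_steps - warmup_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))   # goes from 1 to 0
    return decay_lr + (peak_lr - decay_lr) * cosine

for s in (0, 500, 1_000, 15_000, 30_000, 100_000):
    print(f"step {s:>6}: lr = {lr_at(s):.2e}")
```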