import os

import numpy as np
import torch
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv

# Verify GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Resolve the working directory (assumed to be the dataset directory containing stacking_env.py and the .npz files)
current_dir = os.getcwd()
model_save_path = os.path.join(current_dir, "stacking_model_20250615_1618")
os.makedirs(model_save_path, exist_ok=True)

# Load .npz files directly from the directory
npz_files = [f for f in os.listdir(current_dir) if f.endswith(".npz")]
data = {}
for npz_file in npz_files:
    data[npz_file] = np.load(os.path.join(current_dir, npz_file))
    print(f"Loaded {npz_file}")

# Prepare training data from the .npz files
# (each file is expected to contain "observations", "actions", "rewards", and "dones" arrays)
def prepare_training_data(data_dict):
    observations = []
    actions = []
    rewards = []
    dones = []
    for npz_file in data_dict:
        observations.append(data_dict[npz_file]["observations"])
        actions.append(data_dict[npz_file]["actions"])
        rewards.append(data_dict[npz_file]["rewards"])
        dones.append(data_dict[npz_file]["dones"])
    return (np.concatenate(observations, axis=0),
            np.concatenate(actions, axis=0),
            np.concatenate(rewards, axis=0),
            np.concatenate(dones, axis=0))

obs, acts, rews, dons = prepare_training_data(data)
print("Training data shapes:", obs.shape, acts.shape, rews.shape, dons.shape)

# Define environment from stacking_env.py in the same directory
import stacking_env
def make_env():
    return stacking_env.StackingEnv(render_mode=None)  # Disable rendering for training

env = DummyVecEnv([make_env])
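
# Quick sanity check (illustrative; assumes StackingEnv exposes Gymnasium-style
# observation_space / action_space attributes, which Stable-Baselines3 requires).
print("Observation space:", env.observation_space)
print("Action space:", env.action_space)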

# Initialize PPO with the dict-based net_arch format expected by recent Stable-Baselines3 releases
policy_kwargs = {
    "net_arch": {
        "pi": [64, 64],  # Policy network architecture
        "vf": [64, 64]   # Value network architecture
    }
}
model = PPO(
    "MlpPolicy",
    env,
    policy_kwargs=policy_kwargs,
    verbose=1,
    learning_rate=3e-4,
    n_steps=2048,
    batch_size=64,
    device=device  # Use detected device (cuda or cpu)
)
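
# ---------------------------------------------------------------------------
# Optional, illustrative sketch (not part of the original pipeline): PPO's
# on-policy updates do not consume the offline arrays loaded above. A
# hypothetical behavior-cloning warm start of the policy from those
# demonstrations could look like this (assumes a continuous action space);
# it is disabled by default.
# ---------------------------------------------------------------------------
PRETRAIN_WITH_BC = False
if PRETRAIN_WITH_BC:
    # Convert demonstrations to tensors on the model's device
    obs_tensor, _ = model.policy.obs_to_tensor(obs)
    act_tensor = torch.as_tensor(acts, dtype=torch.float32, device=model.device)
    bc_optimizer = torch.optim.Adam(model.policy.parameters(), lr=1e-4)
    for epoch in range(5):  # a few supervised passes over the demonstrations
        _, log_prob, _ = model.policy.evaluate_actions(obs_tensor, act_tensor)
        bc_loss = -log_prob.mean()  # maximize likelihood of demonstrated actions
        bc_optimizer.zero_grad()
        bc_loss.backward()
        bc_optimizer.step()
        print(f"BC warm-start epoch {epoch}: loss={bc_loss.item():.4f}")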

# RL training loop with simulated HIL interventions
# (the "intervention" here is only a log message; it does not alter the training data)
total_timesteps = 50000  # Adjust based on available time; reduce to 10000 if needed
for _ in range(total_timesteps // 2048):  # 2048 matches n_steps above
    model.learn(total_timesteps=2048, reset_num_timesteps=False)
    if np.random.random() < 0.1:  # 10% chance of a simulated intervention
        print(f"Simulated human intervention at step {model.num_timesteps}")

# Save the trained model
model_path = os.path.join(model_save_path, "ppo_hil_serl_stacking_offline")
model.save(model_path)
print(f"Model saved to {model_path}")

# Cleanup
env.close()
print("Training complete. Upload the dataset folder and a separate demonstration (e.g., using screenshots) to the Hugging Face LeRobot Worldwide Hackathon community.")