import os

import numpy as np
import torch
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv

import stacking_env  # local module that defines StackingEnv

# Run on the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Directory for saved checkpoints, stamped with the run date/time.
current_dir = os.getcwd()
model_save_path = os.path.join(current_dir, "stacking_model_20250615_1618")
os.makedirs(model_save_path, exist_ok=True)

# Load every demonstration archive (*.npz) found in the working directory.
npz_files = [f for f in os.listdir(current_dir) if f.endswith(".npz")]
data = {}
for npz_file in npz_files:
    data[npz_file] = np.load(os.path.join(current_dir, npz_file))
    print(f"Loaded {npz_file}")


def prepare_training_data(data_dict):
    """Concatenate the per-file demonstration arrays into flat training arrays."""
    observations = []
    actions = []
    rewards = []
    dones = []
    for npz_file in data_dict:
        observations.append(data_dict[npz_file]["observations"])
        actions.append(data_dict[npz_file]["actions"])
        rewards.append(data_dict[npz_file]["rewards"])
        dones.append(data_dict[npz_file]["dones"])
    return (np.concatenate(observations, axis=0),
            np.concatenate(actions, axis=0),
            np.concatenate(rewards, axis=0),
            np.concatenate(dones, axis=0))


obs, acts, rews, dons = prepare_training_data(data)
print("Training data shapes:", obs.shape, acts.shape, rews.shape, dons.shape)


# Environment factory for the vectorized wrapper; StackingEnv comes from the
# stacking_env module imported at the top of the file.
def make_env():
    return stacking_env.StackingEnv(render_mode=None)


env = DummyVecEnv([make_env])
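
# Optional check (an assumption, not in the original script): the demo arrays
# should line up with the env's spaces, assuming StackingEnv exposes standard
# Gym Box observation and action spaces.
assert obs.shape[1:] == env.observation_space.shape, "observation shape mismatch"
assert acts.shape[1:] == env.action_space.shape, "action shape mismatch"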

# Separate 64x64 MLP heads for the policy (pi) and the value function (vf).
policy_kwargs = {
    "net_arch": {
        "pi": [64, 64],
        "vf": [64, 64],
    }
}

model = PPO(
    "MlpPolicy",
    env,
    policy_kwargs=policy_kwargs,
    verbose=1,
    learning_rate=3e-4,
    n_steps=2048,
    batch_size=64,
    device=device,
)
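
# Optional behavior-cloning warm start (a sketch, not part of the original
# pipeline): PPO is on-policy, so the demonstration arrays loaded above are
# never consumed by model.learn(). One way to use them is to pretrain the
# policy on the (observation, action) pairs before PPO fine-tuning. Assumes a
# continuous (Box) action space; the function name and hyperparameters are
# illustrative, not from SB3.
def pretrain_bc(model, observations, actions, epochs=5, batch_size=256):
    policy = model.policy
    optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
    obs_t = torch.as_tensor(observations, dtype=torch.float32, device=model.device)
    act_t = torch.as_tensor(actions, dtype=torch.float32, device=model.device)
    for epoch in range(epochs):
        perm = torch.randperm(len(obs_t))
        for start in range(0, len(obs_t), batch_size):
            idx = perm[start:start + batch_size]
            # Maximize the log-likelihood of the demo actions under the policy.
            dist = policy.get_distribution(obs_t[idx])
            loss = -dist.log_prob(act_t[idx]).mean()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print(f"BC epoch {epoch + 1}/{epochs}, loss {loss.item():.4f}")

# pretrain_bc(model, obs, acts)  # uncomment to warm-start from the demos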


# Train in 2048-step chunks so a human intervention hook can run between
# rollouts, HIL-SERL style. Here the "intervention" is only a placeholder
# print and does not alter data collection.
total_timesteps = 50000
for _ in range(total_timesteps // 2048):
    model.learn(total_timesteps=2048, reset_num_timesteps=False)
    if np.random.random() < 0.1:
        print(f"Simulated human intervention at step {model.num_timesteps}")


model_path = os.path.join(model_save_path, "ppo_hil_serl_stacking_offline")
model.save(model_path)
print(f"Model saved to {model_path}")


print("Training complete. Upload the dataset folder and a separate demonstration "
      "(e.g., using screenshots) to the Hugging Face LeRobot Worldwide Hackathon community.")