import os
import numpy as np
import torch
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv
import time
# Verify GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Use the current working directory (assumed to be the dataset directory containing stacking_env.py)
current_dir = os.getcwd()
model_save_path = os.path.join(current_dir, "stacking_model_20250615_1618")
os.makedirs(model_save_path, exist_ok=True)
# Load .npz files directly from the directory
npz_files = [f for f in os.listdir(current_dir) if f.endswith(".npz")]
data = {}
for npz_file in npz_files:
    data[npz_file] = np.load(os.path.join(current_dir, npz_file))
    print(f"Loaded {npz_file}")
# Prepare training data from .npz files
def prepare_training_data(data_dict):
    observations = []
    actions = []
    rewards = []
    dones = []
    for npz_file in data_dict:
        observations.append(data_dict[npz_file]["observations"])
        actions.append(data_dict[npz_file]["actions"])
        rewards.append(data_dict[npz_file]["rewards"])
        dones.append(data_dict[npz_file]["dones"])
    return (np.concatenate(observations, axis=0),
            np.concatenate(actions, axis=0),
            np.concatenate(rewards, axis=0),
            np.concatenate(dones, axis=0))
obs, acts, rews, dons = prepare_training_data(data)
print("Training data shapes:", obs.shape, acts.shape, rews.shape, dons.shape)
# Define environment from stacking_env.py in the same directory
import stacking_env
def make_env():
    return stacking_env.StackingEnv(render_mode=None)  # Disable rendering for training
env = DummyVecEnv([make_env])
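# Consistency check (sketch): assuming the .npz demonstrations were recorded from
# the same StackingEnv, their per-sample observation shape should match the env's space.
print("Env observation space:", env.observation_space)
print("Env action space:", env.action_space)
if obs.shape[1:] != env.observation_space.shape:
    print("Warning: offline observation shape differs from the environment's observation space.")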
# Initialize PPO policy with proper net_arch format
policy_kwargs = {
    "net_arch": {
        "pi": [64, 64],  # Policy network architecture
        "vf": [64, 64]   # Value network architecture
    }
}
model = PPO(
    "MlpPolicy",
    env,
    policy_kwargs=policy_kwargs,
    verbose=1,
    learning_rate=3e-4,
    n_steps=2048,
    batch_size=64,
    device=device  # Use detected device (cuda or cpu)
)
# RL training loop with simulated HIL (human-in-the-loop) interventions
total_timesteps = 50000  # Adjust based on available time; reduce to 10000 if needed
for _ in range(int(total_timesteps / 2048)):
    model.learn(total_timesteps=2048, reset_num_timesteps=False)
    if np.random.random() < 0.1:  # 10% chance of a simulated intervention
        # Placeholder only: a real HIL setup would inject human corrections here
        print(f"Simulated human intervention at step {model.num_timesteps}")
# Save the trained model
model_path = os.path.join(model_save_path, "ppo_hil_serl_stacking_offline")
model.save(model_path)
print(f"Model saved to {model_path}")
# Final note for the hackathon submission
print("Training complete. Upload the dataset folder and a separate demonstration (e.g., using screenshots) to the Hugging Face LeRobot Worldwide Hackathon community.")