"""
DatasetConverter - A utility for processing the EgoSim dataset for machine learning.

Loading individual image files is slow, so converting sequences to MP4 or HDF5 is beneficial.

This script provides an example pipeline to convert raw EgoSim simulation
data into standardized formats suitable for machine learning applications. The
converter handles image sequences, SMPL pose parameters, and metadata to generate:

- MP4 videos with standardized dimensions
- HDF5 datasets for efficient storage (optional)
- CSV metadata files for sequences
- Processed SMPL body model data with joint positions
"""

import os
import pickle
import signal
import subprocess
import time
import traceback
from pathlib import Path
from typing import Tuple, List

import cv2
import h5py
import numpy as np
import pandas as pd
import torch
from smplx import SMPLX
from tqdm import trange
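
# Expected on-disk input layout, inferred from how paths are composed in the methods
# below (hypothetical example, adjust to your own setup):
#   <images_root>/<sequence_path>/images/*.jpg          raw camera frames
#   <airsim_rec_root>/<sequence_path>/airsim_rec.txt    tab-separated AirSim recording log
#   <smpl_root>/<sequence_path>.npz                     AMASS SMPL-X motion parameters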


class DatasetConverter:

    def __init__(self, label_idx,
                 image_size: Tuple[int, int],
                 images_root: Path,
                 airsim_rec_root: Path,
                 video_output_root: Path,
                 hdf5_output_root: Path,
                 csv_output_root: Path,
                 smpl_root: Path,
                 smpl_out_root: Path,
                 smplx_model_path: Path,
                 max_sequence_length: int = 150,
                 save_as_mp4: bool = True,
                 save_as_hdf5: bool = False
                 ):
        """
        Initialize the DatasetConverter for transforming raw AirSim data to a format suitable for training models.

        This converter handles various data processing tasks:
        - Converting raw images to MP4 videos with standardized dimensions
        - Converting SMPL pose parameters to usable joint positions
        - Saving sequence data in HDF5 format (optional)
        - Creating CSV files with sequence metadata

        Parameters
        ----------
        label_idx : dict
            The Babel action dictionary containing segment information with keys like 'amass_path', 'start_s', 'end_s', 'seg_id', etc.
        image_size : Tuple[int, int]
            Desired output size for images/videos as (height, width) in pixels.
        images_root : Path
            Root directory containing the raw input images.
        airsim_rec_root : Path
            Root directory containing AirSim recording data (airsim_rec.txt files).
        video_output_root : Path
            Directory where converted MP4 videos will be saved.
        hdf5_output_root : Path
            Directory where HDF5 files of image sequences will be saved.
        csv_output_root : Path
            Directory where CSV files with sequence metadata will be saved.
        smpl_root : Path
            Root directory containing original SMPL model parameters.
        smpl_out_root : Path
            Directory where processed SMPL sequence data will be saved.
        smplx_model_path : Path
            Path to the directory containing SMPLX model files.
        max_sequence_length : int, optional
            Maximum number of frames to include in a sequence, default is 150.
        save_as_mp4 : bool, optional
            Whether to save sequences as MP4 videos, default is True.
        save_as_hdf5 : bool, optional
            Whether to save sequences as HDF5 files, default is False.
        """
        self.label_idx = label_idx
        self.image_size = image_size
        self.airsim_rec_root = airsim_rec_root
        self.images_root = images_root
        self.video_output_root = video_output_root
        self.hdf5_output_root = hdf5_output_root
        self.max_sequence_length = max_sequence_length
        self.csv_output_root = csv_output_root
        self.smpl_root = smpl_root
        self.smpl_out_root = smpl_out_root
        self.interrupt_caught = False
        self.airsim_rec_cache = {}
        self.should_save_as_mp4 = save_as_mp4
        self.should_save_as_hdf5 = save_as_hdf5

        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        self.body_model_female = SMPLX(str(smplx_model_path / "SMPLX_FEMALE.npz"),
                                       batch_size=1,
                                       gender='female',
                                       num_betas=16,
                                       num_expression_coeffs=10,
                                       ).to(self.device)
        self.body_model_male = SMPLX(str(smplx_model_path / "SMPLX_MALE.npz"),
                                     batch_size=1,
                                     gender='male',
                                     num_betas=16,
                                     num_expression_coeffs=10).to(self.device)

    def load_images_from_disk(self, img_path):
        """Loads a single image from disk as JPG."""
        img_path = self.images_root / img_path.parent / "images" / img_path.name
        try:
            image = cv2.imread(str(img_path))
            image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
        except Exception as e:
            print(f"Failed to load and resize {img_path}. Returning an empty image.", e)
            return np.zeros((self.image_size[0], self.image_size[1], 3), dtype=np.uint8)

        return image

    def load_images(self, img_paths):
        """Loads images from disk as JPG files."""
        images = []
        for img_path in img_paths:
            image = self.load_images_from_disk(img_path)
            images.append(image)
        return images

    def save_to_hdf5(self, image_paths: List[Path], seg_id: str, view: str):
        timea = time.time()
        stacked_images = np.stack(self.load_images(image_paths), axis=0)
        timeb = time.time()

        dataset_name = image_paths[0].parts[0] + ".hdf5"
        participant_name = image_paths[0].parent.parent.name
        sequence = image_paths[0].parent.name
        os.makedirs(self.hdf5_output_root, exist_ok=True)
        try:
            with h5py.File(self.hdf5_output_root / dataset_name, 'a', libver='latest') as hdf5_file:
                seg_group = hdf5_file.require_group(participant_name).require_group(sequence).require_group(seg_id)
                if view in seg_group:
                    return
                seg_group.create_dataset(view, data=stacked_images, dtype="uint8")
        except Exception as e:
            print(f"Error saving to hdf5: {e}, on {self.hdf5_output_root / dataset_name} with seg_id {seg_id} and view {view}")
            raise e
        timec = time.time()
        print(f"Time to load images: {timeb - timea}, time to save hdf5: {timec - timeb}")
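
    # Example of reading a saved segment back (hypothetical names, assuming the
    # participant/sequence/seg_id/view layout created above):
    #   with h5py.File(hdf5_output_root / "<dataset>.hdf5", "r") as f:
    #       frames = f["<participant>/<sequence>/<seg_id>/socket1"][:]  # (T, H, W, 3) uint8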

    def valid_image(self, img_path):
        """Checks that the image file exists and is not zero bytes."""
        if not img_path.exists():
            print("Image does not exist", img_path)
            return False
        if os.path.getsize(img_path) == 0:
            print("Image is 0 size", img_path)
            return False
        return True

    def save_as_mp4(self, image_paths: List[Path], seg_id: str, view: str):
        sequence_part = Path(*image_paths[0].parts[-4:-1])
        output_dir = self.video_output_root / sequence_part / seg_id

        os.makedirs(output_dir, exist_ok=True)
        image_file_list_path = output_dir / f"image_file_list_{view}.txt"
        video_output_path = output_dir / f"{view}.mp4"

        # Skip re-encoding if a video with the expected frame count already exists.
        cap = cv2.VideoCapture(str(video_output_path))
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        if length == len(image_paths):
            print("Video (with same number of frames) already exists, skipping")
            return

        with open(image_file_list_path, 'w') as file:
            image_paths = [self.images_root / sequence_part / "images" / image_path.name for image_path in image_paths]
            image_paths_str = "\n".join([f"file '{image_path}'" for image_path in image_paths if self.valid_image(image_path)])
            file.write(image_paths_str)
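
        # ffmpeg encodes the listed frames via the concat demuxer at 30 fps using the
        # NVENC HEVC encoder (-c:v hevc_nvenc needs an NVIDIA GPU and an ffmpeg build with
        # NVENC support; libx264/libx265 could be substituted otherwise), scales to the
        # configured output size, and writes yuv420p for broad player compatibility.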
        command = f"ffmpeg -hide_banner -loglevel error -f concat -r 30 -y -safe 0 -i '{image_file_list_path}' -c:v hevc_nvenc -vf 'scale={self.image_size[1]}:{self.image_size[0]}' -aspect 1:1 -preset p7 -cq:v 1 -pix_fmt yuv420p '{video_output_path}'"

        try:
            result = subprocess.run(command, shell=True, check=True, text=True, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            print(f"An error occurred: {e.stderr}")
            with open("error_files.txt", "a") as error_file:
                error_file.write(f"Error in ffmpeg: {e.stderr}, on {image_file_list_path} with command: {command}\n")

        os.remove(image_file_list_path)

        # Sanity check: the encoded video should contain one frame per valid input image.
        cap = cv2.VideoCapture(str(video_output_path))
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        if length != len(image_paths):
            print(f"Video {video_output_path} has {length} frames, should have {len(image_paths)}")

    def save_smpl_sequence(self, airsim_rec: pd.DataFrame, sequence_path: Path, vehicle_name: str, seg_id: str):
        out_file = (self.smpl_out_root / sequence_path / seg_id).with_suffix(".npz")
        out_file.parent.mkdir(parents=True, exist_ok=True)

        if out_file.exists():
            try:
                npz = np.load(out_file)
                if "joint_positions" in npz:
                    print(f"Skipping {out_file} as it already exists.")
                    return
            except Exception as e:
                print(f"Error loading {out_file}: {e}")

        smpl_path = self.smpl_root / sequence_path
        smpl_path = smpl_path.with_suffix(".npz")
        smpl_data = np.load(smpl_path, allow_pickle=True)

        ts = airsim_rec[airsim_rec["VehicleName"] == vehicle_name]["TimeStampAnimationS"]
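
        # Map animation timestamps (seconds) to SMPL frame indices using the mocap frame
        # rate stored in the AMASS file, e.g. ts = 1.5 s at mocap_frame_rate = 120 maps to
        # frame index 180.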
        def timestamps_to_idx(ts, smpl_data):
            fps = smpl_data["mocap_frame_rate"]
            idx = (ts * fps).astype(int)
            return idx

        idx = timestamps_to_idx(ts, smpl_data)
        idx_len = idx.shape[0]
        idx = idx[idx < smpl_data["poses"].shape[0]]
        if idx_len - idx.shape[0] > 2:
            print(f"Warning: More than 2 frames were removed from the smpl data. {idx_len - idx.shape[0]} frames were removed.")

        poses = smpl_data["poses"][idx]
        root_orient = smpl_data["root_orient"][idx]
        trans = smpl_data["trans"][idx]
        betas = smpl_data["betas"]
        gender = smpl_data["gender"]

        joint_positions = np.zeros((idx.shape[0], 127, 3))
        poses_torch = torch.from_numpy(poses[:, 3:66]).float().to(self.device)
        betas_torch = torch.from_numpy(betas).float().to(self.device)
        for i in range(idx.shape[0]):
            if smpl_data["gender"] == "male":
                bm = self.body_model_male
            elif smpl_data["gender"] == "female":
                bm = self.body_model_female
            else:
                raise ValueError("Body model can either be male or female.")
            bp = {
                "body_pose": poses_torch[i].unsqueeze(0),
                "betas": betas_torch.unsqueeze(0),
            }
            with torch.no_grad():
                joint_positions[i] = bm.forward(**bp, return_verts=False, use_only_num_joints=-1).joints.cpu().numpy()

        np.savez(out_file,
                 poses=poses,
                 root_orient=root_orient,
                 trans=trans,
                 betas=betas,
                 gender=gender,
                 joint_positions=joint_positions,
                 )
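
    # Example of reading a processed sequence back (hypothetical path, assuming the arrays
    # saved above): data = np.load("<smpl_out_root>/<sequence>/<seg_id>.npz") exposes
    # 'poses', 'root_orient', 'trans', 'betas', 'gender' and per-frame 'joint_positions'
    # of shape (frames, 127, 3).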

    def save_segment_csv(self, sequence_path, seg_id, airsim_rec, airsim_rec_other_vehicles, vehicle_name):
        csv_out_path = self.csv_output_root / sequence_path / seg_id
        csv_out_path.mkdir(parents=True, exist_ok=True)
        airsim_rec_all = pd.concat([airsim_rec, airsim_rec_other_vehicles])
        airsim_rec_all = airsim_rec_all.sort_index()

        self.save_smpl_sequence(airsim_rec_all, sequence_path, vehicle_name, seg_id)

        if airsim_rec_all.shape[0] == 0:
            print("WARNING: airsim_rec_all is empty, saving an empty csv to:", csv_out_path / "airsim_rec.csv")
        airsim_rec_all.to_csv(csv_out_path / "airsim_rec.csv", index=False)

    def convert_sample(self, idx: int):
        sequence_path = self.label_idx["amass_path"][idx]
        start_s = self.label_idx['start_s'][idx]
        end_s = self.label_idx['end_s'][idx]
        seg_id = self.label_idx['seg_id'][idx]
        chunk = self.label_idx['chunk_n'][idx]

        airsim_rec_path = self.airsim_rec_root / sequence_path

        if not airsim_rec_path.exists():
            print(f"No airsim_rec.txt found for {sequence_path} in {seg_id}. Skipping segment {start_s}-{end_s} (length {end_s - start_s})")
            with open("error_files.txt", "a") as file:
                file.write(f"No airsim_rec.txt found for {sequence_path} in {seg_id}. Skipping segment {start_s}-{end_s} (length {end_s - start_s})\n")
            return

        # Cache parsed airsim_rec.txt files since several segments share the same recording.
        airsim_rec_fp = airsim_rec_path / 'airsim_rec.txt'
        if airsim_rec_fp not in self.airsim_rec_cache:
            airsim_rec = pd.read_csv(airsim_rec_fp, sep="\t", engine="c", low_memory=False)
            self.airsim_rec_cache[airsim_rec_fp] = airsim_rec.copy()
        else:
            airsim_rec = self.airsim_rec_cache[airsim_rec_fp].copy()

        seq_path_split = sequence_path.split("/")
        vehicle_name = seq_path_split[-3] + "#" + seq_path_split[-2]

        max_time = airsim_rec['TimeStampAnimationS'].max()
        orig_airsim_rec = airsim_rec
        vehicle_mask = airsim_rec['VehicleName'] == vehicle_name
        time_mask = (airsim_rec["TimeStampAnimationS"] >= start_s) & (airsim_rec["TimeStampAnimationS"] < end_s)
        airsim_rec = airsim_rec[vehicle_mask & time_mask]

        frames = len(airsim_rec)
        if frames == 0:
            print(f"No frames found for {sequence_path} in {seg_id}. Skipping segment {start_s}-{end_s} (length {end_s - start_s}, vehicle max: {max_time})")
            with open("error_files.txt", "a") as file:
                file.write(f"No images in sequence {sequence_path} {seg_id} start: {start_s} end: {end_s}, airsim_rec_path: {airsim_rec_path}\n")
            return

        # Keep the rows of the other vehicles that fall within the same index window.
        end = min(airsim_rec.index[-1] + 1, orig_airsim_rec.shape[0] - 1)
        orig_airsim_rec_window = orig_airsim_rec.iloc[airsim_rec.index[0]:end]
        airsim_rec_other_vehicles = orig_airsim_rec_window[orig_airsim_rec_window["VehicleName"] != vehicle_name]
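
        # Segments longer than max_sequence_length are first decimated using the label's
        # scale_factor as the subsampling stride and then truncated to max_sequence_length.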
        subsampling = 1
        if frames > self.max_sequence_length:
            if self.label_idx['scale_factor'][idx] > 1.0:
                subsampling = int(self.label_idx['scale_factor'][idx])
                airsim_rec = airsim_rec.iloc[::subsampling]
                frames = len(airsim_rec)

            print(f"Warning: More than {self.max_sequence_length} frames found ({frames}). Only using the first {self.max_sequence_length}, airsim_rec_path: {airsim_rec_path}. Start: {start_s} End: {end_s} Vehicle max: {max_time}")
            with open("error_files.txt", "a") as file:
                file.write(f"Warning: More than {self.max_sequence_length} frames found ({frames}). Only using the first {self.max_sequence_length}, airsim_rec_path: {airsim_rec_path}. Start: {start_s} End: {end_s} Vehicle max: {max_time}\n")
            airsim_rec = airsim_rec.head(self.max_sequence_length)
            frames = len(airsim_rec)

        if chunk != 0:
            seg_id = f"{seg_id}_chunk{chunk:02d}"

        self.save_segment_csv(sequence_path, seg_id, airsim_rec, airsim_rec_other_vehicles, vehicle_name)
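
        # Each ImageFile cell holds a ';'-separated list of per-camera image files for that
        # row; below they are flattened and grouped into one sequence per camera view.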
        image_paths = airsim_rec["ImageFile"].dropna().str.split(';').explode().tolist()
        for view in ["socket1", "socket2", "socket3", "socket4", "socket5", "socket6"]:
            sequence_paths = [Path(sequence_path) / image_path.replace(".ppm", ".jpg") for image_path in image_paths
                              if view in image_path]
            if len(sequence_paths) == 0:
                continue

            if len(sequence_paths) > self.max_sequence_length:
                print(f"Warning: More than {self.max_sequence_length} images found ({len(sequence_paths)}). Only using the first {self.max_sequence_length}, airsim_rec_path: {airsim_rec_path}. Start: {start_s} End: {end_s} Vehicle max: {max_time}")
                with open("error_files.txt", "a") as file:
                    file.write(f"Warning: More than {self.max_sequence_length} images found ({len(sequence_paths)}). Only using the first {self.max_sequence_length}, airsim_rec_path: {airsim_rec_path}. Start: {start_s} End: {end_s} Vehicle max: {max_time}\n")
                sequence_paths = sequence_paths[:self.max_sequence_length]

            # Skip views whose video output already exists and is non-empty.
            sequence_part = Path(*sequence_paths[0].parts[-4:-1])
            output_dir = self.video_output_root / sequence_part / seg_id / f"{view}.mp4"
            if output_dir.exists() and output_dir.stat().st_size != 0:
                continue

            if self.should_save_as_hdf5:
                self.save_to_hdf5(sequence_paths, seg_id, view)

            if self.should_save_as_mp4:
                self.save_as_mp4(sequence_paths, seg_id, view)

    def signal_handler(self, sig, frame):
        print('Interrupt received, finishing the current operation before exiting...')
        self.interrupt_caught = True
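
    # SIGINT (Ctrl+C) only sets a flag; convert_dataset checks it between samples, so the
    # sample currently being converted finishes before the loop exits.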

    def convert_dataset(self):
        signal.signal(signal.SIGINT, self.signal_handler)

        for i in trange(len(self.label_idx["amass_path"])):
            if self.interrupt_caught:
                print("Interrupt caught, exiting...")
                break
            try:
                self.convert_sample(i)
            except Exception as e:
                print(f"An error occurred: {e}, on {self.label_idx['amass_path'][i]} with seg_id {self.label_idx['seg_id'][i]}")
                traceback.print_exc()
                with open("error_files.txt", "a") as file:
                    file.write(f"An error occurred: {e}, on {self.label_idx['amass_path'][i]} with seg_id {self.label_idx['seg_id'][i]}\n")
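
# Note: failures during conversion are appended to error_files.txt in the current working
# directory and processing then continues with the next segment.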

if __name__ == "__main__":

    dataset_paths = [r'babel_v1.0/train_label_60.pkl', r'babel_v1.0/val_label_60.pkl', r'babel_v1.0/test_label_60.pkl']
    for dataset_path in dataset_paths:
        with open(dataset_path, 'rb') as f:
            dataset_split = pickle.load(f)

        # Placeholder paths: replace with the locations on your system.
        images_root = Path("/path/to/images/root")
        airsim_rec_root = Path("/path/to/airsim/rec/root")
        video_output_root = Path("/path/to/output/video/root")
        hdf5_output_root = Path("/path/to/output/hdf5/root")
        csv_output_root = Path("/path/to/output/csv")
        smpl_root = Path("/path/to/amass/smpl/models")
        smpl_out_root = Path("/path/to/output/smpl/sequences")
        smplx_model_path = Path("/path/to/smplx/models")

        converter = DatasetConverter(dataset_split,
                                     (224, 224),
                                     images_root,
                                     airsim_rec_root,
                                     video_output_root,
                                     hdf5_output_root,
                                     csv_output_root,
                                     smpl_root,
                                     smpl_out_root,
                                     smplx_model_path)

        converter.convert_dataset()