import json
from collections import defaultdict
from pathlib import Path
from typing import List, Tuple

import numpy as np
from datasets import Dataset, DatasetDict, Video
from huggingface_hub import HfApi
from torch.utils.data import DataLoader
|
|
def create_splits(path: Path, split: Tuple[float, float, float]) -> Tuple[List[str], List[str], List[str]]:
    """Greedily assign participants to train/val/test so that the total recording
    time per split approximates the requested fractions."""
    json_files = list(path.glob("**/*.json"))
    print(f"Found {len(json_files)} json files")

    # Total recording time per participant, taking the end time of the last
    # annotated action as the session length.
    participants_length = defaultdict(float)
    for json_file in json_files:
        with json_file.open("r") as f:
            data = json.load(f)
        participant = json_file.parts[-3]
        participants_length[participant] += data[-1]["end_t"]

    total_length = sum(participants_length.values())
    train_target = total_length * split[0]
    valid_target = total_length * split[1]

    # Assign the longest participants first. Each one goes to whichever of
    # train/val has the larger remaining deficit; once both targets are met
    # (deficit of -inf), everything else goes to test.
    sorted_participants = sorted(participants_length.items(), key=lambda x: x[1], reverse=True)

    train_participants: List[str] = []
    valid_participants: List[str] = []
    test_participants: List[str] = []

    train_length = 0.0
    valid_length = 0.0
    test_length = 0.0

    for participant, length in sorted_participants:
        train_deficit = train_target - train_length if train_length < train_target else float("-inf")
        valid_deficit = valid_target - valid_length if valid_length < valid_target else float("-inf")

        if train_deficit >= valid_deficit and train_deficit > float("-inf"):
            train_participants.append(participant)
            train_length += length
        elif valid_deficit > float("-inf"):
            valid_participants.append(participant)
            valid_length += length
        else:
            test_participants.append(participant)
            test_length += length

    print(f"Effective splits: {train_length/total_length:.2f}, {valid_length/total_length:.2f}, {test_length/total_length:.2f}")

    return train_participants, valid_participants, test_participants
|
|
def get_smpl_pose(smpl_path: Path, start_t: float, end_t: float, fps: int = 30):
    """Load an SMPL-X sequence and slice out the frames covering [start_t, end_t]."""
    smpl_pose = np.load(smpl_path)
    start_frame = int(start_t * fps)
    end_frame = int(end_t * fps)
    pose = {
        "poses": smpl_pose["poses"][start_frame:end_frame],
        "trans": smpl_pose["trans"][start_frame:end_frame],
        "betas": smpl_pose["betas"],
        "gender": smpl_pose["gender"],
    }
    return pose
|
|
def create_dataset_dict(path: Path, split: Tuple[float, float, float] = (0.7, 0.1, 0.2)):
    # Compare with a tolerance: 0.7 + 0.1 + 0.2 != 1.0 in float arithmetic,
    # so an exact equality check would fail on the default split.
    assert abs(sum(split) - 1.0) < 1e-6

    splits = create_splits(path, split)

    ds = {"train": defaultdict(list), "val": defaultdict(list), "test": defaultdict(list)}
    for split_name, participants in zip(["train", "val", "test"], splits):
        for participant in participants:
            json_files = list(path.glob(f"**/{participant}/**/*.json"))
            for json_file in json_files:
                with json_file.open("r") as f:
                    data = json.load(f)
                session = json_file.parts[-2]
                # Video paths are stored relative to the dataset root so they
                # remain valid inside the uploaded repository.
                data_folder_relative = json_file.parent.relative_to(path.parent)
                for action in data:
                    # Randomly subsample the annotations: keep only ~5% of the actions.
                    if np.random.rand() < 0.95:
                        continue
                    entry = {
                        "participant": participant,
                        "session": session,
                        "start_t": action["start_t"],
                        "end_t": action["end_t"],
                        "action": action["act_cat"],
                        "video_head": str(data_folder_relative / "Head_anonymized.mp4"),
                        "video_pelvis": str(data_folder_relative / "Pelvis_anonymized.mp4"),
                        "video_left_hand": str(data_folder_relative / "LeftHand_anonymized.mp4"),
                        "video_right_hand": str(data_folder_relative / "RightHand_anonymized.mp4"),
                        "video_left_knee": str(data_folder_relative / "LeftKnee_anonymized.mp4"),
                        "video_right_knee": str(data_folder_relative / "RightKnee_anonymized.mp4"),
                        # Load the poses from the absolute path; the relative
                        # path above only resolves from the dataset root.
                        **get_smpl_pose(json_file.parent / "smplx.npz", action["start_t"], action["end_t"]),
                    }
                    for key in entry:
                        ds[split_name][key].append(entry[key])

    return ds
|
|
def create_huggingface_dataset(ds):
    huggingface_ds = DatasetDict({
        "train": Dataset.from_dict(ds["train"]),
        "val": Dataset.from_dict(ds["val"]),
        "test": Dataset.from_dict(ds["test"]),
    })
    print(f"Dataset sizes: Train: {len(huggingface_ds['train'])}, Val: {len(huggingface_ds['val'])}, Test: {len(huggingface_ds['test'])}")

    # Cast every video path column to the Video feature so the files are
    # decoded on access instead of being stored as plain strings.
    for split in huggingface_ds:
        for col in huggingface_ds[split].column_names:
            if "video" in col:
                huggingface_ds[split] = huggingface_ds[split].cast_column(col, Video())

    return huggingface_ds
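

# A minimal upload sketch, assuming the dataset is meant for the Hub: the
# repo id is a caller-supplied placeholder, and an authenticated session
# (e.g. via `huggingface-cli login`) is assumed.
def push_dataset(huggingface_ds: DatasetDict, repo_id: str):
    # Create the dataset repo if it does not exist yet, then upload all splits.
    HfApi().create_repo(repo_id, repo_type="dataset", exist_ok=True)
    huggingface_ds.push_to_hub(repo_id)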
|
|
if __name__ == "__main__":
    ds = create_dataset_dict(Path("path/to/data/of/uncompressed/folders/of/subjects"))

    huggingface_ds = create_huggingface_dataset(ds)

    dataset_sizes = {
        "train": len(huggingface_ds["train"]),
        "val": len(huggingface_ds["val"]),
        "test": len(huggingface_ds["test"]),
    }
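
    # A hypothetical smoke test, not part of the original pipeline: wrap the
    # pose columns of the train split in a PyTorch DataLoader. batch_size=1
    # because clips vary in length; the video columns are left out since the
    # lazily decoded videos do not batch with the default collate function.
    train_loader = DataLoader(
        huggingface_ds["train"].select_columns(["poses", "trans", "betas"]).with_format("torch"),
        batch_size=1,
    )
    print(f"Train loader yields {len(train_loader)} clips")

    # Upload step (placeholder repo id), uncomment to push:
    # push_dataset(huggingface_ds, "your-username/your-dataset-name")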