#!/usr/bin/env python3
"""
Parse AVM (Around View Monitoring) semantic segmentation dataset into FiftyOne format.

This script converts the AVM dataset with YAML polygon annotations and ground truth
segmentation masks into a FiftyOne dataset, preserving all semantic classes and metadata.

Dataset source: https://github.com/ChulhoonJang/avm_dataset
"""
import os
from typing import Dict, List, Tuple

import numpy as np
import yaml
from PIL import Image

import fiftyone as fo
import fiftyone.core.labels as fol
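
# Expected on-disk layout, inferred from the path handling below (adjust if
# your copy of the dataset is organized differently):
#
#   <dataset_root>/
#   └── avm_seg_db/
#       ├── train_db.txt    # one "<image_path> <mask_path>" pair per line
#       ├── annotations/    # per-image YAML polygon files, "<id>.yml"
#       └── ...             # image and mask folders referenced by train_db.txt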


def load_yaml_annotation(yaml_path: str) -> Dict:
    """Load and parse a YAML annotation file."""
    with open(yaml_path, 'r') as f:
        content = f.read()
    # Strip the OpenCV-style "%YAML:1.0" directive line, which PyYAML rejects
    if content.startswith('%YAML'):
        content = '\n'.join(content.split('\n')[1:])
    return yaml.safe_load(content)
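
# A rough sketch of the annotation structure the parsers below assume
# (inferred from the field accesses; actual files may contain more keys):
#
#   attribute: [marker, vehicle, curb]
#   marker:
#     - {x: [10, 50, 50, 10], y: [20, 20, 60, 60]}
#   vehicle:
#     - {x: [...], y: [...]}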


def parse_annotation_to_polylines(
    annotation: Dict, image_width: int, image_height: int
) -> Tuple[List[fol.Polyline], Dict[str, int]]:
    """Convert AVM annotation polygons to FiftyOne Polyline objects."""
    polylines = []
    class_counts = {}
    # Display colors per class; curb, other, pillar, and wall all share green,
    # matching the "other objects" color in the ground truth masks
    class_colors = {
        'ego_vehicle': '#000000',
        'marker': '#FFFFFF',
        'vehicle': '#FF0000',
        'curb': '#00FF00',
        'other': '#00FF00',
        'pillar': '#00FF00',
        'wall': '#00FF00',
    }
    for attr in annotation.get('attribute', []):
        if attr in annotation:
            polygons = annotation[attr]
            class_counts[attr] = len(polygons)
            for poly_idx, poly_data in enumerate(polygons):
                if 'x' in poly_data and 'y' in poly_data:
                    x_coords = poly_data['x']
                    y_coords = poly_data['y']
                    # Normalize coordinates to [0, 1] range
                    points = [
                        [x / image_width, y / image_height]
                        for x, y in zip(x_coords, y_coords)
                    ]
                    polyline = fol.Polyline(
                        label=attr,
                        points=[points],
                        index=poly_idx,
                        closed=True,
                        filled=True,
                        fillColor=class_colors.get(attr, '#0000FF'),
                        lineColor=class_colors.get(attr, '#0000FF'),
                    )
                    polylines.append(polyline)
    return polylines, class_counts


def create_segmentation_from_mask(mask: np.ndarray) -> fol.Segmentation:
    """Create a FiftyOne Segmentation object from a ground truth RGB mask."""
    # RGB color -> integer class ID
    color_to_class = {
        (0, 0, 255): 0,      # Blue - Free space
        (255, 255, 255): 1,  # White - Marker
        (255, 0, 0): 2,      # Red - Vehicle
        (0, 255, 0): 3,      # Green - Other objects
        (0, 0, 0): 4,        # Black - Ego vehicle
    }
    # Drop any alpha channel so the per-pixel color comparison broadcasts cleanly
    mask = mask[:, :, :3]
    height, width = mask.shape[:2]
    class_mask = np.zeros((height, width), dtype=np.uint8)
    for color, class_id in color_to_class.items():
        color_mask = np.all(mask == color, axis=2)
        class_mask[color_mask] = class_id
    return fol.Segmentation(mask=class_mask)
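
# Illustrative sanity check for the color -> class conversion (not part of the
# original script); a 1x2 RGB mask of [blue, red] maps to class IDs [0, 2]:
#
#   >>> tiny = np.array([[[0, 0, 255], [255, 0, 0]]], dtype=np.uint8)
#   >>> create_segmentation_from_mask(tiny).mask
#   array([[0, 2]], dtype=uint8)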


def parse_train_file(train_file: str, base_dir: str) -> List[Tuple[str, str]]:
    """Parse train_db.txt to get image-mask pairs."""
    pairs = []
    with open(train_file, 'r') as f:
        for line in f:
            line = line.strip()
            if line:
                parts = line.split()
                if len(parts) == 2:
                    # Paths in train_db.txt are rooted at the seg_db directory
                    image_path = os.path.join(base_dir, parts[0].lstrip('/'))
                    mask_path = os.path.join(base_dir, parts[1].lstrip('/'))
                    pairs.append((image_path, mask_path))
    return pairs
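
# Hypothetical example of a train_db.txt entry (two whitespace-separated,
# root-relative paths per line; real entries may differ):
#
#   /images/000001.png /segmentation_gt/000001.png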


def extract_metadata_from_filename(filename: str) -> Dict:
    """Extract metadata from the AVM filename."""
    base_name = os.path.splitext(filename)[0]
    try:
        sample_id = int(base_name)
    except ValueError:
        sample_id = base_name
    return {
        "sample_id": sample_id,
        "filename_base": base_name,
    }


def determine_environment_and_parking_type(annotation: Dict, sample_id: int) -> Tuple[str, str, str]:
    """Determine environment, parking type, and slot type from annotation.

    Note: ``sample_id`` is currently unused.
    """
    has_curb = 'curb' in annotation.get('attribute', [])
    has_marker = 'marker' in annotation.get('attribute', [])
    # Simple heuristics: treat curb presence as a proxy for outdoor scenes and
    # marker presence as a proxy for closed parking slots
    environment = "outdoor" if has_curb else "indoor"
    parking_type = "perpendicular"  # Most common in dataset
    slot_type = "closed" if has_marker else "no_marker"
    return environment, parking_type, slot_type


def process_avm_dataset(dataset_root: str) -> fo.Dataset:
    """Process the AVM dataset and create a FiftyOne dataset."""
    seg_db_dir = os.path.join(dataset_root, "avm_seg_db")
    annotations_dir = os.path.join(seg_db_dir, "annotations")
    train_file = os.path.join(seg_db_dir, "train_db.txt")

    # Create dataset
    dataset = fo.Dataset(name="AVM_Segmentation", overwrite=True, persistent=True)

    # Add dataset metadata
    dataset.info = {
        "description": "AVM (Around View Monitoring) System Dataset for Auto Parking - Semantic Segmentation",
        "source": "https://github.com/ChulhoonJang/avm_dataset",
        "classes": {
            "0": {"name": "free_space", "color": [0, 0, 255]},
            "1": {"name": "marker", "color": [255, 255, 255]},
            "2": {"name": "vehicle", "color": [255, 0, 0]},
            "3": {"name": "other", "color": [0, 255, 0]},
            "4": {"name": "ego_vehicle", "color": [0, 0, 0]},
        },
        "image_dimensions": {"width": 320, "height": 160},
    }
    dataset.save()  # persist the info edits

    # Get train pairs
    train_pairs = parse_train_file(train_file, seg_db_dir)
    samples = []
    print(f"Processing {len(train_pairs)} training samples...")

    for i, (image_path, mask_path) in enumerate(train_pairs):
        filename = os.path.basename(image_path)
        base_name = os.path.splitext(filename)[0]
        annotation_path = os.path.join(annotations_dir, f"{base_name}.yml")

        # Skip samples with missing files
        if not all(os.path.exists(p) for p in [image_path, mask_path, annotation_path]):
            continue

        # Get image dimensions
        with Image.open(image_path) as img:
            width, height = img.size

        # Load annotation and create polylines
        annotation = load_yaml_annotation(annotation_path)
        polylines, class_counts = parse_annotation_to_polylines(annotation, width, height)

        # Extract metadata
        metadata = extract_metadata_from_filename(filename)
        environment, parking_type, slot_type = determine_environment_and_parking_type(
            annotation, metadata["sample_id"]
        )

        # Load mask (forcing RGB so palette/alpha encodings are handled) and
        # create the segmentation label
        mask = np.array(Image.open(mask_path).convert("RGB"))
        segmentation = create_segmentation_from_mask(mask)

        # Create sample with all metadata
        sample = fo.Sample(
            filepath=image_path,
            split="train",
            sample_id=metadata["sample_id"],
            environment=fol.Classification(label=environment),
            parking_type=fol.Classification(label=parking_type),
            slot_type=fol.Classification(label=slot_type),
            polygon_annotations=fol.Polylines(polylines=polylines),
            classes_present=annotation.get('attribute', []),
            num_markers=class_counts.get('marker', 0),
            num_vehicles=class_counts.get('vehicle', 0),
            has_curb=('curb' in annotation.get('attribute', [])),
            has_ego_vehicle=('ego_vehicle' in annotation.get('attribute', [])),
            ground_truth=segmentation,
            mask_path=mask_path,
        )
        samples.append(sample)

        if (i + 1) % 100 == 0:
            print(f"  Processed {i + 1} samples...")

    # Add samples to dataset
    dataset.add_samples(samples)
    dataset.compute_metadata()
    dataset.add_dynamic_sample_fields()

    print(f"✅ Dataset created with {len(samples)} samples!")
    return dataset


def main():
    """Main function."""
    dataset_root = "/Users/harpreetsahota/workspace/avm_dataset"
    dataset = process_avm_dataset(dataset_root)
    print("Launch FiftyOne app with:")
    print("  import fiftyone as fo")
    print("  dataset = fo.load_dataset('AVM_Segmentation')")
    print("  session = fo.launch_app(dataset)")


if __name__ == "__main__":
    main()