#!/usr/bin/env python3
"""
Parse AVM (Around View Monitoring) semantic segmentation dataset into FiftyOne format.

This script converts the AVM dataset with YAML polygon annotations and ground
truth segmentation masks into a FiftyOne dataset, preserving all semantic
classes and metadata.

Dataset source: https://github.com/ChulhoonJang/avm_dataset
"""

import os
from typing import Dict, List, Tuple

import numpy as np
import yaml
from PIL import Image

import fiftyone as fo
import fiftyone.core.labels as fol


def load_yaml_annotation(yaml_path: str) -> Dict:
    """Load and parse a YAML annotation file.

    A leading ``%YAML`` directive line (as written by OpenCV's FileStorage)
    is stripped because PyYAML's safe loader rejects that dialect.

    Args:
        yaml_path: path to the ``.yml`` annotation file

    Returns:
        The parsed annotation mapping.
    """
    with open(yaml_path, 'r') as f:
        content = f.read()
    if content.startswith('%YAML'):
        content = '\n'.join(content.split('\n')[1:])
    return yaml.safe_load(content)


def parse_annotation_to_polylines(
    annotation: Dict, image_width: int, image_height: int
) -> Tuple[List[fol.Polyline], Dict[str, int]]:
    """Convert AVM annotation polygons to FiftyOne Polyline objects.

    Args:
        annotation: parsed YAML annotation; ``annotation['attribute']`` lists
            the class names present, and each class name keys a list of
            polygons with ``'x'`` / ``'y'`` pixel-coordinate lists
        image_width: image width in pixels, used to normalize x coordinates
        image_height: image height in pixels, used to normalize y coordinates

    Returns:
        A tuple ``(polylines, class_counts)`` where ``class_counts`` maps
        each class name to the number of *valid* polygons converted.
        (Entries missing ``'x'``/``'y'`` are skipped and not counted —
        previously they inflated the counts.)
    """
    polylines = []
    class_counts = {}
    class_colors = {
        'ego_vehicle': '#000000',
        'marker': '#FFFFFF',
        'vehicle': '#FF0000',
        'curb': '#00FF00',
        'other': '#00FF00',
        'pillar': '#00FF00',
        'wall': '#00FF00'
    }

    for attr in annotation.get('attribute', []):
        if attr not in annotation:
            continue
        # Guard against a present-but-empty YAML key (parses as None)
        polygons = annotation[attr] or []
        class_counts[attr] = 0
        for poly_idx, poly_data in enumerate(polygons):
            if 'x' not in poly_data or 'y' not in poly_data:
                continue
            class_counts[attr] += 1
            x_coords = poly_data['x']
            y_coords = poly_data['y']

            # Normalize coordinates to [0, 1] range, as FiftyOne expects
            points = [
                [x / image_width, y / image_height]
                for x, y in zip(x_coords, y_coords)
            ]

            polyline = fol.Polyline(
                label=attr,
                points=[points],
                index=poly_idx,
                closed=True,
                filled=True,
                fillColor=class_colors.get(attr, '#0000FF'),
                lineColor=class_colors.get(attr, '#0000FF')
            )
            polylines.append(polyline)

    return polylines, class_counts


def create_segmentation_from_mask(mask: np.ndarray) -> fol.Segmentation:
    """Create a FiftyOne Segmentation object from a ground truth mask.

    Maps the dataset's RGB color coding to integer class ids:
    blue=free space(0), white=marker(1), red=vehicle(2),
    green=other(3), black=ego vehicle(4).

    Args:
        mask: ``(H, W, 3)`` (or ``(H, W, 4)``; alpha is ignored) uint8 array

    Returns:
        A :class:`fiftyone.core.labels.Segmentation` with a ``(H, W)``
        uint8 class-id mask. Pixels matching no known color stay 0.

    Raises:
        ValueError: if ``mask`` has no color channels (a 2-D array cannot
            be matched against RGB color codes).
    """
    if mask.ndim != 3:
        raise ValueError(
            f"Expected an RGB(A) mask with shape (H, W, C), got shape {mask.shape}"
        )
    # Drop any alpha channel so RGBA masks still match the RGB color codes
    rgb = mask[..., :3]

    color_to_class = {
        (0, 0, 255): 0,       # Blue - Free space
        (255, 255, 255): 1,   # White - Marker
        (255, 0, 0): 2,       # Red - Vehicle
        (0, 255, 0): 3,       # Green - Other objects
        (0, 0, 0): 4          # Black - Ego vehicle
    }

    height, width = mask.shape[:2]
    class_mask = np.zeros((height, width), dtype=np.uint8)
    for color, class_id in color_to_class.items():
        color_mask = np.all(rgb == color, axis=2)
        class_mask[color_mask] = class_id

    return fol.Segmentation(mask=class_mask)


def parse_train_file(train_file: str, base_dir: str) -> List[Tuple[str, str]]:
    """Parse train_db.txt to get image-mask pairs.

    Each non-empty line is expected to hold two whitespace-separated
    dataset-relative paths (image, mask); malformed lines are skipped.

    Args:
        train_file: path to ``train_db.txt``
        base_dir: directory the relative paths are resolved against

    Returns:
        List of absolute ``(image_path, mask_path)`` tuples.
    """
    pairs = []
    with open(train_file, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            parts = line.split()
            if len(parts) == 2:
                image_path = os.path.join(base_dir, parts[0].lstrip('/'))
                mask_path = os.path.join(base_dir, parts[1].lstrip('/'))
                pairs.append((image_path, mask_path))
    return pairs


def extract_metadata_from_filename(filename: str) -> Dict:
    """Extract metadata from the AVM filename.

    Args:
        filename: basename such as ``"000123.png"``

    Returns:
        Dict with ``"sample_id"`` (int when the stem is numeric, otherwise
        the raw stem string) and ``"filename_base"`` (the stem).
    """
    base_name = os.path.splitext(filename)[0]
    try:
        sample_id = int(base_name)
    except ValueError:
        sample_id = base_name
    return {
        "sample_id": sample_id,
        "filename_base": base_name
    }


def determine_environment_and_parking_type(
    annotation: Dict, sample_id: int
) -> Tuple[str, str, str]:
    """Determine environment, parking type, and slot type from annotation.

    Heuristics: curbs only appear outdoors; markers imply a painted
    ("closed") slot. ``sample_id`` is currently unused but kept for
    interface stability.

    Args:
        annotation: parsed YAML annotation
        sample_id: numeric sample id (unused)

    Returns:
        ``(environment, parking_type, slot_type)`` strings.
    """
    attributes = annotation.get('attribute', [])
    has_curb = 'curb' in attributes
    has_marker = 'marker' in attributes

    environment = "outdoor" if has_curb else "indoor"
    parking_type = "perpendicular"  # Most common in dataset
    slot_type = "closed" if has_marker else "no_marker"

    return environment, parking_type, slot_type


def process_avm_dataset(dataset_root: str) -> fo.Dataset:
    """Process the AVM dataset and create a FiftyOne dataset.

    Expects ``<dataset_root>/avm_seg_db`` to contain ``annotations/`` and
    ``train_db.txt``. Samples missing an image, mask, or annotation file
    are silently skipped.

    Args:
        dataset_root: root directory of the cloned AVM dataset

    Returns:
        The populated, persistent FiftyOne dataset named
        ``"AVM_Segmentation"`` (any prior dataset of that name is
        overwritten).
    """
    seg_db_dir = os.path.join(dataset_root, "avm_seg_db")
    annotations_dir = os.path.join(seg_db_dir, "annotations")
    train_file = os.path.join(seg_db_dir, "train_db.txt")

    # Create dataset
    dataset = fo.Dataset(name="AVM_Segmentation", overwrite=True, persistent=True)

    # Add dataset metadata
    dataset.info = {
        "description": "AVM (Around View Monitoring) System Dataset for Auto Parking - Semantic Segmentation",
        "source": "https://github.com/ChulhoonJang/avm_dataset",
        "classes": {
            "0": {"name": "free_space", "color": [0, 0, 255]},
            "1": {"name": "marker", "color": [255, 255, 255]},
            "2": {"name": "vehicle", "color": [255, 0, 0]},
            "3": {"name": "other", "color": [0, 255, 0]},
            "4": {"name": "ego_vehicle", "color": [0, 0, 0]}
        },
        "image_dimensions": {"width": 320, "height": 160}
    }

    # Get train pairs
    train_pairs = parse_train_file(train_file, seg_db_dir)
    samples = []

    print(f"Processing {len(train_pairs)} training samples...")

    for i, (image_path, mask_path) in enumerate(train_pairs):
        filename = os.path.basename(image_path)
        base_name = os.path.splitext(filename)[0]
        annotation_path = os.path.join(annotations_dir, f"{base_name}.yml")

        # Skip samples with any missing file rather than failing the run
        if not all(os.path.exists(p) for p in [image_path, mask_path, annotation_path]):
            continue

        # Get image dimensions
        with Image.open(image_path) as img:
            width, height = img.size

        # Load annotation and create polylines
        annotation = load_yaml_annotation(annotation_path)
        polylines, class_counts = parse_annotation_to_polylines(annotation, width, height)

        # Extract metadata
        metadata = extract_metadata_from_filename(filename)
        environment, parking_type, slot_type = determine_environment_and_parking_type(
            annotation, metadata["sample_id"]
        )

        # Load mask and create segmentation
        mask = np.array(Image.open(mask_path))
        segmentation = create_segmentation_from_mask(mask)

        # Create sample with all metadata
        sample = fo.Sample(
            filepath=image_path,
            split="train",
            sample_id=metadata["sample_id"],
            environment=fol.Classification(label=environment),
            parking_type=fol.Classification(label=parking_type),
            slot_type=fol.Classification(label=slot_type),
            polygon_annotations=fol.Polylines(polylines=polylines),
            classes_present=annotation.get('attribute', []),
            num_markers=class_counts.get('marker', 0),
            num_vehicles=class_counts.get('vehicle', 0),
            has_curb=('curb' in annotation.get('attribute', [])),
            has_ego_vehicle=('ego_vehicle' in annotation.get('attribute', [])),
            ground_truth=segmentation,
            mask_path=mask_path
        )
        samples.append(sample)

        if (i + 1) % 100 == 0:
            print(f"  Processed {i + 1} samples...")

    # Add samples to dataset
    dataset.add_samples(samples)
    dataset.compute_metadata()
    dataset.add_dynamic_sample_fields()

    print(f"✅ Dataset created with {len(samples)} samples!")
    return dataset


def main():
    """Main function."""
    dataset_root = "/Users/harpreetsahota/workspace/avm_dataset"
    dataset = process_avm_dataset(dataset_root)

    print("Launch FiftyOne app with:")
    print("  import fiftyone as fo")
    print("  dataset = fo.load_dataset('AVM_Segmentation')")
    print("  session = fo.launch_app(dataset)")


if __name__ == "__main__":
    main()