harpreetsahota commited on
Commit
744113d
·
verified ·
1 Parent(s): 652067e

Upload parse_avm_to_fiftyone.py

Browse files
Files changed (1) hide show
  1. parse_avm_to_fiftyone.py +237 -0
parse_avm_to_fiftyone.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Parse AVM (Around View Monitoring) semantic segmentation dataset into FiftyOne format.
4
+
5
+ This script converts the AVM dataset with YAML polygon annotations and ground truth
6
+ segmentation masks into a FiftyOne dataset, preserving all semantic classes and metadata.
7
+
8
+ Dataset source: https://github.com/ChulhoonJang/avm_dataset
9
+ """
10
+
11
+ import os
12
+ import yaml
13
+ import numpy as np
14
+ from typing import Dict, List, Tuple
15
+ from PIL import Image
16
+
17
+ import fiftyone as fo
18
+ import fiftyone.core.labels as fol
19
+
20
+
21
def load_yaml_annotation(yaml_path: str) -> Dict:
    """Read an AVM YAML annotation file and return its parsed contents.

    OpenCV-style annotation files begin with a ``%YAML:1.0`` directive that
    PyYAML rejects, so that first line is dropped before parsing.
    """
    with open(yaml_path, 'r') as fh:
        text = fh.read()
    if text.startswith('%YAML'):
        # Discard the directive line only; keep the rest of the document.
        _, _, text = text.partition('\n')
    return yaml.safe_load(text)
28
+
29
+
30
def parse_annotation_to_polylines(annotation: Dict, image_width: int, image_height: int) -> Tuple[List[fol.Polyline], Dict[str, int]]:
    """Convert AVM annotation polygons to FiftyOne Polyline objects.

    Pixel coordinates are normalized to [0, 1] using the image dimensions.

    Args:
        annotation: Parsed YAML annotation; its ``attribute`` list names the
            classes whose polygon lists appear as top-level keys.
        image_width: Image width in pixels (used for x normalization).
        image_height: Image height in pixels (used for y normalization).

    Returns:
        A tuple of (polylines, per-class polygon counts).
    """
    # Display colors per semantic class; unknown classes fall back to blue.
    palette = {
        'ego_vehicle': '#000000',
        'marker': '#FFFFFF',
        'vehicle': '#FF0000',
        'curb': '#00FF00',
        'other': '#00FF00',
        'pillar': '#00FF00',
        'wall': '#00FF00',
    }

    polylines: List[fol.Polyline] = []
    class_counts: Dict[str, int] = {}

    for class_name in annotation.get('attribute', []):
        if class_name not in annotation:
            continue
        shapes = annotation[class_name]
        class_counts[class_name] = len(shapes)

        for shape_index, shape in enumerate(shapes):
            # Only entries carrying both coordinate arrays form a polygon.
            if 'x' not in shape or 'y' not in shape:
                continue

            normalized = [
                [px / image_width, py / image_height]
                for px, py in zip(shape['x'], shape['y'])
            ]
            color = palette.get(class_name, '#0000FF')
            polylines.append(
                fol.Polyline(
                    label=class_name,
                    points=[normalized],
                    index=shape_index,
                    closed=True,
                    filled=True,
                    fillColor=color,
                    lineColor=color,
                )
            )

    return polylines, class_counts
71
+
72
+
73
def create_segmentation_from_mask(mask: np.ndarray) -> fol.Segmentation:
    """Create a FiftyOne Segmentation object from a ground truth color mask.

    Args:
        mask: ``H x W x C`` uint8 array with ``C >= 3`` using the AVM color
            coding below. An alpha channel, if present, is ignored (the
            original version crashed on RGBA masks because a 4-channel pixel
            cannot broadcast against a 3-tuple color).

    Returns:
        fol.Segmentation whose mask holds integer class ids; pixels with an
        unrecognized color remain class 0 (free space).

    Raises:
        ValueError: If the mask is not a color (3-D, >= 3 channel) array.
    """
    # AVM ground-truth color coding -> integer class id
    color_to_class = {
        (0, 0, 255): 0,      # Blue  - Free space
        (255, 255, 255): 1,  # White - Marker
        (255, 0, 0): 2,      # Red   - Vehicle
        (0, 255, 0): 3,      # Green - Other objects
        (0, 0, 0): 4,        # Black - Ego vehicle
    }

    if mask.ndim != 3 or mask.shape[2] < 3:
        raise ValueError(
            f"Expected an H x W x C color mask with C >= 3, got shape {mask.shape}"
        )

    rgb = mask[:, :, :3]  # drop alpha channel if the PNG was saved as RGBA

    height, width = rgb.shape[:2]
    class_mask = np.zeros((height, width), dtype=np.uint8)
    for color, class_id in color_to_class.items():
        class_mask[np.all(rgb == color, axis=2)] = class_id

    return fol.Segmentation(mask=class_mask)
91
+
92
+
93
+ def parse_train_file(train_file: str, base_dir: str) -> List[Tuple[str, str]]:
94
+ """Parse train_db.txt to get image-mask pairs."""
95
+ pairs = []
96
+
97
+ with open(train_file, 'r') as f:
98
+ for line in f:
99
+ line = line.strip()
100
+ if line:
101
+ parts = line.split()
102
+ if len(parts) == 2:
103
+ image_path = os.path.join(base_dir, parts[0].lstrip('/'))
104
+ mask_path = os.path.join(base_dir, parts[1].lstrip('/'))
105
+ pairs.append((image_path, mask_path))
106
+
107
+ return pairs
108
+
109
+
110
+ def extract_metadata_from_filename(filename: str) -> Dict:
111
+ """Extract metadata from the AVM filename."""
112
+ base_name = os.path.splitext(filename)[0]
113
+
114
+ try:
115
+ sample_id = int(base_name)
116
+ except ValueError:
117
+ sample_id = base_name
118
+
119
+ return {
120
+ "sample_id": sample_id,
121
+ "filename_base": base_name
122
+ }
123
+
124
+
125
+ def determine_environment_and_parking_type(annotation: Dict, sample_id: int) -> Tuple[str, str, str]:
126
+ """Determine environment, parking type, and slot type from annotation."""
127
+ has_curb = 'curb' in annotation.get('attribute', [])
128
+ has_marker = 'marker' in annotation.get('attribute', [])
129
+
130
+ environment = "outdoor" if has_curb else "indoor"
131
+ parking_type = "perpendicular" # Most common in dataset
132
+ slot_type = "closed" if has_marker else "no_marker"
133
+
134
+ return environment, parking_type, slot_type
135
+
136
+
137
def process_avm_dataset(dataset_root: str) -> fo.Dataset:
    """Process the AVM dataset and create a FiftyOne dataset.

    Expects the layout:
        <dataset_root>/avm_seg_db/train_db.txt       image/mask pair list
        <dataset_root>/avm_seg_db/annotations/*.yml  polygon annotations

    Args:
        dataset_root: Root directory of the cloned AVM dataset.

    Returns:
        A persistent FiftyOne dataset named "AVM_Segmentation". Pairs with a
        missing image, mask, or annotation file are skipped silently.
    """
    seg_db_dir = os.path.join(dataset_root, "avm_seg_db")
    annotations_dir = os.path.join(seg_db_dir, "annotations")
    train_file = os.path.join(seg_db_dir, "train_db.txt")

    # Recreate the dataset from scratch on every run
    dataset = fo.Dataset(name="AVM_Segmentation", overwrite=True, persistent=True)

    # Dataset-level metadata, including the mask color coding
    dataset.info = {
        "description": "AVM (Around View Monitoring) System Dataset for Auto Parking - Semantic Segmentation",
        "source": "https://github.com/ChulhoonJang/avm_dataset",
        "classes": {
            "0": {"name": "free_space", "color": [0, 0, 255]},
            "1": {"name": "marker", "color": [255, 255, 255]},
            "2": {"name": "vehicle", "color": [255, 0, 0]},
            "3": {"name": "other", "color": [0, 255, 0]},
            "4": {"name": "ego_vehicle", "color": [0, 0, 0]}
        },
        "image_dimensions": {"width": 320, "height": 160}
    }

    # Get train pairs
    train_pairs = parse_train_file(train_file, seg_db_dir)

    samples = []
    print(f"Processing {len(train_pairs)} training samples...")

    for i, (image_path, mask_path) in enumerate(train_pairs):
        filename = os.path.basename(image_path)
        base_name = os.path.splitext(filename)[0]
        annotation_path = os.path.join(annotations_dir, f"{base_name}.yml")

        # Skip incomplete samples rather than failing the whole import
        if not all(os.path.exists(p) for p in (image_path, mask_path, annotation_path)):
            continue

        # Get image dimensions (context manager closes the file handle)
        with Image.open(image_path) as img:
            width, height = img.size

        # Load annotation and create polylines
        annotation = load_yaml_annotation(annotation_path)
        polylines, class_counts = parse_annotation_to_polylines(annotation, width, height)

        # Extract metadata
        metadata = extract_metadata_from_filename(filename)
        environment, parking_type, slot_type = determine_environment_and_parking_type(
            annotation, metadata["sample_id"]
        )

        # Load mask and create segmentation; use a context manager so the
        # mask file handle is closed promptly (the original leaked it)
        with Image.open(mask_path) as mask_img:
            mask = np.array(mask_img)
        segmentation = create_segmentation_from_mask(mask)

        classes_present = annotation.get('attribute', [])

        # Create sample with all metadata
        sample = fo.Sample(
            filepath=image_path,
            split="train",
            sample_id=metadata["sample_id"],
            environment=fol.Classification(label=environment),
            parking_type=fol.Classification(label=parking_type),
            slot_type=fol.Classification(label=slot_type),
            polygon_annotations=fol.Polylines(polylines=polylines),
            classes_present=classes_present,
            num_markers=class_counts.get('marker', 0),
            num_vehicles=class_counts.get('vehicle', 0),
            has_curb=('curb' in classes_present),
            has_ego_vehicle=('ego_vehicle' in classes_present),
            ground_truth=segmentation,
            mask_path=mask_path
        )

        samples.append(sample)

        if (i + 1) % 100 == 0:
            print(f"  Processed {i + 1} samples...")

    # Add samples to dataset
    dataset.add_samples(samples)
    dataset.compute_metadata()
    dataset.add_dynamic_sample_fields()

    print(f"✅ Dataset created with {len(samples)} samples!")
    return dataset
222
+
223
+
224
def main(dataset_root: str = "/Users/harpreetsahota/workspace/avm_dataset") -> None:
    """Build the AVM FiftyOne dataset and print loading instructions.

    Args:
        dataset_root: Root directory of the cloned AVM dataset. Defaults to
            the original author's local path; callers can now pass their own
            location instead of editing the source.
    """
    process_avm_dataset(dataset_root)

    print("Launch FiftyOne app with:")
    print("  import fiftyone as fo")
    print("  dataset = fo.load_dataset('AVM_Segmentation')")
    print("  session = fo.launch_app(dataset)")
234
+
235
+
236
# Script entry point: only run the import when executed directly.
if __name__ == "__main__":
    main()