Murad Mebrahtu
committed on
Commit
·
2cad3e7
1
Parent(s):
7a5a207
updated script
Browse files
EMT.py
CHANGED
|
@@ -92,33 +92,45 @@ class EMT(datasets.GeneratorBasedBuilder):
|
|
| 92 |
"annotation_path": annotation_paths["test"],
|
| 93 |
},
|
| 94 |
),
|
| 95 |
-
|
| 96 |
-
|
| 97 |
def _generate_examples(self, images, annotation_path):
|
| 98 |
"""Generate examples from annotations and image archive."""
|
| 99 |
|
| 100 |
-
#
|
| 101 |
annotations = {}
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
for line in f:
|
| 106 |
parts = line.strip().split()
|
|
|
|
|
|
|
|
|
|
| 107 |
frame_id, track_id, class_name = parts[:3]
|
| 108 |
bbox = list(map(float, parts[4:8])) # Extract bounding box
|
| 109 |
-
class_id = _GT_OBJECT_CLASSES.get(class_name, -1) # Convert class_name to numeric ID
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
if
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
|
|
|
| 122 |
|
| 123 |
# Yield dataset entries
|
| 124 |
idx = 0
|
|
@@ -130,3 +142,40 @@ class EMT(datasets.GeneratorBasedBuilder):
|
|
| 130 |
"objects": annotations[img_name],
|
| 131 |
}
|
| 132 |
idx += 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
"annotation_path": annotation_paths["test"],
|
| 93 |
},
|
| 94 |
),
|
| 95 |
+
]
|
|
|
|
| 96 |
def _generate_examples(self, images, annotation_path):
|
| 97 |
"""Generate examples from annotations and image archive."""
|
| 98 |
|
| 99 |
+
# Dictionary to store annotations
|
| 100 |
annotations = {}
|
| 101 |
+
|
| 102 |
+
# Process each image in the dataset
|
| 103 |
+
for file_path, file_obj in images:
|
| 104 |
+
img_name = os.path.basename(file_path) # e.g., "000001.jpg"
|
| 105 |
+
video_name = os.path.basename(os.path.dirname(file_path)) # e.g., "video_1112"
|
| 106 |
+
|
| 107 |
+
# Expected annotation file
|
| 108 |
+
ann_file = os.path.join(annotation_path, f"{video_name}.txt")
|
| 109 |
+
|
| 110 |
+
# Read annotations only for the current video
|
| 111 |
+
if os.path.exists(ann_file):
|
| 112 |
+
with open(ann_file, "r", encoding="utf-8") as f:
|
| 113 |
for line in f:
|
| 114 |
parts = line.strip().split()
|
| 115 |
+
if len(parts) < 8: # Ensure there are enough elements
|
| 116 |
+
continue
|
| 117 |
+
|
| 118 |
frame_id, track_id, class_name = parts[:3]
|
| 119 |
bbox = list(map(float, parts[4:8])) # Extract bounding box
|
| 120 |
+
class_id = _GT_OBJECT_CLASSES.get(class_name, -1) # Convert class_name to numeric ID
|
| 121 |
+
|
| 122 |
+
# Match annotations to the correct image
|
| 123 |
+
if f"{frame_id}.jpg" == img_name:
|
| 124 |
+
if img_name not in annotations:
|
| 125 |
+
annotations[img_name] = []
|
| 126 |
+
annotations[img_name].append(
|
| 127 |
+
{
|
| 128 |
+
"bbox": bbox,
|
| 129 |
+
"class_id": class_id,
|
| 130 |
+
"track_id": int(track_id),
|
| 131 |
+
"class_name": class_name,
|
| 132 |
+
}
|
| 133 |
+
)
|
| 134 |
|
| 135 |
# Yield dataset entries
|
| 136 |
idx = 0
|
|
|
|
| 142 |
"objects": annotations[img_name],
|
| 143 |
}
|
| 144 |
idx += 1
|
| 145 |
+
|
| 146 |
+
# def _generate_examples(self, images, annotation_path):
|
| 147 |
+
# """Generate examples from annotations and image archive."""
|
| 148 |
+
|
| 149 |
+
# # Load annotation files
|
| 150 |
+
# annotations = {}
|
| 151 |
+
# for root, _, files in os.walk(annotation_path):
|
| 152 |
+
# for file in files:
|
| 153 |
+
# with open(os.path.join(root, file), "r", encoding="utf-8") as f:
|
| 154 |
+
# for line in f:
|
| 155 |
+
# parts = line.strip().split()
|
| 156 |
+
# frame_id, track_id, class_name = parts[:3]
|
| 157 |
+
# bbox = list(map(float, parts[4:8])) # Extract bounding box
|
| 158 |
+
# class_id = _GT_OBJECT_CLASSES.get(class_name, -1) # Convert class_name to numeric ID, default to -1 if not found
|
| 159 |
+
|
| 160 |
+
# img_path = f"{frame_id}.jpg"
|
| 161 |
+
# if img_path not in annotations:
|
| 162 |
+
# annotations[img_path] = []
|
| 163 |
+
# annotations[img_path].append(
|
| 164 |
+
# {
|
| 165 |
+
# "bbox": bbox,
|
| 166 |
+
# "class_id": class_id,
|
| 167 |
+
# "track_id": int(track_id),
|
| 168 |
+
# "class_name": class_name,
|
| 169 |
+
# }
|
| 170 |
+
# )
|
| 171 |
+
|
| 172 |
+
# # Yield dataset entries
|
| 173 |
+
# idx = 0
|
| 174 |
+
# for file_path, file_obj in images:
|
| 175 |
+
# img_name = os.path.basename(file_path)
|
| 176 |
+
# if img_name in annotations:
|
| 177 |
+
# yield idx, {
|
| 178 |
+
# "image": {"path": file_path, "bytes": file_obj.read()},
|
| 179 |
+
# "objects": annotations[img_name],
|
| 180 |
+
# }
|
| 181 |
+
# idx += 1
|