Commit 14fe0b7
Parent(s): f58e957
Create coco.py
coco.py
ADDED
@@ -0,0 +1,707 @@
import os
import json
import logging
import collections

import datasets

logger = logging.getLogger(__name__)

_VERSION = datasets.Version("1.0.0", "")

_URL = "https://cocodataset.org/#home"

# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
_CITATION = """\
@article{DBLP:journals/corr/LinMBHPRDZ14,
  author    = {Tsung{-}Yi Lin and
               Michael Maire and
               Serge J. Belongie and
               Lubomir D. Bourdev and
               Ross B. Girshick and
               James Hays and
               Pietro Perona and
               Deva Ramanan and
               Piotr Doll{\'{a}}r and
               C. Lawrence Zitnick},
  title     = {Microsoft {COCO:} Common Objects in Context},
  journal   = {CoRR},
  volume    = {abs/1405.0312},
  year      = {2014},
  url       = {http://arxiv.org/abs/1405.0312},
  archivePrefix = {arXiv},
  eprint    = {1405.0312},
  timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
  biburl    = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
_DESCRIPTION = """COCO is a large-scale object detection, segmentation, and
captioning dataset.
Note:
 * Some images from the train and validation sets don't have annotations.
 * Coco 2014 and 2017 use the same images, but different train/val/test splits.
 * The test split doesn't have any annotations (only images).
 * Coco defines 91 classes, but the data only uses 80 classes.
 * Panoptic annotations define 200 classes, but only 133 are used.
"""

# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
_CONFIG_DESCRIPTION = """
This version contains images, bounding boxes and labels for the {year} version.
"""

Split = collections.namedtuple(
    'Split', ['name', 'images', 'annotations', 'annotation_type']
)

# Index 0 is padded with the placeholder class 'none' so that COCO category
# ids (which start at 1) line up with list indices.
CAT = [
    "none",
    "person",
    "bicycle",
    "car",
    "motorcycle",
    "airplane",
    "bus",
    "train",
    "truck",
    "boat",
    "traffic light",
    "fire hydrant",
    "street sign",
    "stop sign",
    "parking meter",
    "bench",
    "bird",
    "cat",
    "dog",
    "horse",
    "sheep",
    "cow",
    "elephant",
    "bear",
    "zebra",
    "giraffe",
    "hat",
    "backpack",
    "umbrella",
    "shoe",
    "eye glasses",
    "handbag",
    "tie",
    "suitcase",
    "frisbee",
    "skis",
    "snowboard",
    "sports ball",
    "kite",
    "baseball bat",
    "baseball glove",
    "skateboard",
    "surfboard",
    "tennis racket",
    "bottle",
    "plate",
    "wine glass",
    "cup",
    "fork",
    "knife",
    "spoon",
    "bowl",
    "banana",
    "apple",
    "sandwich",
    "orange",
    "broccoli",
    "carrot",
    "hot dog",
    "pizza",
    "donut",
    "cake",
    "chair",
    "couch",
    "potted plant",
    "bed",
    "mirror",
    "dining table",
    "window",
    "desk",
    "toilet",
    "door",
    "tv",
    "laptop",
    "mouse",
    "remote",
    "keyboard",
    "cell phone",
    "microwave",
    "oven",
    "toaster",
    "sink",
    "refrigerator",
    "blender",
    "book",
    "clock",
    "vase",
    "scissors",
    "teddy bear",
    "hair drier",
    "toothbrush",
    "hair brush",
]

SUPER_CAT = [
    "none",
    "person",
    "vehicle",
    "outdoor",
    "animal",
    "accessory",
    "sports",
    "kitchen",
    "food",
    "furniture",
    "electronic",
    "appliance",
    "indoor",
]

CAT2SUPER_CAT = [
    "none",
    "person",
    "vehicle",
    "vehicle",
    "vehicle",
    "vehicle",
    "vehicle",
    "vehicle",
    "vehicle",
    "vehicle",
    "outdoor",
    "outdoor",
    "outdoor",
    "outdoor",
    "outdoor",
    "outdoor",
    "animal",
    "animal",
    "animal",
    "animal",
    "animal",
    "animal",
    "animal",
    "animal",
    "animal",
    "animal",
    "accessory",
    "accessory",
    "accessory",
    "accessory",
    "accessory",
    "accessory",
    "accessory",
    "accessory",
    "sports",
    "sports",
    "sports",
    "sports",
    "sports",
    "sports",
    "sports",
    "sports",
    "sports",
    "sports",
    "kitchen",
    "kitchen",
    "kitchen",
    "kitchen",
    "kitchen",
    "kitchen",
    "kitchen",
    "kitchen",
    "food",
    "food",
    "food",
    "food",
    "food",
    "food",
    "food",
    "food",
    "food",
    "food",
    "furniture",
    "furniture",
    "furniture",
    "furniture",
    "furniture",
    "furniture",
    "furniture",
    "furniture",
    "furniture",
    "furniture",
    "electronic",
    "electronic",
    "electronic",
    "electronic",
    "electronic",
    "electronic",
    "appliance",
    "appliance",
    "appliance",
    "appliance",
    "appliance",
    "appliance",
    "indoor",
    "indoor",
    "indoor",
    "indoor",
    "indoor",
    "indoor",
    "indoor",
    "indoor",
]
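
# A quick consistency check (editor's sketch, not part of the original
# commit): `category_id` values index directly into CAT and CAT2SUPER_CAT
# below, so the two tables must have the same length and may only use known
# supercategory names.
assert len(CAT2SUPER_CAT) == len(CAT)
assert set(CAT2SUPER_CAT) <= set(SUPER_CAT)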


class AnnotationType(object):
    """Enum of the annotation format types.

    Splits are annotated with different formats.
    """

    BBOXES = 'bboxes'
    PANOPTIC = 'panoptic'
    NONE = 'none'


DETECTION_FEATURE = datasets.Features(
    {
        "image": datasets.Image(),
        "image/filename": datasets.Value("string"),
        "image/id": datasets.Value("int64"),
        "objects": datasets.Sequence(feature=datasets.Features({
            "id": datasets.Value("int64"),
            "area": datasets.Value("float32"),
            "bbox": datasets.Sequence(
                feature=datasets.Value("float32")
            ),
            "label": datasets.ClassLabel(names=CAT),
            "super_cat_label": datasets.ClassLabel(names=SUPER_CAT),
            "is_crowd": datasets.Value("bool"),
        })),
    }
)

PANOPTIC_FEATURE = datasets.Features(
    {
        "image": datasets.Image(),
        "image/filename": datasets.Value("string"),
        "image/id": datasets.Value("int64"),
        "panoptic_objects": datasets.Sequence(feature=datasets.Features({
            "id": datasets.Value("int64"),
            "area": datasets.Value("float32"),
            "bbox": datasets.Sequence(
                feature=datasets.Value("float32")
            ),
            "label": datasets.ClassLabel(names=CAT),
            "super_cat_label": datasets.ClassLabel(names=SUPER_CAT),
            "is_crowd": datasets.Value("bool"),
        })),
        "panoptic_image": datasets.Image(),
        "panoptic_image/filename": datasets.Value("string"),
    }
)
# More info could be added, like segmentation (as png mask), captions,
# person key-points, more metadata (original flickr url, ...).


# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
class CocoConfig(datasets.BuilderConfig):
    """BuilderConfig for the Coco dataset."""

    def __init__(self, features, splits=None, has_panoptic=False, **kwargs):
        super(CocoConfig, self).__init__(
            **kwargs
        )
        self.features = features
        self.splits = splits
        self.has_panoptic = has_panoptic


# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
class Coco(datasets.GeneratorBasedBuilder):
    """Base MS Coco dataset."""

    BUILDER_CONFIGS = [
        CocoConfig(
            name='2014',
            features=DETECTION_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2014),
            version=_VERSION,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2014',
                    annotations='annotations_trainval2014',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2014',
                    annotations='annotations_trainval2014',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.TEST,
                    images='test2014',
                    annotations='image_info_test2014',
                    annotation_type=AnnotationType.NONE,
                ),
                # Coco2014 contains an extra test split.
                Split(
                    name='test2015',
                    images='test2015',
                    annotations='image_info_test2015',
                    annotation_type=AnnotationType.NONE,
                ),
            ],
        ),
        CocoConfig(
            name='2017',
            features=DETECTION_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2017),
            version=_VERSION,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2017',
                    annotations='annotations_trainval2017',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2017',
                    annotations='annotations_trainval2017',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.TEST,
                    images='test2017',
                    annotations='image_info_test2017',
                    annotation_type=AnnotationType.NONE,
                ),
            ],
        ),
        CocoConfig(
            name='2017_panoptic',
            features=PANOPTIC_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2017),
            version=_VERSION,
            has_panoptic=True,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2017',
                    annotations='panoptic_annotations_trainval2017',
                    annotation_type=AnnotationType.PANOPTIC,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2017',
                    annotations='panoptic_annotations_trainval2017',
                    annotation_type=AnnotationType.PANOPTIC,
                ),
            ],
        ),
    ]

    DEFAULT_CONFIG_NAME = "2017"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=None,  # Probably needs to be fixed.
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):

        # DownloadManager memoizes the URLs, so duplicate URLs will only be
        # downloaded once.
        if dl_manager.manual_dir is None:
            # Merge urls from all splits together.
            urls = {}
            for split in self.config.splits:
                urls['{}_images'.format(split.name)] = 'zips/{}.zip'.format(split.images)
                urls['{}_annotations'.format(split.name)] = 'annotations/{}.zip'.format(
                    split.annotations
                )

            logger.info("download and extract coco dataset")
            root_url = 'http://images.cocodataset.org/'
            extracted_paths = dl_manager.download_and_extract(
                {key: root_url + url for key, url in urls.items()}
            )
        else:
            logger.info(f"use manual directory: {dl_manager.manual_dir}")
            extracted_paths = {}
            for split in self.config.splits:
                extracted_paths['{}_images'.format(split.name)] = dl_manager.manual_dir
                extracted_paths['{}_annotations'.format(split.name)] = dl_manager.manual_dir

        splits = []
        for split in self.config.splits:
            image_dir = extracted_paths['{}_images'.format(split.name)]
            annotations_dir = extracted_paths['{}_annotations'.format(split.name)]
            if self.config.has_panoptic:
                if dl_manager.manual_dir is None:
                    logger.info("extract panoptic data")
                    panoptic_image_zip_path = os.path.join(
                        annotations_dir,
                        'annotations',
                        'panoptic_{}.zip'.format(split.images),
                    )
                    panoptic_dir = dl_manager.extract(panoptic_image_zip_path)
                    panoptic_dir = os.path.join(
                        panoptic_dir, 'panoptic_{}'.format(split.images)
                    )
                else:
                    logger.info("use extracted data")
                    panoptic_dir = os.path.join(
                        annotations_dir, 'annotations', 'panoptic_{}.zip'.format(split.images)
                    )
            else:
                panoptic_dir = None
            splits.append(
                datasets.SplitGenerator(
                    name=split.name,
                    gen_kwargs={
                        'image_dir': image_dir,
                        'annotation_dir': annotations_dir,
                        'split_name': split.images,
                        'annotation_type': split.annotation_type,
                        'panoptic_dir': panoptic_dir,
                    }
                )
            )
        return splits
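
    # Editor's note (assumption, not stated in the original): with a manual
    # directory the script expects the official archives to be pre-extracted
    # in place, i.e. <manual_dir>/<split><year>/ for images and
    # <manual_dir>/annotations/ for the JSON files; the panoptic branch then
    # expects 'annotations/panoptic_<split><year>.zip' to already be an
    # extracted directory at that exact path.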

    def _generate_examples(self, image_dir, annotation_dir, split_name, annotation_type, panoptic_dir):
        """Generate examples as dicts.

        Args:
          image_dir: `str`, directory containing the images
          annotation_dir: `str`, directory containing annotations
          split_name: `str`, <split_name><year> (ex: train2014, val2017)
          annotation_type: `AnnotationType`, the annotation format (NONE, BBOXES,
            PANOPTIC)
          panoptic_dir: If annotation_type is PANOPTIC, contains the panoptic image
            directory
        Yields:
          example key and data
        """

        if annotation_type == AnnotationType.BBOXES:
            instance_filename = 'instances_{}.json'
        elif annotation_type == AnnotationType.PANOPTIC:
            instance_filename = 'panoptic_{}.json'
        elif annotation_type == AnnotationType.NONE:  # No annotation for test sets
            instance_filename = 'image_info_{}.json'

        # Load the annotations (label names, images metadata, ...)
        instance_path = os.path.join(
            annotation_dir,
            'annotations',
            instance_filename.format(split_name),
        )
        coco_annotation = ANNOTATION_CLS[annotation_type](instance_path)
        # Each category is a dict:
        # {
        #     'id': 51,  # From 1-91, some entries missing
        #     'name': 'bowl',
        #     'supercategory': 'kitchen',
        # }
        categories = coco_annotation.categories
        # Each image is a dict:
        # {
        #     'id': 262145,
        #     'file_name': 'COCO_train2017_000000262145.jpg',
        #     'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
        #     'coco_url': 'http://images.cocodataset.org/train2017/xyz.jpg',
        #     'license': 2,
        #     'date_captured': '2013-11-20 02:07:55',
        #     'height': 427,
        #     'width': 640,
        # }
        images = coco_annotation.images

        # TODO(b/121375022): ClassLabel names should also contain 'id' and
        # 'supercategory' (in addition to 'name').
        # Warning: As Coco only uses 80 out of the 91 labels, the c['id'] and
        # dataset name ids won't match.
        if self.config.has_panoptic:
            objects_key = 'panoptic_objects'
        else:
            objects_key = 'objects'
        # self.info.features[objects_key]['label'].names = [
        #     c['name'] for c in categories
        # ]

        # TODO(b/121375022): Conversion should be done by ClassLabel
        # categories_id2name = {c['id']: c['name'] for c in categories}

        # Iterate over all images
        annotation_skipped = 0
        for image_info in sorted(images, key=lambda x: x['id']):
            if annotation_type == AnnotationType.BBOXES:
                # Each instance annotation is a dict:
                # {
                #     'iscrowd': 0,
                #     'bbox': [116.95, 305.86, 285.3, 266.03],
                #     'image_id': 480023,
                #     'segmentation': [[312.29, 562.89, 402.25, ...]],
                #     'category_id': 58,
                #     'area': 54652.9556,
                #     'id': 86,
                # }
                instances = coco_annotation.get_annotations(img_id=image_info['id'])
            elif annotation_type == AnnotationType.PANOPTIC:
                # Each panoptic annotation is a dict:
                # {
                #     'file_name': '000000037777.png',
                #     'image_id': 37777,
                #     'segments_info': [
                #         {
                #             'area': 353,
                #             'category_id': 52,
                #             'iscrowd': 0,
                #             'id': 6202563,
                #             'bbox': [221, 179, 37, 27],
                #         },
                #         ...
                #     ]
                # }
                panoptic_annotation = coco_annotation.get_annotations(
                    img_id=image_info['id']
                )
                instances = panoptic_annotation['segments_info']
            else:
                instances = []  # No annotations

            if not instances:
                annotation_skipped += 1

            def build_bbox(x, y, width, height):
                # pylint: disable=cell-var-from-loop
                # build_bbox is only used within the loop, so it is ok to use image_info
                return [
                    y,
                    x,
                    (y + height),
                    (x + width),
                ]
                # pylint: enable=cell-var-from-loop
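
            # Editor's note: COCO's JSON stores each bbox as
            # [x, y, width, height] in pixel coordinates; build_bbox above
            # re-orders it to [ymin, xmin, ymax, xmax]. The cell-var-from-loop
            # pragma is vestigial here, since the helper no longer reads
            # image_info and takes everything through its arguments.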

            example = {
                'image': os.path.abspath(os.path.join(image_dir, split_name, image_info['file_name'])),
                'image/filename': image_info['file_name'],
                'image/id': image_info['id'],
                objects_key: [
                    {   # pylint: disable=g-complex-comprehension
                        'id': instance['id'],
                        'area': instance['area'],
                        'bbox': build_bbox(*instance['bbox']),
                        'label': instance['category_id'],
                        'super_cat_label': SUPER_CAT.index(CAT2SUPER_CAT[instance['category_id']]),
                        'is_crowd': bool(instance['iscrowd']),
                    }
                    for instance in instances
                ],
            }
            if self.config.has_panoptic:
                panoptic_filename = panoptic_annotation['file_name']
                panoptic_image_path = os.path.join(panoptic_dir, panoptic_filename)
                example['panoptic_image'] = panoptic_image_path
                example['panoptic_image/filename'] = panoptic_filename

            yield image_info['file_name'], example

        logger.info(
            '%d/%d images do not contain any annotations',
            annotation_skipped,
            len(images),
        )


class CocoAnnotation(object):
    """Coco annotation helper class."""

    def __init__(self, annotation_path):
        with open(annotation_path, "r") as f:
            data = json.load(f)
        self._data = data

    @property
    def categories(self):
        """Return the category dicts, as sorted in the file."""
        return self._data['categories']

    @property
    def images(self):
        """Return the image dicts, as sorted in the file."""
        return self._data['images']

    def get_annotations(self, img_id):
        """Return all annotations associated with the image id."""
        raise NotImplementedError  # AnnotationType.NONE has no annotations


class CocoAnnotationBBoxes(CocoAnnotation):
    """Coco bounding-box annotation helper class."""

    def __init__(self, annotation_path):
        super(CocoAnnotationBBoxes, self).__init__(annotation_path)

        img_id2annotations = collections.defaultdict(list)
        for a in self._data['annotations']:
            img_id2annotations[a['image_id']].append(a)
        self._img_id2annotations = {
            k: sorted(v, key=lambda a: a['id'])
            for k, v in img_id2annotations.items()
        }

    def get_annotations(self, img_id):
        """Return all annotations associated with the image id."""
        # Some images don't have any annotations. Return an empty list instead.
        return self._img_id2annotations.get(img_id, [])


class CocoAnnotationPanoptic(CocoAnnotation):
    """Coco panoptic annotation helper class."""

    def __init__(self, annotation_path):
        super(CocoAnnotationPanoptic, self).__init__(annotation_path)
        self._img_id2annotations = {
            a['image_id']: a for a in self._data['annotations']
        }

    def get_annotations(self, img_id):
        """Return the panoptic annotation associated with the image id."""
        return self._img_id2annotations[img_id]


ANNOTATION_CLS = {
    AnnotationType.NONE: CocoAnnotation,
    AnnotationType.BBOXES: CocoAnnotationBBoxes,
    AnnotationType.PANOPTIC: CocoAnnotationPanoptic,
}
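
A minimal usage sketch (editor's addition, not part of the committed file; it
assumes the script is saved locally as coco.py and that the installed
datasets library still supports loading local builder scripts, as it did at
the time of this commit):

import datasets

# Load the 2017 detection config; this triggers a large download from
# images.cocodataset.org unless data_dir points at pre-extracted archives.
coco = datasets.load_dataset("./coco.py", "2017", split="validation")

sample = coco[0]
print(sample["image/filename"], sample["image/id"])
# Sequence-of-dict features come back as a dict of lists, so the number of
# annotated objects is the length of any per-object column:
print(len(sample["objects"]["bbox"]), "annotated objects")

# With a manually prepared directory (see the layout note in
# _split_generators), pass data_dir to skip the download:
# coco = datasets.load_dataset("./coco.py", "2017", split="validation",
#                              data_dir="/path/to/coco")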