--- dataset_info: features: - name: image dtype: image - name: objects struct: - name: bbox sequence: sequence: float64 - name: segmentation sequence: sequence: sequence: float64 - name: categories sequence: int64 splits: - name: train num_bytes: 17598458856.47 num_examples: 117266 - name: validation num_bytes: 795110726.04 num_examples: 4952 download_size: 20170024873 dataset_size: 18393569582.510002 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* task_categories: - object-detection --- # MS-COCO2017 ## Use the dataset ```py from datasets import load_dataset ds = load_dataset("ariG23498/coco2017", streaming=True, split="validation") sample = next(iter(ds)) from PIL import Image, ImageDraw def draw_bboxes_on_image( image: Image.Image, objects: dict, category_names: dict = None, box_color: str = "red", text_color: str = "white" ): draw = ImageDraw.Draw(image) bboxes = objects.get("bbox", []) categories = objects.get("categories", []) for i, bbox in enumerate(bboxes): x, y, width, height = bbox # PIL expects (x_min, y_min, x_max, y_max) for rectangle x_min, y_min, x_max, y_max = x, y, x + width, y + height # Draw the rectangle draw.rectangle([x_min, y_min, x_max, y_max], outline=box_color, width=2) # Get category label category_id = categories[i] label = str(category_id) if category_names and category_id in category_names: label = category_names[category_id] # Draw the category label text_bbox = draw.textbbox((x_min, y_min), label) # Use textbbox to get text size text_width = text_bbox[2] - text_bbox[0] text_height = text_bbox[3] - text_bbox[1] # Draw a filled rectangle behind the text for better readability draw.rectangle([x_min, y_min - text_height - 5, x_min + text_width + 5, y_min], fill=box_color) draw.text((x_min + 2, y_min - text_height - 2), label, fill=text_color) return image draw_bboxes_on_image( image=sample["image"], objects=sample["objects"], ) ``` ## Get the categories ```py import json with open("/content/annotations/instances_train2017.json") as f: instances = json.load(f) instances["categories"] ``` ## Build the dataset and upload to Hub ```py !wget http://images.cocodataset.org/zips/train2017.zip !wget http://images.cocodataset.org/zips/val2017.zip !wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip !unzip train2017.zip !unzip val2017.zip !unzip annotations_trainval2017.zip import json from pathlib import Path from tqdm import tqdm from huggingface_hub import upload_folder from datasets import Dataset, DatasetDict, Features, Value, Sequence, Array2D import shutil # === Paths === base_dir = Path("/content") splits = { "train": { "image_dir": base_dir / "train2017", "annotation_file": base_dir / "annotations" / "instances_train2017.json", }, "val": { "image_dir": base_dir / "val2017", "annotation_file": base_dir / "annotations" / "instances_val2017.json", } } output_dir = base_dir / "coco_imagefolder" # Clean existing directory if output_dir.exists(): shutil.rmtree(output_dir) output_dir.mkdir(parents=True) def convert_coco_to_jsonl(image_dir, annotation_path, output_metadata_path): with open(annotation_path) as f: data = json.load(f) id_to_filename = {img['id']: img['file_name'] for img in data['images']} annotations_by_image = {} for ann in data['annotations']: img_id = ann['image_id'] bbox = ann['bbox'] # [x, y, width, height] category = ann['category_id'] if img_id not in annotations_by_image: annotations_by_image[img_id] = { "file_name": id_to_filename[img_id], "objects": { "bbox": [], "categories": [] } } annotations_by_image[img_id]["objects"]["bbox"].append(bbox) annotations_by_image[img_id]["objects"]["categories"].append(category) with open(output_metadata_path, "w") as f: for img_id, metadata in annotations_by_image.items(): json.dump(metadata, f) f.write("\n") # Convert and copy files to imagefolder-style structure for split, info in splits.items(): split_dir = output_dir / split split_dir.mkdir(parents=True) # Copy images for img_path in tqdm(info["image_dir"].glob("*.jpg"), desc=f"Copying {split} images"): shutil.copy(img_path, split_dir / img_path.name) # Convert annotations metadata_path = split_dir / "metadata.jsonl" convert_coco_to_jsonl(split_dir, info["annotation_file"], metadata_path) # HF Dataset from datasets import load_dataset dataset = load_dataset("imagefolder", data_dir="/content/coco_imagefolder") dataset.push_to_hub("ariG23498/coco2017") ```