import os
import random
from glob import glob
import json

from huggingface_hub import hf_hub_download
from tqdm import tqdm
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs

_DESCRIPTION = (
    "GBI-16-4D is a dataset which is part of the AstroCompress project. It contains data "
    "assembled from the Sloan Digital Sky Survey (SDSS). Each FITS file contains a series "
    "of 800x800 pixel uint16 observations of the same portion of the Stripe 82 field, "
    "taken in 5 bandpass filters (u, g, r, i, z) over time. The filenames give the "
    "starting run, camcol, and field of the observations, followed by the number of "
    "timesteps, the number of filtered images per timestep, and the image dimensions. "
    "For example: `cube_center_run4203_camcol6_f44_35-5-800-800.fits` contains 35 frames "
    "of 800x800 pixel images in 5 bandpasses, starting with run 4203, camcol 6, and "
    "field 44. The images are stored in the FITS standard."
)

_HOMEPAGE = "https://google.github.io/AstroCompress"

_LICENSE = "CC BY 4.0"

_URL = "https://huggingface.co/datasets/AstroCompress/GBI-16-4D/resolve/main/"

_URLS = {
    "tiny": {
        "train": "./splits/tiny_train.jsonl",
        "test": "./splits/tiny_test.jsonl",
    },
    "full": {
        "train": "./splits/full_train.jsonl",
        "test": "./splits/full_test.jsonl",
    },
}

_REPO_ID = "AstroCompress/GBI-16-4D"

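
# Illustrative helper (not used by the loader below): a minimal sketch of how the
# filename convention described in _DESCRIPTION can be parsed. The returned keys are
# assumptions chosen for illustration, not part of the dataset schema.
def _parse_cube_filename(filename):
    """Parse e.g. `cube_center_run4203_camcol6_f44_35-5-800-800.fits`."""
    stem = os.path.splitext(os.path.basename(filename))[0]
    # e.g. ["cube", "center", "run4203", "camcol6", "f44", "35-5-800-800"]
    _, _, run, camcol, field, dims = stem.split("_")
    ntimes, nbands, height, width = (int(x) for x in dims.split("-"))
    return {
        "run": int(run[len("run"):]),
        "camcol": int(camcol[len("camcol"):]),
        "field": int(field[len("f"):]),
        "ntimes": ntimes,
        "nbands": nbands,
        "height": height,
        "width": width,
    }

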
class GBI_16_4D(datasets.GeneratorBasedBuilder):
    """GBI-16-4D Dataset"""

    VERSION = datasets.Version("1.0.2")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="tiny",
            version=VERSION,
            description="A small subset of the data, to test downstream workflows.",
        ),
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="The full dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tiny"

    def __init__(self, **kwargs):
        super().__init__(version=self.VERSION, **kwargs)

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
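                    # The number of timesteps varies per cube, so the first Array4D
                    # dimension is left as None; the other axes are the 5 bandpasses
                    # and the 800x800 pixels.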
| "image": datasets.Array4D(shape=(None, 5, 800, 800), dtype="uint16"), | |
| "ra": datasets.Value("float64"), | |
| "dec": datasets.Value("float64"), | |
| "pixscale": datasets.Value("float64"), | |
| "ntimes": datasets.Value("int64"), | |
| "nbands": datasets.Value("int64"), | |
| "image_id": datasets.Value("string"), | |
| } | |
| ), | |
| supervised_keys=None, | |
| homepage=_HOMEPAGE, | |
| license=_LICENSE, | |
| citation="TBD", | |
| ) | |
    def _split_generators(self, dl_manager: DownloadManager):
        ret = []
        base_path = dl_manager._base_path
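        # `locally_run` is True when the builder points at a local checkout of the
        # repository rather than at the Hugging Face Hub endpoint.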
        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
        _, path = url_to_fs(base_path)

        for split in ["train", "test"]:
            if locally_run:
                split_file_location = os.path.normpath(os.path.join(path, _URLS[self.config.name][split]))
                split_file = dl_manager.download_and_extract(split_file_location)
            else:
                split_file = hf_hub_download(repo_id=_REPO_ID, filename=_URLS[self.config.name][split], repo_type="dataset")

            with open(split_file, encoding="utf-8") as f:
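                # Each JSONL record holds the relative path to one FITS cube plus the
                # metadata exposed alongside the image feature.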
                data_filenames = []
                data_metadata = []
                for line in f:
                    item = json.loads(line)
                    data_filenames.append(item["image"])
                    data_metadata.append({"ra": item["ra"],
                                          "dec": item["dec"],
                                          "pixscale": item["pixscale"],
                                          "ntimes": item["ntimes"],
                                          "nbands": item["nbands"],
                                          "image_id": item["image_id"]})
                if locally_run:
                    data_urls = [os.path.normpath(os.path.join(path, data_filename)) for data_filename in data_filenames]
                    data_files = [dl_manager.download(data_url) for data_url in data_urls]
                else:
                    data_urls = data_filenames
                    data_files = [hf_hub_download(repo_id=_REPO_ID, filename=data_url, repo_type="dataset") for data_url in data_urls]

            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
                    gen_kwargs={"filepaths": data_files,
                                "split_file": split_file,
                                "split": split,
                                "data_metadata": data_metadata},
                ),
            )

        return ret

    def _generate_examples(self, filepaths, split_file, split, data_metadata):
        """Generate GBI-16-4D examples"""

        for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
            task_instance_key = f"{self.config.name}-{split}-{idx}"
            with fits.open(filepath, memmap=False, ignore_missing_simple=True) as hdul:
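                # The primary HDU holds the uint16 image cube; convert it to a nested
                # list so the Arrow writer can serialize it.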
                image_data = hdul[0].data.tolist()
            yield task_instance_key, {**{"image": image_data}, **item}
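

if __name__ == "__main__":
    # A minimal usage sketch, assuming network access to the Hugging Face Hub and a
    # `datasets` version that supports `trust_remote_code`; the "tiny" config keeps
    # the download small. This block is illustrative and not required by the loader.
    from datasets import load_dataset

    ds = load_dataset(_REPO_ID, "tiny", split="train", trust_remote_code=True)
    example = ds[0]
    # "image" is a nested list of shape (ntimes, 5, 800, 800) holding uint16 values.
    print(example["image_id"], example["ntimes"], example["nbands"])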