import os
import random
from glob import glob
import json

from huggingface_hub import hf_hub_download
from astropy.io import fits
import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs

_DESCRIPTION = (
    "GBI-16-4D is a dataset which is part of the AstroCompress project. It contains data "
    "assembled from the Sloan Digital Sky Survey (SDSS). Each FITS file contains a series "
    "of 800x800 pixel uint16 observations of the same portion of the Stripe82 field, "
    "taken in 5 bandpass filters (u, g, r, i, z) over time. The filenames give the "
    "starting run, field, camcol of the observations, the number of filtered images per "
    "timestep, and the number of timesteps. For example: "
    "`cube_center_run4203_camcol6_f44_35-5-800-800.fits` contains 35 frames of 800x800 "
    "pixel images in 5 bandpasses starting with run 4203, field 44, and camcol 6. "
    "The images are stored in the FITS standard."
)

_HOMEPAGE = "https://google.github.io/AstroCompress"

_LICENSE = "CC BY 4.0"

_URL = "https://huggingface.co/datasets/AstroCompress/GBI-16-4D/resolve/main/"

_URLS = {
    "tiny": {
        "train": "./splits/tiny_train.jsonl",
        "test": "./splits/tiny_test.jsonl",
    },
    "full": {
        "train": "./splits/full_train.jsonl",
        "test": "./splits/full_test.jsonl",
    },
}

_REPO_ID = "AstroCompress/GBI-16-4D"


class GBI_16_4D(datasets.GeneratorBasedBuilder):
    """GBI-16-4D Dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="tiny",
            version=VERSION,
            description="A small subset of the data, to test downstream workflows.",
        ),
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="The full dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tiny"

    def __init__(self, **kwargs):
        super().__init__(version=self.VERSION, **kwargs)

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Each example is a (time, band, y, x) cube; the number of
                    # timesteps varies per file, hence the leading None.
                    "image": datasets.Array4D(shape=(None, 5, 800, 800), dtype="uint16"),
                    "ra": datasets.Value("float64"),
                    "dec": datasets.Value("float64"),
                    "pixscale": datasets.Value("float64"),
                    "ntimes": datasets.Value("int64"),
                    "nbands": datasets.Value("int64"),
                    "image_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation="TBD",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        ret = []
        # Note: _base_path is a private attribute of DownloadManager; it is used
        # here to tell a local checkout apart from a Hub-hosted run.
        base_path = dl_manager._base_path
        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
        _, path = url_to_fs(base_path)

        for split in ["train", "test"]:
            # Resolve the split manifest (.jsonl), either from the local
            # checkout or by downloading it from the Hub.
            if locally_run:
                split_file_location = os.path.normpath(
                    os.path.join(path, _URLS[self.config.name][split])
                )
                split_file = dl_manager.download_and_extract(split_file_location)
            else:
                split_file = hf_hub_download(
                    repo_id=_REPO_ID,
                    filename=_URLS[self.config.name][split],
                    repo_type="dataset",
                )

            # Each manifest line is a JSON record pointing at one FITS file
            # plus its per-cube metadata.
            with open(split_file, encoding="utf-8") as f:
                data_filenames = []
                data_metadata = []
                for line in f:
                    item = json.loads(line)
                    data_filenames.append(item["image"])
                    data_metadata.append(
                        {
                            "ra": item["ra"],
                            "dec": item["dec"],
                            "pixscale": item["pixscale"],
                            "ntimes": item["ntimes"],
                            "nbands": item["nbands"],
                            "image_id": item["image_id"],
                        }
                    )

            # Fetch the FITS files themselves.
            if locally_run:
                data_urls = [
                    os.path.normpath(os.path.join(path, data_filename))
                    for data_filename in data_filenames
                ]
                data_files = [dl_manager.download(data_url) for data_url in data_urls]
            else:
                data_urls = data_filenames
                data_files = [
                    hf_hub_download(repo_id=_REPO_ID, filename=data_url, repo_type="dataset")
                    for data_url in data_urls
                ]

            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
                    gen_kwargs={
                        "filepaths": data_files,
                        "split_file": split_file,
                        "split": split,
                        "data_metadata": data_metadata,
                    },
                )
            )

        return ret

    def _generate_examples(self, filepaths, split_file, split, data_metadata):
        """Generate GBI-16-4D examples"""
        for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
            task_instance_key = f"{self.config.name}-{split}-{idx}"
            # memmap=False forces the full cube into memory so the file handle
            # can be closed before the data is yielded.
            with fits.open(filepath, memmap=False) as hdul:
                image_data = hdul[0].data.tolist()
            yield task_instance_key, {**{"image": image_data}, **item}


def make_split_jsonl_files(config_type="tiny", data_dir="./data", outdir="./splits", seed=42):
    """
    Create jsonl files for the GBI-16-4D dataset.

    config_type: str, default="tiny"
        The type of split to create. Options are "tiny" and "full".
    data_dir: str, default="./data"
        The directory where the FITS files are located.
    outdir: str, default="./splits"
        The directory where the jsonl files will be created.
    seed: int, default=42
        The seed for the random split.
    """
    random.seed(seed)
    os.makedirs(outdir, exist_ok=True)

    fits_files = glob(os.path.join(data_dir, "*.fits"))
    random.shuffle(fits_files)
    if config_type == "tiny":
        # Tiny config: two files for train, one for test.
        train_files = fits_files[:2]
        test_files = fits_files[2:3]
    elif config_type == "full":
        # Full config: 80/20 train/test split.
        split_idx = int(0.8 * len(fits_files))
        train_files = fits_files[:split_idx]
        test_files = fits_files[split_idx:]
    else:
        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")

    def create_jsonl(files, split_name):
        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
        with open(output_file, "w") as out_f:
            for file in files:
                print(file, flush=True, end="...")
                with fits.open(file, memmap=False) as hdul:
                    image_id = os.path.basename(file).split(".fits")[0]
                    ra = hdul[0].header.get("CRVAL1", 0)
                    dec = hdul[0].header.get("CRVAL2", 0)
                    # Falls back to the SDSS plate scale of 0.396 arcsec/pixel
                    # when the CD1_2 keyword is absent.
                    pixscale = hdul[0].header.get("CD1_2", 0.396)
                    ntimes = hdul[0].data.shape[0]
                    nbands = hdul[0].data.shape[1]
                    item = {
                        "image_id": image_id,
                        "image": file,
                        "ra": ra,
                        "dec": dec,
                        "pixscale": pixscale,
                        "ntimes": ntimes,
                        "nbands": nbands,
                    }
                    out_f.write(json.dumps(item) + "\n")

    create_jsonl(train_files, "train")
    create_jsonl(test_files, "test")