---
dataset_info:
  features:
  - name: recipes
    dtype: string
  - name: names
    dtype: string
  splits:
  - name: train
    num_bytes: 11479327
    num_examples: 20000
  download_size: 5911822
  dataset_size: 11479327
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---

This is a simple recipes dataset, obtained by formatting/cleaning [this one](https://huggingface.co/datasets/formido/recipes-20k), which I believe was originally scraped from the food.com website.

Here's the cleanup script I used to produce it:

```python
from datasets import load_dataset
from transformers import set_seed


def clean_recipe(recipe):
    # Fix spacing around commas and normalize quote characters
    recipe = recipe.replace(" , ", ", ")
    recipe = recipe.replace('"', "'")
    recipe = recipe.replace("\\'", "'")
    # Strip the stringified-list brackets/quotes from both ends
    recipe = recipe.strip("\\']")
    recipe = recipe.strip("['")
    # Split the remaining "', '"-delimited steps and render them as bullets
    steps = recipe.split("', '")
    recipe = "\n".join("- " + step.capitalize() for step in steps)
    return recipe


def clean_name(name):
    name = name.capitalize()
    name = name.replace("  ", " ")  # collapse double spaces
    return name


def preprocess_function(examples):
    recipes = examples["output"]
    names = examples["input"]

    clean_recipes = []
    clean_names = []
    for recipe, name in zip(recipes, names):
        # Sanitize the name and recipe string
        clean_recipes.append(clean_recipe(recipe))
        clean_names.append(clean_name(name))

    return {"recipes": clean_recipes, "names": clean_names}


def clean_dataset():
    set_seed(42)
    dataset_id = "formido/recipes-20k"
    dataset = load_dataset(dataset_id)
    dataset = dataset.map(
        preprocess_function, batched=True, remove_columns=dataset["train"].column_names
    )
    dataset.push_to_hub("simple_recipes")


if __name__ == "__main__":
    clean_dataset()
```
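
To make the cleaning concrete, here's what `clean_recipe` does to one raw value. The stringified-list format is inferred from the operations above, and the string below is a made-up example, not an actual row from the source dataset:

```python
# Hypothetical raw value in the format the script expects
raw = "['preheat oven to 350 , grease a pan', 'mix flour and sugar', 'bake for 30 minutes']"
print(clean_recipe(raw))
# - Preheat oven to 350, grease a pan
# - Mix flour and sugar
# - Bake for 30 minutes
```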
|
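Once pushed, the cleaned dataset can be loaded back like any other Hub dataset. A minimal sketch, assuming the repository lives under your own namespace (`<your-username>` is a placeholder):

```python
from datasets import load_dataset

# Load the cleaned dataset; the "recipes" and "names" columns match the
# features declared in the YAML header above.
dataset = load_dataset("<your-username>/simple_recipes", split="train")

example = dataset[0]
print(example["names"])    # cleaned recipe name
print(example["recipes"])  # cleaned steps, one "- " bullet per line
```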