# dusha_emotion_audio / dusha_emotion_audio.py (Hugging Face `datasets` loading script)
import csv
import os

import datasets

_DESCRIPTION = """\
Dusha is a bi-modal corpus suitable for speech emotion recognition (SER) tasks.
The dataset consists of audio recordings with Russian speech and their emotional labels.
The corpus contains approximately 350 hours of data. Four basic emotions that usually appear in a dialog with
a virtual assistant were selected: Happiness (Positive), Sadness, Anger and Neutral emotion.
"""
_HOMEPAGE = "https://github.com/salute-developers/golos/tree/master/dusha#dusha-dataset"
_DATA_URL = "https://huggingface.co/datasets/KELONMYOSA/dusha_emotion_audio/resolve/main/data/data.zip"
_METADATA_URL = "https://huggingface.co/datasets/KELONMYOSA/dusha_emotion_audio/resolve/main/data/labels.csv"
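
# The labels CSV is expected to hold a header row followed by two comma-separated
# columns per line: the audio path relative to the extracted archive and its
# emotion label. Hypothetical example rows (values for illustration only):
#
#     audio_path,label
#     crowd_train/wavs/0000.wav,neutral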


class Dusha(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 256

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"file": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16_000),
"label": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
)

    def _split_generators(self, dl_manager):
        # Download the labels CSV and download + extract the audio archive.
        metadata = dl_manager.download(_METADATA_URL)
        archive = dl_manager.download_and_extract(_DATA_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.ALL,
                gen_kwargs={
                    "audio_files": archive,
                    "metadata": metadata,
                },
            )
        ]

    def _generate_examples(self, audio_files, metadata):
        with open(metadata, encoding="utf-8") as f:
            csv_reader = csv.reader(f, delimiter=",")
            next(csv_reader)  # skip the header row
            for key, row in enumerate(csv_reader):
                audio_path, label = row
                full_audio_path = os.path.join(audio_files, audio_path)
                # Read the audio in binary mode; the Audio feature decodes the raw bytes.
                with open(full_audio_path, "rb") as audio_file:
                    audio = {"path": full_audio_path, "bytes": audio_file.read()}
                yield key, {
                    "file": full_audio_path,
                    "audio": audio,
                    "label": label,
                }
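

# Usage sketch (an addition for illustration, not part of the original script).
# It assumes the `datasets` library is installed and the Hub repository is
# reachable; recent `datasets` releases also require `trust_remote_code=True`
# to execute a Hub-hosted loading script. The split name "all" matches
# `datasets.Split.ALL` above.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("KELONMYOSA/dusha_emotion_audio", split="all", trust_remote_code=True)
    sample = ds[0]
    print(sample["file"], sample["label"], sample["audio"]["sampling_rate"])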