diff --git a/.gitattributes b/.gitattributes
index 957b2579c6ef20995a09efd9a17f8fd90606f5ed..dcbd53e17d30b1b9108618c4c1168b77bf69f5d4 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*json* filter=lfs diff=lfs merge=lfs -text
+*wav* filter=lfs diff=lfs merge=lfs -text
+*mp3* filter=lfs diff=lfs merge=lfs -text
diff --git a/NPSC.py b/NPSC.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d1bc5e27050b5d89ef2973f557b377e69d07664
--- /dev/null
+++ b/NPSC.py
@@ -0,0 +1,100 @@
+"""NPSC dataset."""
+import gzip
+import json
+
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+_DESCRIPTION = """\
+Norwegian Parliamentary Speech Corpus (NPSC): transcribed recordings of meetings from the Norwegian Parliament (Stortinget), distributed as gzip-compressed JSON lines shards.
+"""
+_CITATION = """
+TO BE DONE
+"""
+_URL = "https://www.nb.no/sprakbanken/ressurskatalog/oai-nb-no-sbr-58/"
+_DATA_URL = "https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/{split_suffix}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
+_N_SHARDS_PER_SPLIT = {
+    "train": 1,
+    "dev": 1,
+    "test": 1,
+}
+
+
+class NPSCConfig(datasets.BuilderConfig):
+    """BuilderConfig for NPSC."""
+
+    def __init__(self, *args, **kwargs):
+        """BuilderConfig for NPSC.
+
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(
+            *args,
+            name="NPSC",
+            **kwargs,
+        )
+
+
+class NPSC(datasets.GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [NPSCConfig()]
+    BUILDER_CONFIG_CLASS = NPSCConfig
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "sentence_order": datasets.Value("int32"),
+                    "speaker_id": datasets.Value("int32"),
+                    "speaker_name": datasets.Value("string"),
+                    "sentence_text": datasets.Value("string"),
+                    "sentence_language_code": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "start_time": datasets.Value("int32"),
+                    "end_time": datasets.Value("int32"),
+                    "normsentence_text": datasets.Value("string"),
+                    "transsentence_text": datasets.Value("string"),
+                    "translated": datasets.Value("int32"),
+                    "audio": datasets.features.Audio(sampling_rate=48000),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_URL,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        data_urls = {}
+        for split in ["train", "dev", "test"]:
+            data_urls[split] = [
+                _DATA_URL.format(
+                    split_suffix=split,
+                    index=index,
+                    n_shards=_N_SHARDS_PER_SPLIT[split],
+                )
+                for index in range(1, _N_SHARDS_PER_SPLIT[split] + 1)
+            ]
+        train_downloaded_files = dl_manager.download(data_urls["train"])
+        dev_downloaded_files = dl_manager.download(data_urls["dev"])
+        test_downloaded_files = dl_manager.download(data_urls["test"])
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": dev_downloaded_files}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_downloaded_files}),
+        ]
+
+    def _generate_examples(self, filepaths):
+        """Yield examples by reading every JSON line in each downloaded shard."""
+        id_ = 0
+        for filepath in filepaths:
+            logger.info("generating examples from = %s", filepath)
+            # The inner open() is patched by `datasets` in streaming mode, so shards can be read lazily.
+            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                for line in f:
+                    if line:
+                        example = json.loads(line)
+                        yield id_, example
+                        id_ += 1
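With the loader script in place, the dataset can be consumed through the standard `datasets` API. A minimal sketch, assuming the repository id `NbAiLab/NPSC` implied by `_DATA_URL`; newer versions of `datasets` may additionally require `trust_remote_code=True` for script-based datasets:

```python
from datasets import load_dataset

# Stream the train split: shards are fetched and decompressed lazily,
# which is what the open()/gzip.open() pattern in _generate_examples enables.
npsc = load_dataset("NbAiLab/NPSC", split="train", streaming=True)

first = next(iter(npsc))
# Field names come from NPSC._info(); "audio" decodes to a 48 kHz array.
print(first["speaker_name"], first["sentence_text"], first["start_time"])
```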
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..86cae20f2dc8c77e037d5c56b0b80087dda88575
--- /dev/null
+++ b/README.md
@@ -0,0 +1,161 @@
+---
+annotations_creators:
+- no-annotation
+language_creators:
+- found
+languages:
+- nb
+- no
+- nn
+licenses:
+- cc0-1.0
+multilinguality:
+- monolingual
+pretty_name: NPSC
+size_categories:
+- 2G