ar/ar.py
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Arabic United nation Corpus dataset."""
from __future__ import absolute_import, division, print_function
import datasets
_DESCRIPTION = """\
The corpus is a part of the MultiUN corpus.\
It is a collection of translated documents from the United Nations.\
The corpus is downloaded from the following website: \
[OPUS, the open parallel corpus](http://opus.nlpl.eu/) \
"""
_CITATION = """\
@inproceedings{eisele2010multiun,
title={MultiUN: A Multilingual Corpus from United Nation Documents.},
author={Eisele, Andreas and Chen, Yu},
booktitle={LREC},
year={2010}
}
"""
URL = "https://object.pouta.csc.fi/OPUS-MultiUN/v1/mono/ar.txt.gz"
class AracorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for the Arabic MultiUN corpus."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Arabic MultiUN corpus.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(AracorpusConfig, self).__init__(
            version=datasets.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
        )
class Aracorpus(datasets.GeneratorBasedBuilder):
    """Arabic MultiUN corpus dataset builder."""

    BUILDER_CONFIGS = [AracorpusConfig(name="plain_text", description="Plain text")]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            supervised_keys=None,
            homepage="http://opus.nlpl.eu/",
            citation=_CITATION,
        )
    def _vocab_text_gen(self, archive):
        for _, ex in self._generate_examples(archive):
            yield ex["text"]
    def _split_generators(self, dl_manager):
        # download_and_extract on the .gz archive returns the local path of the extracted text file.
        arch_path = dl_manager.download_and_extract(URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": arch_path}),
        ]
    def _generate_examples(self, filepath):
        """Yields one example per line of the extracted text file."""
        # An explicit UTF-8 encoding is needed for the Arabic text on platforms
        # where UTF-8 is not the default.
        with open(filepath, mode="r", encoding="utf-8") as f:
            for _id, line in enumerate(f):
                yield _id, {"text": line.strip()}
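
# Usage sketch (hypothetical: assumes the `datasets` library is installed and this
# script is available locally as "ar.py"; the config name "plain_text" is the one
# defined above):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("ar.py", "plain_text", split="train")
#   print(ds[0]["text"])  # first line of the extracted corpus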