system HF staff committed on
Commit e583318
0 Parent(s):

Update files from the datasets library (from 1.2.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0
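Once this commit is live, the corpus can be loaded by name with the `datasets` library; a minimal sketch against version 1.2.0 (the config and split names come from the files added below):

```python
from datasets import load_dataset

# "en-fa" is the only config and "train" the only split defined by this
# commit (see dataset_infos.json and tep_en_fa_para.py below).
ds = load_dataset("tep_en_fa_para", "en-fa", split="train")
print(ds[0])  # {'translation': {'en': '...', 'fa': '...'}}
```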

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,137 @@
+ ---
+ annotations_creators:
+ - found
+ language_creators:
+ - found
+ languages:
+ - en
+ - fa
+ licenses:
+ - unknown
+ multilinguality:
+ - translation
+ size_categories:
+ - 100K<n<1M
+ source_datasets:
+ - original
+ task_categories:
+ - conditional-text-generation
+ task_ids:
+ - machine-translation
+ ---
+
+ # Dataset Card for tep_en_fa_para
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** [TEP: Tehran English-Persian parallel corpus](http://opus.nlpl.eu/TEP.php)
+ - **Repository:**
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:**
+
+ ### Dataset Summary
+
+ TEP: Tehran English-Persian parallel corpus. The first free Eng-Per corpus, provided by the Natural Language and Text Processing Laboratory, University of Tehran.
+
+ ### Supported Tasks and Leaderboards
+
+ The underlying task is machine translation for the English-Persian language pair.
+
+ ### Languages
+
+ English, Persian
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ Each instance is one aligned sentence pair, e.g. `{"translation": {"en": "<English sentence>", "fa": "<Persian sentence>"}}`.
+
+ ### Data Fields
+
+ - `translation`: a dictionary with two keys, `en` (the English sentence) and `fa` (its Persian translation).
+
+ ### Data Splits
+
+ The corpus ships as a single `train` split of 612,087 sentence pairs.
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ [More Information Needed]
+
+ ### Citation Information
+ M. T. Pilevar, H. Faili, and A. H. Pilevar, “TEP: Tehran English-Persian Parallel Corpus”, in Proceedings of the 12th International Conference on Intelligent Text Processing and Computational Linguistics (CICLing-2011).
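To make the card's `translation` field concrete, here is a hedged sketch of iterating the pairs as a typical translation preprocessing step (dataset id as added by this commit):

```python
from datasets import load_dataset

ds = load_dataset("tep_en_fa_para", "en-fa", split="train")

# Each row's "translation" dict carries both sides of the pair; peel them
# into parallel source/target lists (first three rows only, for brevity).
pairs = [row["translation"] for row in ds.select(range(3))]
sources = [p["en"] for p in pairs]
targets = [p["fa"] for p in pairs]
print(sources, targets)
```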
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"en-fa": {"description": "TEP: Tehran English-Persian parallel corpus. The first free Eng-Per corpus, provided by the Natural Language and Text Processing Laboratory, University of Tehran.\n", "citation": "@inproceedings{pilevar2011tep,\ntitle = {TEP: Tehran English-Persian Parallel Corpus},\nauthor = {Pilevar, M. T. and Faili, H. and Pilevar, A. H.},\nbooktitle = {Proceedings of the 12th International Conference on Intelligent Text Processing and Computational Linguistics (CICLing-2011)},\nyear = {2011}\n}\n", "homepage": "http://opus.nlpl.eu/TEP.php", "license": "", "features": {"translation": {"languages": ["en", "fa"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "builder_name": "tep_en_fa_para", "config_name": "en-fa", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 58735557, "num_examples": 612087, "dataset_name": "tep_en_fa_para"}}, "download_checksums": {"https://object.pouta.csc.fi/OPUS-TEP/v1/moses/en-fa.txt.zip": {"num_bytes": 16353318, "checksum": "190690734392e1898aaee2e46093883c086de3e11e903ab3c07fac54e350a238"}}, "download_size": 16353318, "post_processing_size": null, "dataset_size": 58735557, "size_in_bytes": 75088875}}
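For reference, the split and size metadata above can be read back with the standard `json` module; a quick sketch (assumes `dataset_infos.json` sits in the working directory):

```python
import json

# Inspect the metadata this commit records for the "en-fa" config.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

train = infos["en-fa"]["splits"]["train"]
print(train["num_examples"])            # 612087 sentence pairs
print(infos["en-fa"]["download_size"])  # 16353318 bytes (the OPUS zip)
```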
dummy/en-fa/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad9fa8ee734f22f609fda627b6bc5bd4b2c5d593736fb5c57d6ea22127fdbb7b
+ size 792
tep_en_fa_para.py ADDED
@@ -0,0 +1,100 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TEP: Tehran English-Persian parallel corpus."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{pilevar2011tep,
+ title = {TEP: Tehran English-Persian Parallel Corpus},
+ author = {Pilevar, M. T. and Faili, H. and Pilevar, A. H.},
+ booktitle = {Proceedings of the 12th International Conference on Intelligent Text Processing and Computational Linguistics (CICLing-2011)},
+ year = {2011}
+ }
+ """
+
+
+ _DESCRIPTION = """\
+ TEP: Tehran English-Persian parallel corpus. The first free Eng-Per corpus, provided by the Natural Language and Text Processing Laboratory, University of Tehran.
+ """
+
+
+ _HOMEPAGE = "http://opus.nlpl.eu/TEP.php"
+
+
+ _LICENSE = ""
+
+
+ _URLs = {"train": "https://object.pouta.csc.fi/OPUS-TEP/v1/moses/en-fa.txt.zip"}
+
+
+ class TepEnFaPara(datasets.GeneratorBasedBuilder):
+     """TEP: Tehran English-Persian parallel corpus."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="en-fa", version=VERSION),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = dl_manager.download_and_extract(_URLs)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "source_file": os.path.join(data_dir["train"], "TEP.en-fa.en"),
+                     "target_file": os.path.join(data_dir["train"], "TEP.en-fa.fa"),
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, source_file, target_file, split):
+         """Yields the examples in raw (text) form."""
+         with open(source_file, encoding="utf-8") as f:
+             source_sentences = f.read().split("\n")
+         with open(target_file, encoding="utf-8") as f:
+             target_sentences = f.read().split("\n")
+
+         assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
+             len(source_sentences),
+             len(target_sentences),
+             source_file,
+             target_file,
+         )
+
+         source, target = tuple(self.config.name.split("-"))
+         for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
+             result = {"translation": {source: l1, target: l2}}
+             yield idx, result
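The builder can also be exercised from a local checkout rather than the Hub; a sketch assuming the script above is saved as `./tep_en_fa_para.py`:

```python
from datasets import load_dataset

# Point load_dataset at the local script; it downloads the OPUS archive
# listed in _URLs and runs _split_generators/_generate_examples above.
ds = load_dataset("./tep_en_fa_para.py", "en-fa", split="train")
print(ds.num_rows)  # 612087
```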