albertvillanova HF staff committed on
Commit
0feebfa
1 Parent(s): 89835d1

Delete loading script

Browse files
Files changed (1) hide show
  1. bsd_ja_en.py +0 -163
bsd_ja_en.py DELETED
@@ -1,163 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Japanese-English Business Scene Dialogue (BSD) dataset. """
16
-
17
-
18
- import json
19
-
20
- import datasets
21
-
22
-
23
- _CITATION = """\
24
- @inproceedings{rikters-etal-2019-designing,
25
- title = "Designing the Business Conversation Corpus",
26
- author = "Rikters, Matīss and
27
- Ri, Ryokan and
28
- Li, Tong and
29
- Nakazawa, Toshiaki",
30
- booktitle = "Proceedings of the 6th Workshop on Asian Translation",
31
- month = nov,
32
- year = "2019",
33
- address = "Hong Kong, China",
34
- publisher = "Association for Computational Linguistics",
35
- url = "https://www.aclweb.org/anthology/D19-5204",
36
- doi = "10.18653/v1/D19-5204",
37
- pages = "54--61"
38
- }
39
- """
40
-
41
-
42
- _DESCRIPTION = """\
43
- This is the Business Scene Dialogue (BSD) dataset,
44
- a Japanese-English parallel corpus containing written conversations
45
- in various business scenarios.
46
-
47
- The dataset was constructed in 3 steps:
48
- 1) selecting business scenes,
49
- 2) writing monolingual conversation scenarios according to the selected scenes, and
50
- 3) translating the scenarios into the other language.
51
-
52
- Half of the monolingual scenarios were written in Japanese
53
- and the other half were written in English.
54
-
55
- Fields:
56
- - id: dialogue identifier
57
- - no: sentence pair number within a dialogue
58
- - en_speaker: speaker name in English
59
- - ja_speaker: speaker name in Japanese
60
- - en_sentence: sentence in English
61
- - ja_sentence: sentence in Japanese
62
- - original_language: language in which monolingual scenario was written
63
- - tag: scenario
64
- - title: scenario title
65
- """
66
-
67
- _HOMEPAGE = "https://github.com/tsuruoka-lab/BSD"
68
-
69
- _LICENSE = "CC BY-NC-SA 4.0"
70
-
71
- _REPO = "https://raw.githubusercontent.com/tsuruoka-lab/BSD/master/"
72
-
73
- _URLs = {
74
- "train": _REPO + "train.json",
75
- "dev": _REPO + "dev.json",
76
- "test": _REPO + "test.json",
77
- }
78
-
79
-
80
class BsdJaEn(datasets.GeneratorBasedBuilder):
    """Japanese-English Business Scene Dialogue (BSD) dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Describe the dataset: features, citation, license, and homepage.

        All columns are strings except ``no``, the 1-based sentence-pair
        index within a dialogue, which is an int32.
        """
        # Build the schema with a comprehension; the tuple fixes column order.
        column_names = (
            "id",
            "tag",
            "title",
            "original_language",
            "no",
            "en_speaker",
            "ja_speaker",
            "en_sentence",
            "ja_sentence",
        )
        schema = datasets.Features(
            {
                name: datasets.Value("int32" if name == "no" else "string")
                for name in column_names
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=schema,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSON files and declare one generator per split."""
        downloaded = dl_manager.download_and_extract(_URLs)

        # (datasets split name, key into _URLs / downloaded paths)
        split_plan = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "dev"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": downloaded[key], "split": key},
            )
            for split_name, key in split_plan
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from one split's JSON file.

        Each dialogue contributes one example per conversation turn; the
        example key is ``"<dialogue id>_<turn number>"``.  ``split`` is
        part of the gen_kwargs contract but is not needed here.
        """
        with open(filepath, encoding="utf-8") as source:
            dialogues = json.load(source)

        for dialogue in dialogues:
            # Dialogue-level fields repeated on every turn of the conversation.
            shared = {
                "id": dialogue["id"],
                "tag": dialogue["tag"],
                "title": dialogue["title"],
                "original_language": dialogue["original_language"],
            }

            for turn in dialogue["conversation"]:
                turn_no = int(turn["no"])
                example = dict(shared)
                example.update(
                    {
                        "no": turn_no,
                        "en_speaker": turn["en_speaker"],
                        "ja_speaker": turn["ja_speaker"],
                        "en_sentence": turn["en_sentence"],
                        "ja_sentence": turn["ja_sentence"],
                    }
                )
                yield f"{shared['id']}_{turn_no}", example