Datasets:

Multilinguality: translation
Size Categories: 1K<n<10K
Language Creators: crowdsourced
Annotations Creators: expert-generated
Source Datasets: original
Tags:
License:
rbawden committed on
Commit 3d81c4a
1 Parent(s): 2c51398

Upload DiaBLa.py

Files changed (1)
  1. DiaBLa.py +134 -0
DiaBLa.py ADDED
@@ -0,0 +1,134 @@
+# coding=utf-8
+'''DiaBLa: Dialogue Bilingue (Bilingual Dialogue) dataset'''
+
+import json
+import datasets
+
+
+logger = datasets.logging.get_logger(__name__)
+
+_CITATION = '''\
+@article{bawden_DiaBLa:-A-Corpus-of_2021,
+  author = {Bawden, Rachel and Bilinski, Eric and Lavergne, Thomas and Rosset, Sophie},
+  doi = {10.1007/s10579-020-09514-4},
+  title = {DiaBLa: A Corpus of Bilingual Spontaneous Written Dialogues for Machine Translation},
+  year = {2021},
+  journal = {Language Resources and Evaluation},
+  publisher = {Springer Verlag},
+  volume = {55},
+  pages = {635--660},
+  url = {https://hal.inria.fr/hal-03021633},
+  pdf = {https://hal.inria.fr/hal-03021633/file/diabla-lre-personal-formatting.pdf},
+}
+'''
+
+_DESCRIPTION = '''\
+English-French parallel dataset for the evaluation of \
+Machine Translation (MT) for informal, written bilingual dialogue.
+'''
+
+# Raw-file URLs for the corpus JSON files (assumption: the data lives on
+# the default branch of the GitHub repository).
+_URL = 'https://raw.githubusercontent.com/rbawden/DiaBLa-dataset/master/DiaBLa-corpus'
+_URLS = {
+    'dialogues': _URL + '/all-dialogues.json',
+    'users': _URL + '/all-users.json'
+}
+
+
+class DiablaConfig(datasets.BuilderConfig):
+    '''BuilderConfig for DiaBLa.'''
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for DiaBLa.
+
+        Args:
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(DiablaConfig, self).__init__(**kwargs)
+
+
+class Diabla(datasets.GeneratorBasedBuilder):
+    '''DiaBLa: English-French parallel dataset of bilingual dialogue'''
+
+    BUILDER_CONFIGS = [
+        DiablaConfig(
+            name="plain_text",
+            version=datasets.Version("1.0.0", ""),
+            description="Plain text",
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            # Features mirror the examples yielded in _generate_examples.
+            # Assumption: meta-information fields are exposed as plain strings.
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "orig_text": datasets.Value("string"),
+                    "norm_text": datasets.Value("string"),
+                    "mt_text": datasets.Value("string"),
+                    "ref_text": datasets.Value("string"),
+                    "utterance_meta_info": {
+                        "eval-judgment": datasets.Value("string"),
+                        "eval-verbatim": datasets.Value("string"),
+                        "eval-problems": datasets.Value("string"),
+                        "eval-user": datasets.Value("string"),
+                        "language": datasets.Value("string"),
+                    },
+                    "dialogue_meta_info": {
+                        k: datasets.Value("string")
+                        for k in ['start_time', 'end_time', 'scenario',
+                                  'user1', 'user2', 'translation_model',
+                                  'final_evaluation_user1',
+                                  'final_evaluation_user2']
+                    },
+                    # Past utterances of the same dialogue, oldest first.
+                    "context": datasets.features.Sequence(
+                        {
+                            "id": datasets.Value("string"),
+                            "orig_text": datasets.Value("string"),
+                            "norm_text": datasets.Value("string"),
+                            "mt_text": datasets.Value("string"),
+                            "ref_text": datasets.Value("string"),
+                        }
+                    ),
+                }
+            ),
+            # No default supervised_keys: the translation direction differs
+            # from one utterance to the next.
+            supervised_keys=None,
+            homepage='https://github.com/rbawden/DiaBLa-dataset',
+            citation=_CITATION,
+            task_templates=[
+                # TODO
+            ],
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_files = dl_manager.download_and_extract(_URLS)
+        # DiaBLa is an evaluation corpus, so all dialogues go into a single
+        # test split; only the dialogue file is needed to build the examples.
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": downloaded_files["dialogues"]},
+            ),
+        ]
+
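+    # Layout of the dialogue JSON file, as inferred from the access patterns
+    # in _generate_examples below (a sketch, not an official schema):
+    #
+    #   {"dialogues":
+    #       {"<dialogue_name>":
+    #           {"start_time": ..., "end_time": ..., "scenario": ...,
+    #            "user1": ..., "user2": ..., "translation_model": ...,
+    #            "final_evaluation_user1": ..., "final_evaluation_user2": ...,
+    #            "utterances":
+    #                {"<utterance_id>":
+    #                    {"language": ..., "original_text": ...,
+    #                     "postprocessed_text": ..., "reference_translation": ...,
+    #                     "normalised_version": ...,
+    #                     "eval": {"judgment": ..., "verbatim": ...,
+    #                              "problems": ..., "user": ...}}}}}}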
+    def _generate_examples(self, filepath):
+        '''This function returns the examples in the raw (text) form.'''
+        logger.info("generating examples from = %s", filepath)
+        with open(filepath, encoding="utf-8") as f:
+            diabla = json.load(f)
+        for dialogue_name in sorted(diabla['dialogues']):
+            dialogue_history = []  # to store past utterances
+            dialogue = diabla['dialogues'][dialogue_name]
+            # Meta-information attached to the dialogue
+            dialogue_info_keys = ['start_time', 'end_time', 'scenario',
+                                  'user1', 'user2', 'translation_model',
+                                  'final_evaluation_user1',
+                                  'final_evaluation_user2']
+            dialogue_info = {k: dialogue[k] for k in dialogue_info_keys}
+            # Main data: the utterances
+            for utterance_id in dialogue['utterances']:
+                utterance = dialogue['utterances'][utterance_id]
+                # Meta-information attached to the utterance
+                utterance_info_keys = ['judgment', 'verbatim', 'problems', 'user']
+                utterance_info = {'eval-' + k: utterance['eval'][k]
+                                  for k in utterance_info_keys}
+                utterance_info['language'] = utterance['language']
+                # Utterance text
+                original_text = utterance['original_text']
+                mt_text = utterance['postprocessed_text']
+                reference_text = utterance['reference_translation']
+                normalised_text = utterance['normalised_version']
+                id_ = dialogue_name + '_' + utterance_id
+                utterance_instance = {
+                    'orig_text': original_text,
+                    'norm_text': normalised_text,
+                    'mt_text': mt_text,
+                    'id': id_,
+                    'ref_text': reference_text,
+                    'utterance_meta_info': utterance_info,
+                    # Snapshot of the history so far: later appends must not
+                    # leak into examples that have already been yielded.
+                    'context': list(dialogue_history),
+                }
+                # Add to the history without the context and meta-information,
+                # so that histories do not end up nested inside histories.
+                dialogue_history.append({k: utterance_instance[k] for k in
+                                         ('id', 'orig_text', 'norm_text',
+                                          'mt_text', 'ref_text')})
+                utterance_instance['dialogue_meta_info'] = dialogue_info
+                yield id_, utterance_instance
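
Once the script is merged, the corpus can be loaded through the standard datasets API. A minimal usage sketch, assuming the script is served from this repository (the id 'rbawden/DiaBLa' is inferred from the committer and file name above) and exposes the single test split defined in _split_generators:

import datasets

# Load the DiaBLa loading script from the Hub.
diabla = datasets.load_dataset('rbawden/DiaBLa', 'plain_text', split='test')

example = diabla[0]
print(example['orig_text'])  # original utterance (English or French)
print(example['mt_text'])    # machine translation shown to the dialogue partner
print(example['ref_text'])   # human reference translation
# 'context' is a sequence feature, so past utterances are stored column-wise:
print(len(example['context']['id']))  # number of preceding utterances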