OfekGlick committed on
Commit 531820d
1 Parent(s): d7d8355

Upload 47 files

DiscoEval.py ADDED
@@ -0,0 +1,288 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ import io
+ import datasets
+ import constants
+ import pickle
+
+ _CITATION = """\
+ @InProceedings{mchen-discoeval-19,
+     title = {Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations},
+     author = {Mingda Chen and Zewei Chu and Kevin Gimpel},
+     booktitle = {Proc. of {EMNLP}},
+     year = {2019}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset contains all tasks of the DiscoEval benchmark for sentence representation learning.
+ """
+
+ _HOMEPAGE = "https://github.com/ZeweiChu/DiscoEval"
+
+
+ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
+     """DiscoEval Benchmark"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name=constants.SPARXIV,
+             version=VERSION,
+             description="Sentence positioning dataset from arXiv",
+         ),
+         datasets.BuilderConfig(
+             name=constants.SPROCSTORY,
+             version=VERSION,
+             description="Sentence positioning dataset from ROCStory",
+         ),
+         datasets.BuilderConfig(
+             name=constants.SPWIKI,
+             version=VERSION,
+             description="Sentence positioning dataset from Wikipedia",
+         ),
+         datasets.BuilderConfig(
+             name=constants.DCCHAT,
+             version=VERSION,
+             description="Discourse Coherence dataset from chat",
+         ),
+         datasets.BuilderConfig(
+             name=constants.DCWIKI,
+             version=VERSION,
+             description="Discourse Coherence dataset from Wikipedia",
+         ),
+         datasets.BuilderConfig(
+             name=constants.RST,
+             version=VERSION,
+             description="The RST Discourse Treebank dataset.",
+         ),
+         datasets.BuilderConfig(
+             name=constants.PDTB_E,
+             version=VERSION,
+             description="The Penn Discourse Treebank - Explicit dataset.",
+         ),
+         datasets.BuilderConfig(
+             name=constants.PDTB_I,
+             version=VERSION,
+             description="The Penn Discourse Treebank - Implicit dataset.",
+         ),
+         datasets.BuilderConfig(
+             name=constants.SSPABS,
+             version=VERSION,
+             description="The Sentence Section Prediction (abstracts) dataset.",
+         ),
+     ]
+
+     # A default configuration is optional; the arXiv sentence-positioning task is used here.
+     DEFAULT_CONFIG_NAME = constants.SPARXIV
+
+     def _info(self):
+         # Specifies the datasets.DatasetInfo object, which describes the feature
+         # types and other metadata for the selected configuration.
+
+         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
+             features_dict = {
+                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
+                 for i in range(constants.SP_TEXT_COLUMNS + 1)
+             }
+             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.SP_LABELS)
+             features = datasets.Features(features_dict)
+
+         elif self.config.name in [constants.DCCHAT, constants.DCWIKI]:
+             features_dict = {
+                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
+                 for i in range(constants.DC_TEXT_COLUMNS + 1)
+             }
+             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.DC_LABELS)
+             features = datasets.Features(features_dict)
+
+         elif self.config.name in [constants.RST]:
+             # RST sentences are stored as token lists, hence the list-of-string feature type.
+             features_dict = {
+                 constants.TEXT_COLUMN_NAME[i]: [datasets.Value('string')]
+                 for i in range(constants.RST_TEXT_COLUMNS + 1)
+             }
+             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.RST_LABELS)
+             features = datasets.Features(features_dict)
+
+         elif self.config.name in [constants.PDTB_E]:
+             features_dict = {
+                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
+                 for i in range(constants.PDTB_E_TEXT_COLUMNS + 1)
+             }
+             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.PDTB_E_LABELS)
+             features = datasets.Features(features_dict)
+
+         elif self.config.name in [constants.PDTB_I]:
+             features_dict = {
+                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
+                 for i in range(constants.PDTB_I_TEXT_COLUMNS + 1)
+             }
+             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.PDTB_I_LABELS)
+             features = datasets.Features(features_dict)
+
+         elif self.config.name in [constants.SSPABS]:
+             features_dict = {
+                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
+                 for i in range(constants.SSPABS_TEXT_COLUMNS + 1)
+             }
+             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.SSPABS_LABELS)
+             features = datasets.Features(features_dict)
+
+         else:
+             raise ValueError(f"Unknown configuration name: {self.config.name}")
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # The columns of the dataset and their types, built above because
+             # they differ between configurations.
+             features=features,
+             # Homepage of the dataset for documentation.
+             homepage=_HOMEPAGE,
+             # Citation for the dataset.
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Defines the train/validation/test splits for the configuration selected
+         # by the user (self.config.name). The data files ship with this repository,
+         # so dl_manager is not needed for downloading.
+         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
+             # The SP data folders are named arxiv/rocstory/wiki (see the data files
+             # in this commit), i.e. the config name without its 'SP' prefix.
+             data_dir = os.path.join(constants.SP_DATA_DIR, self.config.name[2:])
+             train_name = constants.SP_TRAIN_NAME
+             valid_name = constants.SP_VALID_NAME
+             test_name = constants.SP_TEST_NAME
+
+         elif self.config.name in [constants.DCCHAT, constants.DCWIKI]:
+             # Likewise, the DC folders are named chat/wiki.
+             data_dir = os.path.join(constants.DC_DATA_DIR, self.config.name[2:])
+             train_name = constants.DC_TRAIN_NAME
+             valid_name = constants.DC_VALID_NAME
+             test_name = constants.DC_TEST_NAME
+
+         elif self.config.name in [constants.RST]:
+             data_dir = constants.RST_DATA_DIR
+             train_name = constants.RST_TRAIN_NAME
+             valid_name = constants.RST_VALID_NAME
+             test_name = constants.RST_TEST_NAME
+
+         elif self.config.name in [constants.PDTB_E]:
+             # The explicit relation files live under data/PDTB/Explicit, not PDTB-E.
+             data_dir = os.path.join(constants.PDTB_DATA_DIR, 'Explicit')
+             train_name = constants.PDTB_TRAIN_NAME
+             valid_name = constants.PDTB_VALID_NAME
+             test_name = constants.PDTB_TEST_NAME
+
+         elif self.config.name in [constants.PDTB_I]:
+             # The implicit relation files live under data/PDTB/Implicit, not PDTB-I.
+             data_dir = os.path.join(constants.PDTB_DATA_DIR, 'Implicit')
+             train_name = constants.PDTB_TRAIN_NAME
+             valid_name = constants.PDTB_VALID_NAME
+             test_name = constants.PDTB_TEST_NAME
+
+         elif self.config.name in [constants.SSPABS]:
+             data_dir = constants.SSPABS_DATA_DIR
+             train_name = constants.SSPABS_TRAIN_NAME
+             valid_name = constants.SSPABS_VALID_NAME
+             test_name = constants.SSPABS_TEST_NAME
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, train_name),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, valid_name),
+                     "split": "dev",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, test_name),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         # Yields (key, example) tuples. The key exists for legacy (tfds) reasons
+         # and is not important in itself, but must be unique for each example.
+         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI,
+                                 constants.DCWIKI, constants.DCCHAT,
+                                 constants.PDTB_E, constants.PDTB_I,
+                                 constants.SSPABS]:
+             # Tab-separated text files: the label comes first, then the text columns.
+             with io.open(filepath, mode='r', encoding='utf-8') as f:
+                 for key, line in enumerate(f):
+                     fields = line.strip().split("\t")
+                     example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(fields[1:])}
+                     example[constants.LABEL_NAME] = fields[0]
+                     yield key, example
+
+         elif self.config.name in [constants.RST]:
+             # The RST files are pickled lists; each record holds the label
+             # followed by two tokenized sentences.
+             with open(filepath, "rb") as f:
+                 data = pickle.load(f)
+             for key, record in enumerate(data):
+                 example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(record[1:])}
+                 example[constants.LABEL_NAME] = record[0]
+                 yield key, example
+
+
+ if __name__ == '__main__':
+     # Quick local sanity check: inspect the first RST training example.
+     with open(os.path.join(constants.RST_DATA_DIR, constants.RST_TRAIN_NAME), "rb") as f:
+         data = pickle.load(f)
+     label, sentences = data[0][0], data[0][1:]
+     print("label:", label)
+     print("sentences:", sentences)
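
A minimal usage sketch for the script above. The repository id below ("OfekGlick/DiscoEval") is an assumption based on the committer name, not something this commit specifies; substitute the actual Hub id:

    from datasets import load_dataset

    # Configuration names come from constants.py, e.g. SParxiv, SProcstory,
    # SPwiki, DCchat, DCwiki, RST, PDTB-E, PDTB-I, SSPabs.
    dataset = load_dataset("OfekGlick/DiscoEval", "SParxiv")

    # Each example holds sentence_<i> text columns plus a ClassLabel "label".
    print(dataset["train"][0])
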
constants.py ADDED
@@ -0,0 +1,123 @@
+ # General Constants:
+ LABEL_NAME = 'label'
+ TEXT_COLUMN_NAME = [f"sentence_{i}" for i in range(1, 10)]
+
+ # SSPabs Constants:
+ SSPABS = 'SSPabs'
+ SSPABS_TRAIN_NAME = 'train.txt'
+ SSPABS_VALID_NAME = 'valid.txt'
+ SSPABS_TEST_NAME = 'test.txt'
+ SSPABS_DATA_DIR = './data/SSP/abs/'
+ SSPABS_LABELS = ["0", "1"]
+ SSPABS_TEXT_COLUMNS = 1
+
+ # PDTB Constants:
+ PDTB_I = 'PDTB-I'
+ PDTB_E = 'PDTB-E'
+ PDTB_TRAIN_NAME = 'train.txt'
+ PDTB_VALID_NAME = 'valid.txt'
+ PDTB_TEST_NAME = 'test.txt'
+ PDTB_DATA_DIR = './data/PDTB/'
+ PDTB_E_LABELS = [
+     'Comparison.Concession',
+     'Comparison.Contrast',
+     'Contingency.Cause',
+     'Contingency.Condition',
+     'Contingency.Pragmatic condition',
+     'Expansion.Alternative',
+     'Expansion.Conjunction',
+     'Expansion.Instantiation',
+     'Expansion.List',
+     'Expansion.Restatement',
+     'Temporal.Asynchronous',
+     'Temporal.Synchrony',
+ ]
+ PDTB_I_LABELS = [
+     'Comparison.Concession',
+     'Comparison.Contrast',
+     'Contingency.Cause',
+     'Contingency.Pragmatic cause',
+     'Expansion.Alternative',
+     'Expansion.Conjunction',
+     'Expansion.Instantiation',
+     'Expansion.List',
+     'Expansion.Restatement',
+     'Temporal.Asynchronous',
+     'Temporal.Synchrony',
+ ]
+ PDTB_E_TEXT_COLUMNS = 2
+ PDTB_I_TEXT_COLUMNS = 2
+
+
+ # SP Constants:
+ SPARXIV = 'SParxiv'
+ SPROCSTORY = 'SProcstory'
+ SPWIKI = 'SPwiki'
+ SP_TRAIN_NAME = 'train.txt'
+ SP_VALID_NAME = 'valid.txt'
+ SP_TEST_NAME = 'test.txt'
+ SP_DATA_DIR = './data/SP/'
+ SP_LABELS = ["0", "1", "2", "3", "4"]
+ SP_TEXT_COLUMNS = 5
+
+ # DC Constants:
+ DCCHAT = 'DCchat'
+ DCWIKI = 'DCwiki'
+ DC_TRAIN_NAME = 'train.txt'
+ DC_VALID_NAME = 'valid.txt'
+ DC_TEST_NAME = 'test.txt'
+ DC_DATA_DIR = './data/DC/'
+ DC_LABELS = ["0", "1"]
+ DC_TEXT_COLUMNS = 6
+
+
+ # RST Constants:
+ RST = 'RST'
+ RST_TRAIN_NAME = 'RST_TRAIN.pkl'
+ RST_VALID_NAME = 'RST_DEV.pkl'
+ RST_TEST_NAME = 'RST_TEST.pkl'
+ RST_DATA_DIR = './data/RST/'
+ RST_LABELS = [
+     'NS-Explanation',
+     'NS-Evaluation',
+     'NN-Condition',
+     'NS-Summary',
+     'SN-Cause',
+     'SN-Background',
+     'NS-Background',
+     'SN-Summary',
+     'NS-Topic-Change',
+     'NN-Explanation',
+     'SN-Topic-Comment',
+     'NS-Elaboration',
+     'SN-Attribution',
+     'SN-Manner-Means',
+     'NN-Evaluation',
+     'NS-Comparison',
+     'NS-Contrast',
+     'SN-Condition',
+     'NS-Temporal',
+     'NS-Enablement',
+     'SN-Evaluation',
+     'NN-Topic-Comment',
+     'NN-Temporal',
+     'NN-Textual-organization',
+     'NN-Same-unit',
+     'NN-Comparison',
+     'NN-Topic-Change',
+     'SN-Temporal',
+     'NN-Joint',
+     'SN-Enablement',
+     'SN-Explanation',
+     'NN-Contrast',
+     'NN-Cause',
+     'SN-Contrast',
+     'NS-Attribution',
+     'NS-Topic-Comment',
+     'SN-Elaboration',
+     'SN-Comparison',
+     'NS-Cause',
+     'NS-Condition',
+     'NS-Manner-Means',
+ ]
+ RST_TEXT_COLUMNS = 2
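
To illustrate how these constants drive the schema in DiscoEval.py, here is a small sketch; the tab-separated line is invented for illustration and mirrors the parsing in _generate_examples:

    import constants

    # A line in the SP/DC/PDTB/SSP text files: the label first, then text columns.
    line = "1\tFirst sentence.\tSecond sentence.\tThird sentence."
    fields = line.strip().split("\t")

    example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(fields[1:])}
    example[constants.LABEL_NAME] = fields[0]
    # -> {'sentence_1': 'First sentence.', 'sentence_2': 'Second sentence.',
    #     'sentence_3': 'Third sentence.', 'label': '1'}
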
data/BSO/arxiv/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/arxiv/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/arxiv/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/rocstory/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/rocstory/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/rocstory/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/wiki/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/wiki/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/wiki/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/chat/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/chat/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/chat/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/wiki/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/wiki/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/wiki/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Explicit/labelset.txt ADDED
@@ -0,0 +1,12 @@
+ Comparison.Concession
+ Comparison.Contrast
+ Contingency.Cause
+ Contingency.Condition
+ Contingency.Pragmatic condition
+ Expansion.Alternative
+ Expansion.Conjunction
+ Expansion.Instantiation
+ Expansion.List
+ Expansion.Restatement
+ Temporal.Asynchronous
+ Temporal.Synchrony
data/PDTB/Explicit/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Explicit/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Explicit/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Implicit/labelset.txt ADDED
@@ -0,0 +1,11 @@
+ Comparison.Concession
+ Comparison.Contrast
+ Contingency.Cause
+ Contingency.Pragmatic cause
+ Expansion.Alternative
+ Expansion.Conjunction
+ Expansion.Instantiation
+ Expansion.List
+ Expansion.Restatement
+ Temporal.Asynchronous
+ Temporal.Synchrony
data/PDTB/Implicit/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Implicit/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Implicit/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/RST/RST_DEV.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ccce6a70b54d5aa8c6b07718eb5d609216842b6b86109d84ecd1e5ca4a12931
+ size 288410
data/RST/RST_TEST.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:441da3fbb3a4a8fc9b84e07c7897decca76ea043c789ceb918955b9be7232d1f
+ size 324662
data/RST/RST_TRAIN.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2da537c3259a007d3f7e3ef7c08953b522768b4fd2fc2b930c1856704f639f78
+ size 2467975
data/RST/cmds.txt ADDED
@@ -0,0 +1 @@
+ cp /share/data/speech/mingda/data/for-zewei/SentEval/data/*.pkl .
data/RST/py2/RST_DEV.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ccce6a70b54d5aa8c6b07718eb5d609216842b6b86109d84ecd1e5ca4a12931
+ size 288410
data/RST/py2/RST_TEST.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:441da3fbb3a4a8fc9b84e07c7897decca76ea043c789ceb918955b9be7232d1f
+ size 324662
data/RST/py2/RST_TRAIN.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2da537c3259a007d3f7e3ef7c08953b522768b4fd2fc2b930c1856704f639f78
+ size 2467975
data/RST/py3/RST_DEV.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c77fc13ce8f31220182a93a97372e6c8c160869349af3c98d0f3c3f04b5f1dab
+ size 288410
data/RST/py3/RST_TEST.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0c7ff5e45cde0a339598dfae57d93a7283159d66a0186cd4028987cb65649b5
+ size 324662
data/RST/py3/RST_TRAIN.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:190971e116ee66a356e3fbe897a8c6f9e895920daf72d77cecea53e3722b133c
+ size 2467975
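
A note on the duplicated RST pickles: the root and py2/ pointers share the same LFS oids, while the py3/ copies have different oids, presumably re-pickled for Python 3. A quick hedged check (assumes the LFS objects have been fetched, e.g. via git lfs pull):

    import pickle

    # Load the Python-3 pickle and peek at the first record: label, then tokenized sentences.
    with open("data/RST/py3/RST_TRAIN.pkl", "rb") as f:
        data = pickle.load(f)
    print(len(data), data[0][0])
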
data/SP/arxiv/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/arxiv/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/arxiv/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/rocstory/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/rocstory/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/rocstory/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/wiki/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/wiki/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/wiki/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SSP/abs/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SSP/abs/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SSP/abs/valid.txt ADDED
The diff for this file is too large to render. See raw diff