Commit 6b2a014 (0 parents), committed by system (HF staff)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
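
This commit adds a loading script that the `datasets` library resolves by name. A minimal usage sketch, assuming `datasets>=1.0.0` is installed and the dataset is published on the Hub as `crd3`:

```python
from datasets import load_dataset

# Downloads the CRD3 archive and runs the crd3.py builder below
# to materialize the train/test/validation splits.
dataset = load_dataset("crd3")
print(dataset["train"][0]["chunk"])
```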

Files changed (4):
  1. .gitattributes +27 -0
  2. crd3.py +152 -0
  3. dataset_infos.json +1 -0
  4. dummy/0.0.0/dummy_data.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
crd3.py ADDED
@@ -0,0 +1,152 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """CRD3 dataset"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import logging
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @inproceedings{
+ title = {Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset},
+ author = {Rameshkumar, Revanth and Bailey, Peter},
+ year = {2020},
+ publisher = {Association for Computational Linguistics},
+ conference = {ACL}
+ }
+ """
+
+ _DESCRIPTION = """
+ Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset.
+ Critical Role is an unscripted, live-streamed show where a fixed group of people play Dungeons and Dragons, an open-ended role-playing game.
+ The dataset is collected from 159 Critical Role episodes transcribed to text dialogues, consisting of 398,682 turns. It also includes corresponding
+ abstractive summaries collected from the Fandom wiki. The dataset is linguistically unique in that the narratives are generated entirely through player
+ collaboration and spoken interaction. For each dialogue, there are a large number of turns, multiple abstractive summaries with varying levels of detail,
+ and semantic ties to the previous dialogues.
+ """
+
+ _URL = "https://github.com/RevanthRameshkumar/CRD3/archive/master.zip"
+
+
+ def get_train_test_dev_files(files, test_split, train_split, dev_split):
+     test_files, train_files, dev_files = [], [], []  # three distinct lists; chained `a = b = c = []` would alias them all to one
+     for file in files:
+         filename = os.path.split(file)[1].split("_")[0]
+         if filename in test_split:
+             test_files.append(file)
+         elif filename in train_split:
+             train_files.append(file)
+         elif filename in dev_split:
+             dev_files.append(file)
+         else:
+             logging.info("skipped file {}".format(file))
+     return test_files, train_files, dev_files
+
+
+ class CRD3(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "chunk": datasets.Value("string"),
+                     "chunk_id": datasets.Value("int32"),
+                     "turn_start": datasets.Value("int32"),
+                     "turn_end": datasets.Value("int32"),
+                     "alignment_score": datasets.Value("float32"),
+                     "turn_num": datasets.Value("int32"),
+                     "turns": datasets.features.Sequence(
+                         {
+                             "names": datasets.Value("string"),
+                             "utterances": datasets.Value("string"),
+                         }
+                     ),
+                 }
+             ),
+             homepage="https://github.com/RevanthRameshkumar/CRD3",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         path = dl_manager.download_and_extract(_URL)
+         test_file = os.path.join(path, "CRD3-master", "data", "aligned data", "test_files")
+         train_file = os.path.join(path, "CRD3-master", "data", "aligned data", "train_files")
+         dev_file = os.path.join(path, "CRD3-master", "data", "aligned data", "val_files")
+         with open(test_file, encoding="utf-8") as f:
+             test_splits = [file.replace("\n", "") for file in f.readlines()]
+
+         with open(train_file, encoding="utf-8") as f:
+             train_splits = [file.replace("\n", "") for file in f.readlines()]
+         with open(dev_file, encoding="utf-8") as f:
+             dev_splits = [file.replace("\n", "") for file in f.readlines()]
+         c2 = "CRD3-master/data/aligned data/c=2"
+         c3 = "CRD3-master/data/aligned data/c=3"
+         c4 = "CRD3-master/data/aligned data/c=4"
+         files = [os.path.join(path, c2, file) for file in sorted(os.listdir(os.path.join(path, c2)))]
+         files.extend([os.path.join(path, c3, file) for file in sorted(os.listdir(os.path.join(path, c3)))])
+         files.extend([os.path.join(path, c4, file) for file in sorted(os.listdir(os.path.join(path, c4)))])
+
+         test_files, train_files, dev_files = get_train_test_dev_files(files, test_splits, train_splits, dev_splits)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"files_path": train_files},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"files_path": test_files},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"files_path": dev_files},
+             ),
+         ]
+
+     def _generate_examples(self, files_path):
+         """Yields examples."""
+
+         for file_idx, file in enumerate(files_path):  # file_idx keeps keys unique; id1/id2 restart at 0 in every file
+             with open(file, encoding="utf-8") as f:
+                 data = json.load(f)
+                 for id1, row in enumerate(data):
+                     chunk = row["CHUNK"]
+                     chunk_id = row["ALIGNMENT"]["CHUNK ID"]
+                     turn_start = row["ALIGNMENT"]["TURN START"]
+                     turn_end = row["ALIGNMENT"]["TURN END"]
+                     score = row["ALIGNMENT"]["ALIGNMENT SCORE"]
+                     for id2, turn in enumerate(row["TURNS"]):
+                         turn_names = turn["NAMES"]
+                         turn_utterances = turn["UTTERANCES"]
+                         turn_num = turn["NUMBER"]
+                         yield "{}_{}_{}".format(file_idx, id1, id2), {
+                             "chunk": chunk,
+                             "chunk_id": chunk_id,
+                             "turn_start": turn_start,
+                             "turn_end": turn_end,
+                             "alignment_score": score,
+                             "turn_num": turn_num,
+                             "turns": {
+                                 "names": turn_names,
+                                 "utterances": turn_utterances,
+                             },
+                         }
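
The features declared in `_info` mirror the dicts yielded by `_generate_examples`: one example per (chunk, turn) pair, with `turns` stored as a sequence of parallel `names`/`utterances` lists. A minimal sketch of inspecting one example, assuming the dataset loads as shown above:

```python
from datasets import load_dataset

# Load a single split; "validation" maps to the val_files listing.
dataset = load_dataset("crd3", split="validation")

example = dataset[0]
print(example["chunk_id"], example["alignment_score"])  # alignment metadata
print(example["turns"]["names"])       # speaker names for this turn
print(example["turns"]["utterances"])  # utterances spoken in this turn
```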
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\nStorytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset.\nCritical Role is an unscripted, live-streamed show where a fixed group of people play Dungeons and Dragons, an open-ended role-playing game. \nThe dataset is collected from 159 Critical Role episodes transcribed to text dialogues, consisting of 398,682 turns. It also includes corresponding \nabstractive summaries collected from the Fandom wiki. The dataset is linguistically unique in that the narratives are generated entirely through player \ncollaboration and spoken interaction. For each dialogue, there are a large number of turns, multiple abstractive summaries with varying levels of detail, \nand semantic ties to the previous dialogues.\n", "citation": "\n@inproceedings{\ntitle = {Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset},\nauthor = {Rameshkumar, Revanth and Bailey, Peter},\nyear = {2020},\npublisher = {Association for Computational Linguistics},\nconference = {ACL}\n}\n ", "homepage": "https://github.com/RevanthRameshkumar/CRD3", "license": "", "features": {"chunk": {"dtype": "string", "id": null, "_type": "Value"}, "chunk_id": {"dtype": "int32", "id": null, "_type": "Value"}, "turn_start": {"dtype": "int32", "id": null, "_type": "Value"}, "turn_end": {"dtype": "int32", "id": null, "_type": "Value"}, "alignment_score": {"dtype": "float32", "id": null, "_type": "Value"}, "turn_num": {"dtype": "int32", "id": null, "_type": "Value"}, "turns": {"feature": {"names": {"dtype": "string", "id": null, "_type": "Value"}, "utterances": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "cr_d3", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1405206229, "num_examples": 2942362, "dataset_name": "cr_d3"}, "test": {"name": "test", "num_bytes": 1405206229, "num_examples": 2942362, "dataset_name": "cr_d3"}, "validation": {"name": "validation", "num_bytes": 1405206229, "num_examples": 2942362, "dataset_name": "cr_d3"}}, "download_checksums": {"https://github.com/RevanthRameshkumar/CRD3/archive/master.zip": {"num_bytes": 293524408, "checksum": "485ee871073c66359320db3a380cc1fa7d8bc05c9c981d87dbf36df91041ff14"}}, "download_size": 293524408, "dataset_size": 4215618687, "size_in_bytes": 4509143095}}
dummy/0.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:015b912d23db6bd0c4910dc4d4abd455b780e35f55199dd2359c7a7cf24a5157
+ size 21265