Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: no-annotation
Source Datasets: original
License:
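
The corpus can be loaded with the Hugging Face datasets library. A minimal usage sketch, assuming the dataset id "crd3" that also appears in the download URL of the loading script below:

    from datasets import load_dataset

    # downloads the aligned-data archive and builds the train/test/validation splits
    crd3 = load_dataset("crd3")

    # each example is a summary chunk aligned to a span of dialogue turns
    print(crd3["train"][0]["chunk"])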
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""CRD3  dataset"""


import json
import os

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """
@inproceedings{
title = {Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset},
author = {Rameshkumar, Revanth  and Bailey, Peter},
year = {2020},
publisher = {Association for Computational Linguistics},
conference = {ACL}
}
 """

_DESCRIPTION = """
Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset.
Critical Role is an unscripted, live-streamed show where a fixed group of people play Dungeons and Dragons, an open-ended role-playing game.
The dataset is collected from 159 Critical Role episodes transcribed to text dialogues, consisting of 398,682 turns. It also includes corresponding
abstractive summaries collected from the Fandom wiki. The dataset is linguistically unique in that the narratives are generated entirely through player
collaboration and spoken interaction. For each dialogue, there are a large number of turns, multiple abstractive summaries with varying levels of detail,
and semantic ties to the previous dialogues.
"""

_URL = "https://huggingface.co/datasets/crd3/resolve/72bffe55b4d5bf19b530d3e417447b3384ba3673/data/aligned%20data.zip"


def get_train_test_dev_files(files, test_split, train_split, dev_split):
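    """Sorts the alignment files into test, train and dev lists according to the episode names listed for each split."""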
    test_files, dev_files, train_files = [], [], []
    for file in files:
        filename = os.path.split(file)[1].split("_")[0]
        if filename in test_split:
            test_files.append(file)
        elif filename in train_split:
            train_files.append(file)
        elif filename in dev_split:
            dev_files.append(file)
        else:
            logger.info(f"skipped file {file}")
    return test_files, train_files, dev_files


class CRD3(datasets.GeneratorBasedBuilder):
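    """Critical Role Dungeons and Dragons Dataset (CRD3) builder."""
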
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "chunk": datasets.Value("string"),
                    "chunk_id": datasets.Value("int32"),
                    "turn_start": datasets.Value("int32"),
                    "turn_end": datasets.Value("int32"),
                    "alignment_score": datasets.Value("float32"),
                    "turns": [
                        {
                            "names": datasets.features.Sequence(datasets.Value("string")),
                            "utterances": datasets.features.Sequence(datasets.Value("string")),
                            "number": datasets.Value("int32"),
                        }
                    ],
                }
            ),
            homepage="https://github.com/RevanthRameshkumar/CRD3",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
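        """Downloads and extracts the aligned data, then returns the train/test/validation split generators."""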
        root = dl_manager.download_and_extract(_URL)
        path = os.path.join(root, "aligned data")

        test_file = os.path.join(path, "test_files")
        train_file = os.path.join(path, "train_files")
        dev_file = os.path.join(path, "val_files")
        with open(test_file, encoding="utf-8") as f:
            test_splits = [file.replace("\n", "") for file in f.readlines()]

        with open(train_file, encoding="utf-8") as f:
            train_splits = [file.replace("\n", "") for file in f.readlines()]
        with open(dev_file, encoding="utf-8") as f:
            dev_splits = [file.replace("\n", "") for file in f.readlines()]
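        # the extracted archive ships the alignment files in three sub-directories, one per chunk size (c=2, c=3, c=4)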
        c2 = "c=2"
        c3 = "c=3"
        c4 = "c=4"
        files = [os.path.join(path, c2, file) for file in sorted(os.listdir(os.path.join(path, c2)))]
        files.extend([os.path.join(path, c3, file) for file in sorted(os.listdir(os.path.join(path, c3)))])
        files.extend([os.path.join(path, c4, file) for file in sorted(os.listdir(os.path.join(path, c4)))])

        test_files, train_files, dev_files = get_train_test_dev_files(files, test_splits, train_splits, dev_splits)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files_path": train_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files_path": test_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files_path": dev_files},
            ),
        ]

    def _generate_examples(self, files_path):
        """Yields examples."""

        for id0, file in enumerate(files_path):
            with open(file, encoding="utf-8") as f:
                data = json.load(f)
                for id1, row in enumerate(data):
                    chunk = row["CHUNK"]
                    chunk_id = row["ALIGNMENT"]["CHUNK ID"]
                    turn_start = row["ALIGNMENT"]["TURN START"]
                    turn_end = row["ALIGNMENT"]["TURN END"]
                    score = row["ALIGNMENT"]["ALIGNMENT SCORE"]
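                    # rename the upper-case keys from the raw JSON to the lower-case feature names declared in _info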
                    for turn in row["TURNS"]:
                        turn["names"] = turn["NAMES"]
                        turn["utterances"] = turn["UTTERANCES"]
                        turn["number"] = turn["NUMBER"]

                        del turn["NAMES"]
                        del turn["UTTERANCES"]
                        del turn["NUMBER"]

                    yield str(id0) + "_" + str(id1), {
                        "chunk": chunk,
                        "chunk_id": chunk_id,
                        "turn_start": turn_start,
                        "turn_end": turn_end,
                        "alignment_score": score,
                        "turns": row["TURNS"],
                    }