Datasets:

Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: found
Annotations Creators: found
Source Datasets: original
License:
system (HF staff) committed on
Commit ad6d30b
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
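
Every line in this .gitattributes file tells Git to route matching files through Git LFS (Large File Storage) and to treat them as binary. A minimal parsing sketch, purely illustrative and not part of the commit, showing how one such line decomposes into a path pattern plus attribute settings:

# Illustrative only: split a .gitattributes line into pattern + attributes.
line = "*.parquet filter=lfs diff=lfs merge=lfs -text"
pattern, *attrs = line.split()
parsed = {}
for attr in attrs:
    if "=" in attr:
        key, value = attr.split("=", 1)
        parsed[key] = value               # e.g. filter -> lfs
    else:
        parsed[attr.lstrip("-")] = False  # "-text" unsets the text attribute
print(pattern)  # *.parquet
print(parsed)   # {'filter': 'lfs', 'diff': 'lfs', 'merge': 'lfs', 'text': False}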
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"train": {"description": "Ubuntu Dialogue Corpus, a dataset containing almost 1 million multi-turn dialogues, with a total of over 7 million utterances and 100 million words. This provides a unique resource for research into building dialogue managers based on neural language models that can make use of large amounts of unlabeled data. The dataset has both the multi-turn property of conversations in the Dialog State Tracking Challenge datasets, and the unstructured nature of interactions from microblog services such as Twitter.\n", "citation": "@article{DBLP:journals/corr/LowePSP15,\n author = {Ryan Lowe and\n Nissan Pow and\n Iulian Serban and\n Joelle Pineau},\n title = {The Ubuntu Dialogue Corpus: {A} Large Dataset for Research in Unstructured\n Multi-Turn Dialogue Systems},\n journal = {CoRR},\n volume = {abs/1506.08909},\n year = {2015},\n url = {http://arxiv.org/abs/1506.08909},\n archivePrefix = {arXiv},\n eprint = {1506.08909},\n timestamp = {Mon, 13 Aug 2018 16:48:23 +0200},\n biburl = {https://dblp.org/rec/journals/corr/LowePSP15.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/rkadlec/ubuntu-ranking-dataset-creator", "license": "", "features": {"Context": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "ubuntu_dialogs_corpus", "config_name": "train", "version": {"version_str": "2.0.0", "description": null, "datasets_version_to_prepare": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 65497027, "num_examples": 127422, "dataset_name": "ubuntu_dialogs_corpus"}}, "download_checksums": {}, "download_size": 0, "dataset_size": 65497027, "size_in_bytes": 65497027}}
dummy/dev_test/2.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd0d07d667dea3347a6dbe380ad1040e94ef149153909c7b20425cbb048001df
+ size 1824
dummy/train/2.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14cddad0c84ff0f8a9a8e63f5f38105710bffbd124160334e5fe4362f376e374
+ size 1152
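
Both dummy_data.zip entries are stored as Git LFS pointer files rather than the archives themselves: three lines giving the pointer spec version, the SHA-256 digest (oid) of the real content, and its size in bytes. A hedged sketch of verifying a fetched file against its pointer (the local path is a placeholder):

import hashlib
import os

# Minimal sketch: check a downloaded file against the "oid sha256:..." and
# "size ..." fields of a Git LFS pointer.
def matches_pointer(path, expected_sha256, expected_size):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256 and os.path.getsize(path) == expected_size

# e.g. for dummy/train/2.0.0/dummy_data.zip:
# matches_pointer("dummy_data.zip",
#                 "14cddad0c84ff0f8a9a8e63f5f38105710bffbd124160334e5fe4362f376e374",
#                 1152)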
ubuntu_dialogs_corpus.py ADDED
@@ -0,0 +1,132 @@
+ """TODO(ubuntu_dialogs_corpus): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import os
+
+ import datasets
+
+
+ # TODO(ubuntu_dialogs_corpus): BibTeX citation
+ _CITATION = """\
+ @article{DBLP:journals/corr/LowePSP15,
+   author    = {Ryan Lowe and
+                Nissan Pow and
+                Iulian Serban and
+                Joelle Pineau},
+   title     = {The Ubuntu Dialogue Corpus: {A} Large Dataset for Research in Unstructured
+                Multi-Turn Dialogue Systems},
+   journal   = {CoRR},
+   volume    = {abs/1506.08909},
+   year      = {2015},
+   url       = {http://arxiv.org/abs/1506.08909},
+   archivePrefix = {arXiv},
+   eprint    = {1506.08909},
+   timestamp = {Mon, 13 Aug 2018 16:48:23 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/LowePSP15.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ # TODO(ubuntu_dialogs_corpus):
+ _DESCRIPTION = """\
+ Ubuntu Dialogue Corpus, a dataset containing almost 1 million multi-turn dialogues, with a total of over 7 million utterances and 100 million words. This provides a unique resource for research into building dialogue managers based on neural language models that can make use of large amounts of unlabeled data. The dataset has both the multi-turn property of conversations in the Dialog State Tracking Challenge datasets, and the unstructured nature of interactions from microblog services such as Twitter.
+ """
+
+
+ class UbuntuDialogsCorpusConfig(datasets.BuilderConfig):
+     """BuilderConfig for UbuntuDialogsCorpus."""
+
+     def __init__(self, features, **kwargs):
+         """BuilderConfig for UbuntuDialogsCorpus.
+
+         Args:
+           features: `list[str]`, the names of the feature columns in this config.
+           **kwargs: keyword arguments forwarded to super.
+         """
+
+         super(UbuntuDialogsCorpusConfig, self).__init__(version=datasets.Version("2.0.0"), **kwargs)
+         self.features = features
+
+
+ class UbuntuDialogsCorpus(datasets.GeneratorBasedBuilder):
+     """TODO(ubuntu_dialogs_corpus): Short description of my dataset."""
+
+     # TODO(ubuntu_dialogs_corpus): Set up version.
+     VERSION = datasets.Version("2.0.0")
+     BUILDER_CONFIGS = [
+         UbuntuDialogsCorpusConfig(
+             name="train", features=["Context", "Utterance", "Label"], description="training features"
+         ),
+         UbuntuDialogsCorpusConfig(
+             name="dev_test",
+             features=["Context", "Ground Truth Utterance"] + ["Distractor_" + str(i) for i in range(9)],
+             description="test and dev features",
+         ),
+     ]
+
+     @property
+     def manual_download_instructions(self):
+         return """\
+ Please download the Ubuntu Dialogue Corpus from https://github.com/rkadlec/ubuntu-ranking-dataset-creator. Run ./generate.sh -t -s -l to download the
+ data. Other arguments are left at their default values here. Please save train.csv, test.csv and valid.csv in the same path."""
+
+     def _info(self):
+         # TODO(ubuntu_dialogs_corpus): Specifies the datasets.DatasetInfo object
+         features = {feature: datasets.Value("string") for feature in self.config.features}
+         if self.config.name == "train":
+             features["Label"] = datasets.Value("int32")
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 # These are the features of your dataset like images, labels ...
+                 features
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/rkadlec/ubuntu-ranking-dataset-creator",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(ubuntu_dialogs_corpus): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+
+         if self.config.name == "train":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(manual_dir, "train.csv")},
+                 ),
+             ]
+         else:
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(manual_dir, "test.csv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(manual_dir, "valid.csv")},
+                 ),
+             ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(ubuntu_dialogs_corpus): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = csv.DictReader(f)
+             for id_, row in enumerate(data):
+                 yield id_, row
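
Because the script defines manual_download_instructions, the CSVs produced by generate.sh must be supplied via data_dir when loading. A usage sketch ("/path/to/csvs" is a placeholder for the directory holding train.csv, test.csv and valid.csv):

import datasets

# Sketch: load each config from the manually generated CSVs.
train = datasets.load_dataset("ubuntu_dialogs_corpus", "train", data_dir="/path/to/csvs")
dev_test = datasets.load_dataset("ubuntu_dialogs_corpus", "dev_test", data_dir="/path/to/csvs")

# _generate_examples feeds each CSV row through csv.DictReader, so every
# example is a dict keyed by the CSV header columns, e.g. "Context",
# "Utterance" and "Label" for the "train" config.
print(train["train"][0].keys())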