epeters3 committed
Commit 5dda1b3
Parent(s): 7674d8f

Add v1 files and loading script
.gitattributes CHANGED
@@ -1,16 +1,5 @@
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tar.gz filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
+ personachat_truecased_full_train.json filter=lfs diff=lfs merge=lfs -text
+ personachat_truecased_full_valid.json filter=lfs diff=lfs merge=lfs -text
+ personachat_truecased_sample_train.json filter=lfs diff=lfs merge=lfs -text
+ personachat_truecased_sample_valid.json filter=lfs diff=lfs merge=lfs -text
+ dataset_infos.json filter=lfs diff=lfs merge=lfs -text
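Because these JSON files are LFS-tracked, fetching them through the Hub's /raw/ endpoint serves only the small pointer stub, so the loading script below downloads them via /resolve/ URLs, which redirect to the actual content. A minimal sanity-check sketch (the exact URL is an assumption based on this repo's layout) that tells a JSON payload apart from an LFS pointer:

import urllib.request

# /resolve/ should serve the real JSON; /raw/ would return an LFS pointer that
# starts with "version https://git-lfs.github.com/spec/v1".
url = ("https://huggingface.co/datasets/bavard/personachat_truecased"
       "/resolve/main/personachat_truecased_sample_valid.json")
with urllib.request.urlopen(url) as resp:
    head = resp.read(64).lstrip()
print("JSON payload" if head.startswith((b"[", b"{")) else "LFS pointer")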
.gitignore ADDED
@@ -0,0 +1,2 @@
+ venv
+ .idea
dataset_infos.json ADDED
File without changes
personachat_truecased.py ADDED
@@ -0,0 +1,92 @@
+ import json
+
+ import datasets
+ from datasets.features import Sequence
+
+
+ _BASE_URL = "https://huggingface.co/datasets/bavard/personachat_truecased/resolve/main"
+ _URLS = {
+     "full": {
+         "train": _BASE_URL + "/personachat_truecased_full_train.json",
+         "valid": _BASE_URL + "/personachat_truecased_full_valid.json"
+     },
+     "sample": {
+         "train": _BASE_URL + "/personachat_truecased_sample_train.json",
+         "valid": _BASE_URL + "/personachat_truecased_sample_valid.json"
+     }
+ }
+
+ _DESCRIPTION = """\
+ A version of the PersonaChat dataset that has been true-cased and given more normalized punctuation.
+ The original PersonaChat dataset is in all lower case and has an extra space around each clause- or
+ sentence-separating punctuation mark. This version reads more like natural language, with sentence
+ capitalization, proper-noun capitalization, and normalized whitespace. In addition, each dialogue turn includes
+ a pool of distractor candidate responses, which can be used by a multiple-choice regularization loss during training.
+ """
+
+ _CITATION = """\
+ @article{zhang2018personalizing,
+   title={Personalizing dialogue agents: I have a dog, do you have pets too?},
+   author={Zhang, Saizheng and Dinan, Emily and Urbanek, Jack and Szlam, Arthur and Kiela, Douwe and Weston, Jason},
+   journal={arXiv preprint arXiv:1801.07243},
+   year={2018}
+ }
+ """
+
+
+ class PersonachatTruecased(datasets.GeneratorBasedBuilder):
+     """
+     Version of the PersonaChat dataset that includes true-casing, normalized punctuation, and candidate distractor
+     responses for each dialogue turn, for including a multiple-choice regularization loss while training.
+     """
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="full", version=VERSION, description="The full dataset."),
+         datasets.BuilderConfig(name="sample", version=VERSION, description="A small sample of the dataset, useful for testing.")
+     ]
+
+     DEFAULT_CONFIG_NAME = "full"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "personality": Sequence(datasets.Value("string")),
+                 "candidates": Sequence(datasets.Value("string")),
+                 "history": Sequence(datasets.Value("string")),
+                 "conv_id": datasets.Value("int32"),
+                 "utterance_idx": datasets.Value("int32")
+             }),
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager):
+         split_paths = dl_manager.download(_URLS[self.config.name])
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"data_path": split_paths["train"]}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"data_path": split_paths["valid"]}
+             )
+         ]
+
+     def _generate_examples(self, data_path: str):
+         with open(data_path, encoding="utf-8") as f:
+             data = json.load(f)
+         for conv_id, conv in enumerate(data):
+             personality = conv["personality"]
+             for utterance_idx, utterance in enumerate(conv["utterances"]):
+                 id_ = f"{conv_id}-{utterance_idx}"
+                 yield id_, {
+                     "personality": personality,
+                     "candidates": utterance["candidates"],
+                     "history": utterance["history"],
+                     "conv_id": conv_id,
+                     "utterance_idx": utterance_idx
+                 }
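For reference, a minimal usage sketch once this script is published on the Hub (assuming the repo id bavard/personachat_truecased from _BASE_URL; the "sample" config keeps the download small):

from datasets import load_dataset

# Load the small "sample" config defined in BUILDER_CONFIGS above.
dataset = load_dataset("bavard/personachat_truecased", "sample")

example = dataset["train"][0]
print(example["personality"])  # persona sentences for the speaker
print(example["history"])      # dialogue history up to this turn
print(example["candidates"])   # distractor pool; in the PersonaChat format the gold
                               # response is conventionally the last entry (assumption)

Each record also carries conv_id and utterance_idx, so turns can be regrouped into full conversations if needed.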
personachat_truecased_full_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cea0e87a230ecbf69ef7937a6012e12060a7f4a2bd9a1adc44d3141cb57938f3
+ size 193210313
personachat_truecased_full_valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2277648a4b773d81cf3406eb872deff9489a930c7140b5a1a1bed79a48317562
+ size 11995403
personachat_truecased_sample_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cba1d219748010a15e47bdb5fbc78903bd77c52ffa2ff8fdb96e6e68d1747e5a
+ size 21396
personachat_truecased_sample_valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0f956b6e6b359949073dc47117e192140a289ec974f2edb77c230f5a63e6420
+ size 23092
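The four blobs above are Git LFS pointer files rather than the JSON payloads themselves: each records only the LFS spec version, the SHA-256 of the real content, and its size in bytes. A small sketch of reading those fields (parse_lfs_pointer is a hypothetical helper, not part of this repo):

def parse_lfs_pointer(path: str) -> dict:
    """Read a Git LFS pointer file into a {key: value} dict."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# e.g. {"version": "https://git-lfs.github.com/spec/v1",
#       "oid": "sha256:f0f956b6...", "size": "23092"}
print(parse_lfs_pointer("personachat_truecased_sample_valid.json"))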