Sourab Mangrulkar committed on
Commit
14b1991
•
1 Parent(s): 63c49b4

adding code

.DS_Store ADDED
Binary file (6.15 kB).
 
chat-t5.py ADDED
@@ -0,0 +1,61 @@
+ import os
+ import pandas as pd
+ import datasets
+
+ _DESCRIPTION = """\
+ This script is used to generate a dataset for the chatbot task. The dataset is a combination of the datasets used to train BlenderBot."""
+
+ _VERSION = "1.0.0"
+
+
+ class ChatT5Config(datasets.BuilderConfig):
+     """BuilderConfig for ChatT5."""
+
+     def __init__(self, **kwargs):
+         super(ChatT5Config, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs)
+
+
+ class ChatT5(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         ChatT5Config(
+             name="chat_t5",
+             description="The dataset is a combination of the datasets used to train BlenderBot.",
+         )
+     ]
+     BUILDER_CONFIG_CLASS = ChatT5Config
+     DEFAULT_CONFIG_NAME = "chat_t5"
+
+     def _info(self):
+         features = datasets.Features({"texts": datasets.Value("string"), "labels": datasets.Value("string")})
+
+         return datasets.DatasetInfo(description=_DESCRIPTION, features=features, supervised_keys=None)
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = os.path.join(os.curdir, "data")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_dir": os.path.join(data_dir, "train")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"data_dir": os.path.join(data_dir, "test")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"data_dir": os.path.join(data_dir, "validation")},
+             ),
+         ]
+
+     def _generate_examples(self, data_dir):
+         """Yields examples."""
+         # Concatenate every parquet file in the split directory, then emit one example per row.
+         data = pd.DataFrame()
+         for file_path in os.listdir(data_dir):
+             data = pd.concat([data, pd.read_parquet(os.path.join(data_dir, file_path))])
+         id_ = -1
+         for _, row in data.iterrows():
+             id_ += 1
+             yield id_, {"texts": row["texts"], "labels": row["labels"]}
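
For context, a minimal usage sketch (not part of the commit): it assumes the repository is cloned locally, the command is run from the repository root (since _split_generators resolves data/ relative to the current directory), and an installed datasets release that still supports loading script-based datasets.

import datasets

# Load the dataset through the local loading script; the default
# "chat_t5" config is selected explicitly here for clarity.
ds = datasets.load_dataset("./chat-t5.py", name="chat_t5")

print(ds)              # DatasetDict with train/test/validation splits
print(ds["train"][0])  # {'texts': ..., 'labels': ...}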
data/.DS_Store ADDED
Binary file (6.15 kB).
 
data/{blended_skill_talk_test.parquet → test/blended_skill_talk_test.parquet} RENAMED
File without changes
data/{test_ed.parquet → test/empathetic_dialogues_test.parquet} RENAMED
File without changes
data/test/wizard_of_wikipedia_test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d81abbcbea8aa23558992f9a3d9b3eb70259cf2483e148e942b9e1208652ef0
+ size 1035006
data/{blended_skill_talk_train.parquet → train/blended_skill_talk_train.parquet} RENAMED
File without changes
data/{train_both_original_no_cands_convai2.parquet → train/conv_ai_2_train.parquet} RENAMED
File without changes
data/{train_ed.parquet → train/empathetic_dialogues_train.parquet} RENAMED
File without changes
data/{train_wow.parquet → train/wizard_of_wikipedia_train.parquet} RENAMED
File without changes
data/{blended_skill_talk_validation.parquet → validation/blended_skill_talk_validation.parquet} RENAMED
File without changes
data/{valid_both_original_no_cands_convai2.parquet → validation/conv_ai_2_validation.parquet} RENAMED
File without changes
data/{validation_ed.parquet → validation/empathetic_dialogues_validation.parquet} RENAMED
File without changes
data/{valid_random_split_wow.parquet → validation/wizard_of_wikipedia_validation.parquet} RENAMED
File without changes
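
After these renames and additions, the on-disk layout matches the split directories that _split_generators expects (sketch reconstructed from the file list above; note the test split has no conv_ai_2 file):

data/
  train/
    blended_skill_talk_train.parquet
    conv_ai_2_train.parquet
    empathetic_dialogues_train.parquet
    wizard_of_wikipedia_train.parquet
  test/
    blended_skill_talk_test.parquet
    empathetic_dialogues_test.parquet
    wizard_of_wikipedia_test.parquet
  validation/
    blended_skill_talk_validation.parquet
    conv_ai_2_validation.parquet
    empathetic_dialogues_validation.parquet
    wizard_of_wikipedia_validation.parquet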