siyangliu committed on
Commit
46941b0
1 Parent(s): cb7ada4

Upload 6 files

Browse files
Files changed (7) hide show
  1. .gitattributes +2 -0
  2. data_split.py +18 -0
  3. loading_script.py +96 -0
  4. mhp_full.json +3 -0
  5. test.json +0 -0
  6. train.json +3 -0
  7. valid.json +0 -0
.gitattributes CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
55
+ mhp_full.json filter=lfs diff=lfs merge=lfs -text
56
+ train.json filter=lfs diff=lfs merge=lfs -text
data_split.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import random

# Fixed seed so the 80/10/10 split is reproducible across runs.
random.seed(42)


def split_data(input_path="./mhp_full.json",
               train_path="train.json",
               valid_path="valid.json",
               test_path="test.json"):
    """Shuffle the JSON records in *input_path* and write an 80/10/10
    train/valid/test split to *train_path*, *valid_path*, *test_path*.

    All parameters default to the original hard-coded file names, so
    existing usage (running the script as-is) is unchanged.
    """
    # `with` guarantees the input file is closed even on error
    # (the original opened it with a bare open() and leaked the handle).
    with open(input_path, encoding="utf-8") as f:
        records = json.load(f)

    random.shuffle(records)

    n = len(records)
    train_end = int(n * 0.8)
    valid_end = int(n * 0.9)
    train_data = records[:train_end]
    valid_data = records[train_end:valid_end]
    test_data = records[valid_end:]
    print(len(train_data), len(valid_data), len(test_data))

    for path, subset in ((train_path, train_data),
                         (valid_path, valid_data),
                         (test_path, test_data)):
        with open(path, "w") as out:
            json.dump(subset, out)


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer runs the split
    # (the original called split_data() unconditionally at import time,
    # crashing any import when ./mhp_full.json was missing).
    split_data()
loading_script.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """reddit_mhp dataset."""
17
+
18
+
19
+ import json
20
+ import os
21
+ import datasets
22
+
23
+
24
+
25
# Human-readable description surfaced on the dataset hub page.
# NOTE(review): " FutureWarning" looks like a paste artifact rather than a
# real description — confirm the intended text with the dataset author.
_DESCRIPTION = """ FutureWarning
"""
# Citation is a placeholder; no paper reference supplied yet.
_CITATION = """ null """
# Remote locations of the pre-computed train/valid/test split files
# produced by data_split.py and hosted on the Hugging Face hub.
_URLs = {
    "train": "https://huggingface.co/datasets/siyangliu/reddit_mhp/resolve/main/train.json",
    "valid": "https://huggingface.co/datasets/siyangliu/reddit_mhp/resolve/main/valid.json",
    "test": "https://huggingface.co/datasets/siyangliu/reddit_mhp/resolve/main/test.json",
}
33
+
34
+
35
+
36
class redditMHP(datasets.GeneratorBasedBuilder):
    """Loading script for the siyangliu/reddit_mhp dataset.

    Downloads the pre-split train/valid/test JSON files from the
    Hugging Face hub (see ``_URLs``) and yields examples with two
    string fields: ``question`` and ``comment``.
    """

    VERSION = datasets.Version("1.1.0")

    # Single unnamed default config; the dataset has no variants.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="",
            description="",
            version=VERSION,
        )
    ]

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "question": datasets.Value("string"),
                    "comment": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/siyangliu/reddit_mhp",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three split files and return one SplitGenerator each.

        The three generators only differ in split name and file path, so
        they are built from a (split, url-key) table instead of the
        original copy-pasted constructor calls.
        """
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": data_dir[key]},
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
                (datasets.Split.VALIDATION, "valid"),
            )
        ]

    def _generate_examples(self, filepath, label_filepath=None, strategy=False):
        """Yield ``(index, example)`` pairs from one split's JSON file.

        ``label_filepath`` and ``strategy`` are unused; they are kept
        with their original defaults so the signature stays compatible.

        NOTE(review): the source key is plural ("questions") while the
        feature is singular ("question") — this matches the underlying
        JSON as written; confirm against the data files before renaming.
        """
        with open(filepath, encoding="utf-8") as input_file:
            dataset = json.load(input_file)
            # enumerate replaces the original manual idx counter.
            for idx, meta_data in enumerate(dataset):
                yield idx, {
                    "question": meta_data["questions"],
                    "comment": meta_data["comment"],
                }
mhp_full.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a4b7960e0f570f3674587d9f67be5feeb954c20646cf391fc5d616ab1636a8e
3
+ size 15629109
test.json ADDED
The diff for this file is too large to render. See raw diff
 
train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:141e5f4a26618820c8f9249469ddd056d77792a4cd7f210ed0eae75d7eedc08f
3
+ size 12555975
valid.json ADDED
The diff for this file is too large to render. See raw diff