sh110495 committed on
Commit
b33835e
·
1 Parent(s): 27b0b75

upload data

Browse files
.DS_Store ADDED
Binary file (6.15 kB). View file
 
.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ klue-mrc-v1.1_dev.json filter=lfs diff=lfs merge=lfs -text
29
+ klue-mrc-v1.1_train.json filter=lfs diff=lfs merge=lfs -text
klue-mrc-v1.1_dev.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b31bb073e47cdeb19f2f1bae03d916ecabf945140334c75842d6994f700d4f47
3
+ size 18712914
klue-mrc-v1.1_train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c52c7b82a6015c09ea8be1fea49c912e73012d8b44d3653256d91c65d519c3df
3
+ size 47952737
klue-mrc.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import json
3
+ import pandas as pd
4
+ import datasets
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+
8
+ _DESCRIPTION = """\
9
+ Klue Machine Reading Comprehension Data
10
+ """
11
+
12
+ _URL = "https://huggingface.co/datasets/LeverageX/klue-mrc/resolve/main/"
13
+ _URLS = {
14
+ "train_data": _URL + "klue-mrc-v1.1_train.json",
15
+ "validation_data": _URL + "klue-mrc-v1.1_dev.json",
16
+ }
17
+
18
class KoreanNewspaper(datasets.GeneratorBasedBuilder):
    """Dataset builder for KLUE Machine Reading Comprehension (MRC).

    Downloads the KLUE-MRC v1.1 train/dev JSON files and yields one
    flat example (context, question, single answer span) per article.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="KLUE Machine Reading Comprehension",
            version=datasets.Version("1.0.0", ""),
            description="For LeverageX Project",
        ),
    ]

    def _info(self):
        # The declared features must match the keys yielded by
        # _generate_examples exactly; the previous schema declared
        # "title"/"paragraphs" fields the generator never produced,
        # which would make example encoding fail at load time.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "guid": datasets.Value("string"),
                    "news_category": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer_text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://klue-benchmark.com/tasks/70/overview/description",
        )

    def _split_generators(self, dl_manager):
        """Download the raw JSON files and declare the train/dev splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train_data"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation_data"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs parsed from a KLUE-MRC JSON file.

        Each example is one (context, question, answer) triple taken from
        the article's first paragraph and first question.
        """
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)

        # NOTE(review): only the first paragraph and its first QA pair are
        # used per article, so remaining QA pairs in the file are dropped —
        # confirm this is intentional before extending.
        for info in data["data"]:
            news_category = info["news_category"]
            source = info["source"]
            paragraphs = info["paragraphs"]

            context = paragraphs[0]["context"]

            qas = paragraphs[0]["qas"][0]
            question = qas["question"]
            answer = qas["answers"][0]

            yield key, {
                "guid": qas["guid"],
                "news_category": news_category,
                "source": source,
                "context": context,
                "question": question,
                "answer_text": answer["text"],
                "answer_start": answer["answer_start"],
            }
            key += 1