parquet-converter committed on
Commit
488efa0
1 Parent(s): 1b319ec

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,29 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- klue-re-v1.1_dev.json filter=lfs diff=lfs merge=lfs -text
29
- klue-re-v1.1_train.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
klue-re-v1.1_dev.json → KLUE Relation Extraction/klue-re-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d150873430e592a5c569d35b20d88fbbbfba751dae0ffa4d8cbfb6b7321dea14
3
- size 5212646
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da3fcb0001c700a21abe2ad7de1176aa624a03a21008c82929f2a734faa88a3d
3
+ size 6652833
klue-re-v1.1_train.json → KLUE Relation Extraction/klue-re-validation.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:10101fa3059b340f9971286dd5639daccb5889324182aed31679f038bca05bc4
3
- size 22316866
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7483fe529658d33e374205c1d399a8f84d46a0b87f26062178e63e96f3b5a70
3
+ size 1543139
klue-re.py DELETED
@@ -1,89 +0,0 @@
1
-
2
- import json
3
- import pandas as pd
4
- import datasets
5
-
6
- logger = datasets.logging.get_logger(__name__)
7
-
8
- _DESCRIPTION = """\
9
- Klue Relation Extraction Data
10
- """
11
-
12
- _URL = "https://huggingface.co/datasets/LeverageX/klue-re/resolve/main/"
13
- _URLS = {
14
- "train_data": _URL + "klue-re-v1.1_train.json",
15
- "validation_data": _URL + "klue-re-v1.1_dev.json",
16
- }
17
-
18
- class KoreanNewspaper(datasets.GeneratorBasedBuilder):
19
-
20
- BUILDER_CONFIGS = [
21
- datasets.BuilderConfig(
22
- name="KLUE Relation Extraction",
23
- version=datasets.Version("1.0.0", ""),
24
- description="For LeverageX Project",
25
- ),
26
- ]
27
-
28
- def _info(self):
29
- return datasets.DatasetInfo(
30
- description=_DESCRIPTION,
31
- features=datasets.Features(
32
- {
33
- "guid": datasets.Value("string"),
34
- "label": datasets.Value("string"),
35
- "object_entity":
36
- {
37
- "word": datasets.Value("string"),
38
- "start_idx": datasets.Value("int32"),
39
- "end_idx": datasets.Value("int32"),
40
- "type": datasets.Value("string"),
41
- },
42
- "sentence": datasets.Value("string"),
43
- "source": datasets.Value("string"),
44
- "subject_entity":
45
- {
46
- "word": datasets.Value("string"),
47
- "start_idx": datasets.Value("int32"),
48
- "end_idx": datasets.Value("int32"),
49
- "type": datasets.Value("string"),
50
- }
51
- }
52
- ),
53
- # No default supervised_keys (as we have to pass both question
54
- # and context as input).
55
- supervised_keys=None,
56
- homepage="https://klue-benchmark.com/tasks/70/overview/description",
57
- )
58
-
59
- def _split_generators(self, dl_manager):
60
- downloaded_files = dl_manager.download_and_extract(_URLS)
61
- return [
62
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train_data"]}),
63
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation_data"]}),
64
- ]
65
-
66
- def _generate_examples(self, filepath):
67
- """This function returns the examples in the raw (text) form."""
68
- logger.info("generating examples from = %s", filepath)
69
- key = 0
70
- with open(filepath, encoding="utf-8") as f :
71
- data = json.load(f)
72
-
73
- for info in data :
74
- guid = info['guid']
75
- label = info['label']
76
- object_entity = info['object_entity']
77
- subject_entity = info['subject_entity']
78
- source = info['source']
79
- sentence = info['sentence']
80
-
81
- yield key, {
82
- "guid" : guid,
83
- "label" : label,
84
- "object_entity" : object_entity,
85
- "subject_entity" : subject_entity,
86
- "source" : source,
87
- "sentence" : sentence,
88
- }
89
- key += 1