parquet-converter
committed on
Commit • 5d95c4b
1 Parent(s): b317c61
Update parquet files
train_data.json → Aihub Book Summarization/book-summarization-train.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:337bd391945326007eff2a17a2930edb91e31d4b8e2fbdfcc283c88cf4311480
+size 140851508
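Both sides of this rename are Git LFS pointer files, so the diff shows only pointer metadata (spec version, SHA-256 oid, byte size) rather than the Parquet payload; the old oid and size are truncated in this view and left as-is. As a rough illustration, the oid is simply the SHA-256 digest of the tracked file's contents, which a short Python sketch can recompute (the local path below is a placeholder):

import hashlib

def lfs_oid(path, chunk_size=1 << 20):
    # Git LFS stores the SHA-256 digest of the tracked file as the pointer's oid.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local checkout path; should print 337bd391... for the train split.
print(lfs_oid("Aihub Book Summarization/book-summarization-train.parquet"))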
validation_data.json → Aihub Book Summarization/book-summarization-validation.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6e52c5a964e9a89218b08bd90677f82c46b97f2871ad54b605ffed9bf36aad9b
+size 17242895
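With both splits now stored as Parquet, they can be read directly, without the dataset script deleted below. A minimal sketch, assuming the renamed paths from this commit and the columns declared by that script (requires pandas plus a Parquet engine such as pyarrow):

import pandas as pd

# Paths as renamed in this commit.
train = pd.read_parquet("Aihub Book Summarization/book-summarization-train.parquet")
validation = pd.read_parquet("Aihub Book Summarization/book-summarization-validation.parquet")

# Expected columns per the script's features: id, name, publisher, passage, summary.
print(train.shape, validation.shape)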
book-summarization.py
DELETED
@@ -1,74 +0,0 @@
-
-import json
-import pandas as pd
-import datasets
-
-logger = datasets.logging.get_logger(__name__)
-
-_DESCRIPTION = """\
-Korean Book Summarization Data
-"""
-
-_URL = "https://huggingface.co/datasets/LeverageX/book-summarization/resolve/main/"
-_URLS = {
-    "train_data": _URL + "train_data.json",
-    "validation_data": _URL + "validation_data.json",
-}
-
-
-class KoreanNewspaper(datasets.GeneratorBasedBuilder):
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="Aihub Book Summarization",
-            version=datasets.Version("1.0.0", ""),
-            description="Korean Summarization Data",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "name": datasets.Value("string"),
-                    "publisher": datasets.Value("string"),
-                    "passage": datasets.Value("string"),
-                    "summary": datasets.Value("string"),
-                }
-            ),
-            # No default supervised_keys (both passage and summary are
-            # passed as input).
-            supervised_keys=None,
-            homepage="https://aihub.or.kr/aidata/30713",
-        )
-
-    def _split_generators(self, dl_manager):
-        downloaded_files = dl_manager.download_and_extract(_URLS)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train_data"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation_data"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        key = 0
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-
-        for info in data:
-            doc_id = info['id']
-            doc_name = info['name']
-            publisher = info['publisher']
-            passage = info['passage']
-            summary = info['summary']
-
-            yield key, {
-                "id": doc_id,
-                "name": doc_name,
-                "publisher": publisher,
-                "passage": passage,
-                "summary": summary,
-            }
-            key += 1