import csv

import datasets

logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
AI Hub document summarization data
"""
_URL = "https://huggingface.co/datasets/metamong1/summarization_optimization/resolve/main/"
_URLS = {
    "train_data": _URL + "train_data.csv",
    "validation_data": _URL + "validation_data.csv",
}

class SummarizationOptimization(datasets.GeneratorBasedBuilder):
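    """Builder for the AI Hub document summarization data hosted as CSV files on the Hugging Face Hub."""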

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="Summarization Part Data",
            version=datasets.Version("1.0.0", ""),
            description="Text Summarization & Generation Title for optimization",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "doc_type": datasets.Value("string"),
                    "file": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (the input/target pairing for
            # summarization is left to the user).
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/metamong1/summarization_optimization",
        )

    def _split_generators(self, dl_manager):
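        # download_and_extract caches the remote CSVs and returns local paths.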
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train_data"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation_data"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, newline="", encoding="utf-8") as csvfile:
            reader = csv.reader(csvfile, delimiter=",")
            next(reader)  # skip the header row

            for idx, row in enumerate(reader):
                # Column 0 (likely the exported DataFrame index) is skipped;
                # the features start at column 1.
                yield idx, {
                    "doc_id": row[1],
                    "title": row[2],
                    "text": row[3],
                    "doc_type": row[4],
                    "file": row[5],
                }
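
if __name__ == "__main__":
    # Minimal usage sketch, assuming the CSVs referenced in _URLS are still
    # reachable: load_dataset pointed at the dataset repository resolves this
    # script and materializes the train/validation splits. Newer versions of
    # the datasets library may additionally require trust_remote_code=True
    # for script-based datasets.
    from datasets import load_dataset

    dataset = load_dataset("metamong1/summarization_optimization")
    print(dataset)
    print(dataset["train"][0]["title"])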