paniniDot committed on
Commit
27a68ed
1 Parent(s): f1e328c

Delete sci_lay.py

Browse files
Files changed (1) hide show
  1. sci_lay.py +0 -160
sci_lay.py DELETED
@@ -1,160 +0,0 @@
1
- """SciLay Dataset."""
2
-
3
- import gzip
4
- import json
5
- import os
6
-
7
- import datasets
8
- from datasets.download.streaming_download_manager import FilesIterable
9
-
10
# --- Module-level metadata and schema constants -----------------------------

_HOMEPAGE = ""

_CITATION = """
"""

_DESCRIPTION = """
SCILAY comprises 46,486 instances, each representing a scientific article in the biomedical domain.
Each instance in the dataset includes the following components:
- plain_text: Containing a plain language summary of the scientific article. This section is written in a simple and accessible language, and is intended to be understandable by a wide audience.
- technical_text: This section contains the abstract of the scientific article. It provides a detailed and technical description of the research conducted in the article.
In addition to the textual content, each instance is associated with the following metadata:
- Keywords: Keywords that capture the main topics and themes addressed in the article.
- Journal: The journal in which the article is published, providing context about the source of the research.
- DOI (Digital Object Identifier): A unique identifier for the article, facilitating easy referencing.
The main objective of the SCILAY dataset is to support the development and evaluation of text summarization models that can effectively simplify complex scientific language while retaining the essential information.
"""

_LICENSE = "Creative Commons Attribution 4.0 International"

# Mapping from datasets split objects to the split directory/archive names.
_SPLIT_NAMES = {
    datasets.Split.TRAIN: "train",
    datasets.Split.VALIDATION: "validation",
    datasets.Split.TEST: "test",
}
# Archive location template, filled with the config version and split name.
_URL = "data/{version}/{split_name}.zip"

# Feature (column) names shared by the schema and the JSON records.
_DOI = "doi"
_PMCID = "pmcid"
_SUMMARY = "plain_text"
_ABSTRACT = "technical_text"
_JOURNAL = "journal"
_TOPICS = "topics"
_KEYWORDS = "keywords"

# Short journal codes (used as config names and directory names) -> full titles.
_JOURNALS = {
    "NC": "Nature Communications",
    "A": "Animals: an Open Access Journal from MDPI",
    "NIHR": "NIHR Journals Library",
    "PLGEN": "PLoS Genetics",
    "PLPAT": "PLoS Pathogens",
    "PLCB": "PLoS Computational Biology",
    "PLNTD": "PLoS Neglected Tropical Diseases",
    "B": "Biology",
    "I": "Insects",
    "EL": "eLife",
    "PLB": "PLoS Biology",
    "CB": "Communications Biology",
    "SD": "Scientific Data",
    "MBIO": "mBio",
    "C": "Cancers",
    "OTHER": "Others",
}

# Available versions:
# 1.0.0 cased raw strings.

_VERSION = "1.0.0"

64
class SciLayConfig(datasets.BuilderConfig):
    """BuilderConfig for SciLay."""

    def __init__(self, journals="all", version=_VERSION, **kwargs):
        """BuilderConfig for SciLay.

        Args:
            journals (str or list, default 'all'): List of journal names. Either 'all' or a combination
                of {'NC', 'A', 'NIHR', 'PLGEN', 'PLPAT', 'PLCB', 'PLNTD', 'B', 'I', 'EL', 'PLB', 'CB', 'SD', 'MBIO', 'C', 'OTHER'}.
            **kwargs: keyword arguments forwarded to super.
        """
        # Normalize a single journal code to a one-element list.
        journals = [journals] if isinstance(journals, str) else journals
        config_name = "+".join(journals)
        if config_name == "all":
            # 'all' expands to every known journal code.
            journals = list(_JOURNALS)
        if version != _VERSION:
            # Non-default versions get a version-suffixed config name.
            config_name = f"{config_name}-{version}"
        super().__init__(name=config_name, version=version, **kwargs)
        self.journals = journals

83
-
84
class SciLay(datasets.GeneratorBasedBuilder):
    """SciLay dataset builder.

    Builds train/validation/test splits of biomedical articles, each with a
    plain-language summary and a technical abstract, filtered by journal code.
    """

    BUILDER_CONFIG_CLASS = SciLayConfig
    # One "all" config plus one config per journal code, sorted for stability.
    BUILDER_CONFIGS = [
        SciLayConfig(
            journals="all",
            description="Articles from all journals.",
        ),
    ] + [
        SciLayConfig(
            journals=k,
            description=f"Articles from journals {k}: {v}",
        )
        for k, v in sorted(_JOURNALS.items())
    ]
    DEFAULT_CONFIG_NAME = "all"
    VERSION = _VERSION

    def _info(self):
        """Return the DatasetInfo: feature schema, supervised keys, and metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                _DOI: datasets.Value("string"),
                _PMCID: datasets.Value("string"),
                _SUMMARY: datasets.Value("string"),
                _ABSTRACT: datasets.Value("string"),
                _JOURNAL: datasets.Value("string"),
                _TOPICS: datasets.Sequence(datasets.Value("string")),
                _KEYWORDS: datasets.Sequence(datasets.Value("string")),
            }),
            # Summarization direction: technical abstract -> plain-language summary.
            supervised_keys=(_ABSTRACT, _SUMMARY),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract each split archive and return its SplitGenerators.

        Bug fix: removed a leftover debug `raise Exception(f"paths creati = ...")`
        that made this method unconditionally fail before returning (the debug
        line itself was also broken: it iterated the dict's keys and referenced
        `__repr__` without calling it).
        """
        urls = {
            split: _URL.format(version=self.config.version, split_name=split_name)
            for split, split_name in _SPLIT_NAMES.items()
        }
        dl_paths = dl_manager.download_and_extract(urls)
        # For each split: one file iterator per selected journal code.
        paths = {
            split: [
                dl_manager.iter_files(os.path.join(dl_paths[split], split_name, code))
                for code in self.config.journals
            ]
            for split, split_name in _SPLIT_NAMES.items()
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"paths": paths[split]},
            )
            for split in _SPLIT_NAMES
        ]

    def _generate_examples(self, paths=None):
        """Yield (doi, example) pairs from gzip-compressed JSON-lines files.

        Args:
            paths: list with one entry per journal, each an iterable of paths
                to gzip-compressed files containing one JSON record per line.
        """
        for journal_files in paths:
            for file_path in journal_files:
                # Open both layers as context managers so the GzipFile wrapper
                # is closed explicitly (the original left it unclosed).
                with open(file_path, "rb") as raw, gzip.GzipFile(fileobj=raw) as fin:
                    for line in fin:
                        record = json.loads(line)
                        # _DOI == "doi": key each example by its DOI.
                        yield record[_DOI], {
                            _DOI: record[_DOI],
                            _PMCID: record[_PMCID],
                            _SUMMARY: record[_SUMMARY],
                            _ABSTRACT: record[_ABSTRACT],
                            _JOURNAL: record[_JOURNAL],
                            _TOPICS: record[_TOPICS],
                            _KEYWORDS: record[_KEYWORDS],
                        }
160
-