ccdv committed on
Commit 6b99b84
1 Parent(s): aace6eb
Files changed (2)
  1. README.md +46 -0
  2. pubmed-summarization.py +0 -16
README.md ADDED
@@ -0,0 +1,46 @@
+ ---
+ languages:
+ - en
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 100K<n<1M
+ task_categories:
+ - conditional-text-generation
+ task_ids:
+ - summarization
+ ---
+
+ # PubMed dataset for summarization
+
+
+ Adapted from this [repo](https://github.com/armancohan/long-summarization).
+ Note that the original data are pre-tokenized; this dataset returns `' '.join(text)`.
+ This dataset is compatible with the `run_summarization.py` [script](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) from Transformers if you add this line to the `summarization_name_mapping` variable:
+ ```python
+ "ccdv/pubmed-summarization": ("article", "abstract")
+ ```
+
+ # Cite original article
+ ```
+ @inproceedings{cohan-etal-2018-discourse,
+     title = "A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents",
+     author = "Cohan, Arman and
+       Dernoncourt, Franck and
+       Kim, Doo Soon and
+       Bui, Trung and
+       Kim, Seokhwan and
+       Chang, Walter and
+       Goharian, Nazli",
+     booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
+     month = jun,
+     year = "2018",
+     address = "New Orleans, Louisiana",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/N18-2097",
+     doi = "10.18653/v1/N18-2097",
+     pages = "615--621",
+     abstract = "Neural abstractive summarization models have led to promising results in summarizing relatively short documents. We propose the first model for abstractive summarization of single, longer-form documents (e.g., research papers). Our approach consists of a new hierarchical encoder that models the discourse structure of a document, and an attentive discourse-aware decoder to generate the summary. Empirical results on two large-scale datasets of scientific papers show that our model significantly outperforms state-of-the-art models.",
+ }
+ ```
+
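Not part of the commit, but for context: a minimal usage sketch of the dataset this README documents, using the `datasets` library. The `article` and `abstract` column names come from the `summarization_name_mapping` line above; recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets like this one.

```python
from datasets import load_dataset

# Minimal sketch: load the dataset this commit documents.
# Recent `datasets` versions may need trust_remote_code=True here,
# since the repository ships a loading script.
dataset = load_dataset("ccdv/pubmed-summarization")

# Column names taken from the summarization_name_mapping entry above.
sample = dataset["train"][0]
print(sample["article"][:200])
print(sample["abstract"][:200])
```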
pubmed-summarization.py CHANGED
@@ -51,7 +51,6 @@ class PubMedSummarizationConfig(datasets.BuilderConfig):
  class PubMedSummarizationDataset(datasets.GeneratorBasedBuilder):
      """PubMedSummarization Dataset."""
 
-     _DOWNLOAD_URL = "https://huggingface.co/datasets/ccdv/pubmed-summarization/resolve/main/"
      _TRAIN_FILE = "train.zip"
      _VAL_FILE = "val.zip"
      _TEST_FILE = "test.zip"
@@ -83,25 +82,10 @@ class PubMedSummarizationDataset(datasets.GeneratorBasedBuilder):
          )
 
      def _split_generators(self, dl_manager):
-         """
-         train_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TRAIN_FILE)
-         val_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._VAL_FILE)
-         test_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TEST_FILE)
-
-
-         dl_paths = dl_manager.download_and_extract(self._TRAIN_FILE)
-         train_files = _subset_filenames(dl_paths, datasets.Split.TRAIN)
 
-         dl_paths = dl_manager.download_and_extract(self._VAL_FILE)
-         train_files = _subset_filenames(dl_paths, datasets.Split.TRAIN)
-
-         dl_paths = dl_manager.download_and_extract(self._TEST_FILE)
-         train_files = _subset_filenames(dl_paths, datasets.Split.TRAIN)
-         """
          train_path = dl_manager.download_and_extract(self._TRAIN_FILE) + "/train.txt"
          val_path = dl_manager.download_and_extract(self._VAL_FILE) + "/val.txt"
          test_path = dl_manager.download_and_extract(self._TEST_FILE) + "/test.txt"
-         print("PATHS\n", train_path, val_path, test_path)
 
          return [
              datasets.SplitGenerator(
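The hunk above is truncated at the first `SplitGenerator`. For context, a sketch of how the cleaned-up `_split_generators` plausibly reads in full, assuming each archive extracts to a directory containing the matching `.txt` file and that `_generate_examples` receives the path via a `filepath` keyword (the kwarg name is an assumption; the diff cuts off before `gen_kwargs`):

```python
import datasets

class PubMedSummarizationDataset(datasets.GeneratorBasedBuilder):
    """PubMedSummarization Dataset."""

    _TRAIN_FILE = "train.zip"
    _VAL_FILE = "val.zip"
    _TEST_FILE = "test.zip"

    def _split_generators(self, dl_manager):
        # Relative filenames resolve against the dataset repository itself,
        # which is why the absolute _DOWNLOAD_URL could be dropped.
        train_path = dl_manager.download_and_extract(self._TRAIN_FILE) + "/train.txt"
        val_path = dl_manager.download_and_extract(self._VAL_FILE) + "/val.txt"
        test_path = dl_manager.download_and_extract(self._TEST_FILE) + "/test.txt"

        return [
            # "filepath" is an assumed kwarg name; the diff does not show gen_kwargs.
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]
```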