arka0821 committed on
Commit f2db6c2 · 1 Parent(s): 4bed9e3

Update multi_document_summarization.py

Files changed (1)
  1. multi_document_summarization.py +4 -5
multi_document_summarization.py CHANGED
@@ -35,9 +35,9 @@ _DESCRIPTION = """
 Multi-Document, a large-scale multi-document summarization dataset created from scientific articles. Multi-Document introduces a challenging multi-document summarization task: writing the related-work section of a paper based on its abstract and the articles it references.
 """
 
-_URL_TRAIN = "https://github.com/arka0821/multi_document_summarization/blob/master/data/train.json"
-_URL_TEST = "https://github.com/arka0821/multi_document_summarization/blob/master/data/test.json"
-_URL_VAL = "https://github.com/arka0821/multi_document_summarization/blob/master/data/val.json"
+_URL_TRAIN = "https://github.com/arka0821/multi_document_summarization/blob/master/data/train.txt"
+_URL_TEST = "https://github.com/arka0821/multi_document_summarization/blob/master/data/test.txt"
+_URL_VAL = "https://github.com/arka0821/multi_document_summarization/blob/master/data/val.txt"
 
 
 class MultiDocumentSum(datasets.GeneratorBasedBuilder):
@@ -87,9 +87,8 @@ class MultiDocumentSum(datasets.GeneratorBasedBuilder):
         ]
     def _generate_examples(self, path=None):
         """Yields examples."""
-        print("*********************" + path.split("/")[-1])
         with open(path, encoding="utf-8") as f:
-            data = json.load(f)
+            data = f.readlines()#json.load(f)
         f.close()
 
         for idx, el in enumerate(data):
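
Since the loader now calls f.readlines() on .txt files instead of json.load() on .json files, each record presumably occupies one line of the data files. Below is a minimal sketch of that line-per-record reading pattern, assuming each non-empty line is a JSON-encoded record; the load_records helper and the json.loads-per-line step are illustrative assumptions, not part of this commit.

import json

def load_records(path):
    # Hypothetical helper: read one record per line from a split file.
    # Assumption: each non-empty line holds a JSON object; the diff only
    # shows that the file is read line by line, not how lines are decoded.
    records = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            records.append(json.loads(line))
    return records

# Usage mirroring the loop in _generate_examples:
# for idx, el in enumerate(load_records("data/train.txt")):
#     ...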