jimregan committed on
Commit
1ef6b8f
1 Parent(s): f99e8b0

tweak script

Browse files
Files changed (1) hide show
  1. corpuscrawler-irish.py +14 -14
corpuscrawler-irish.py CHANGED
@@ -50,20 +50,16 @@ _SCRAPES = ["20191117", "20210810"]
50
 
51
 
52
  logger = datasets.utils.logging.get_logger(__name__)
53
- _DATA_URL = 'https://gist.githubusercontent.com/jimregan/66612f4ecb88ed96d41d43266e6d0872/raw/26bd05f11b4c1c31e33d36528ac53dea587be8ef/crawled-{}.txt'
54
 
55
 
56
- class CorpusCrawlerIrishConfig(datasets.BuilderConfig):
57
- """BuilderConfig for CorpusCrawlerIrish."""
58
-
59
- def __init__(self, **kwargs):
60
- super(CorpusCrawlerIrishConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)
61
-
62
  class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
63
  """Corpus Crawler crawled text dataset."""
64
 
65
  BUILDER_CONFIGS = [
66
- CorpusCrawlerIrishConfig(name=scrape) for scrape in _SCRAPES
 
 
67
  ]
68
 
69
  def _info(self):
@@ -82,21 +78,20 @@ class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
82
  )
83
 
84
  def _split_generators(self, dl_manager):
85
- if not self.config.data_dir:
86
- raise ValueError(f"Path to Corpus Crawler cache directory must be specified, but got data_dir={self.config.data_dir}")
87
- cc_cache = self.config.data_dir
88
 
89
  if not self.config.name:
90
  raise ValueError(f"Scrape set must be specified, but got name={self.config.name}")
91
  scrape_set = self.config.name
92
- dl_path = dl_manager.download(_DATA_URL.format(self.config.name))
 
93
 
94
  return [
95
  datasets.SplitGenerator(
96
  name=datasets.Split.TRAIN,
97
  gen_kwargs={
98
  "name": scrape_set,
99
- "data_dir": cc_cache,
100
  "data_file": dl_path,
101
  })
102
  ]
@@ -104,6 +99,7 @@ class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
104
  def _generate_examples(self, name, data_dir, data_file):
105
  """Generate examples from a Corpus Crawl cache."""
106
  logger.info("generating examples from = %s", name)
 
107
  links = _get_links(data_file)
108
  if not self.config.data_dir:
109
  self.config.data_dir = data_dir
@@ -114,7 +110,11 @@ class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
114
  _id = 1
115
  for link in links:
116
  res = self._fetch_page(link, data_dir)
117
- for para in res['text']:
 
 
 
 
118
  example = {
119
  "genre": res.get('genre', ''),
120
  "url": res['location'],
 
50
 
51
 
52
  logger = datasets.utils.logging.get_logger(__name__)
53
+ _DATA_URL = 'https://huggingface.co/datasets/phonlab-tcd/corpuscrawler-ga/raw/main/crawled-{}.txt'
54
 
55
 
 
 
 
 
 
 
56
  class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
57
  """Corpus Crawler crawled text dataset."""
58
 
59
  BUILDER_CONFIGS = [
60
+ datasets.BuilderConfig(name=f"{scrape}_{cfg}")
61
+ for scrape in _SCRAPES
62
+ for cfg in ["documents", "paragraphs"]
63
  ]
64
 
65
  def _info(self):
 
78
  )
79
 
80
  def _split_generators(self, dl_manager):
81
+ manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
 
 
82
 
83
  if not self.config.name:
84
  raise ValueError(f"Scrape set must be specified, but got name={self.config.name}")
85
  scrape_set = self.config.name
86
+ sset= self.config.name.split('_')[0]
87
+ dl_path = dl_manager.download(_DATA_URL.format(sset))
88
 
89
  return [
90
  datasets.SplitGenerator(
91
  name=datasets.Split.TRAIN,
92
  gen_kwargs={
93
  "name": scrape_set,
94
+ "data_dir": manual_dir,
95
  "data_file": dl_path,
96
  })
97
  ]
 
99
  def _generate_examples(self, name, data_dir, data_file):
100
  """Generate examples from a Corpus Crawl cache."""
101
  logger.info("generating examples from = %s", name)
102
+ scfg = self.config.name.split('_')[1]
103
  links = _get_links(data_file)
104
  if not self.config.data_dir:
105
  self.config.data_dir = data_dir
 
110
  _id = 1
111
  for link in links:
112
  res = self._fetch_page(link, data_dir)
113
+ if scfg == "documents":
114
+ text = ["\n".join(res['text'])]
115
+ else:
116
+ text = res['text']
117
+ for para in text:
118
  example = {
119
  "genre": res.get('genre', ''),
120
  "url": res['location'],