jimregan committed on
Commit
f99e8b0
1 Parent(s): 22049dc

in progress version of script

Files changed (1)
  1. corpuscrawler-irish.py +615 -0
corpuscrawler-irish.py ADDED
@@ -0,0 +1,615 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ # Copyright 2021 Phonetics and Speech Laboratory, Trinity College, Dublin
+ #
+ # Based on Corpus Crawler (utils.py):
+ # Copyright 2017 Google Inc. All rights reserved.
+ #
+ # Based on Corpus Crawler's Irish crawler (crawl_ga.py):
+ # Copyright 2017 Google Inc. All rights reserved.
+ # Copyright 2017 Jim O'Regan
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # Lint as: python3
+ """Corpus Crawler Irish web text dataset."""
+
+ import collections
+ import os
+ import re
+ import struct
+ import unicodedata
+ import base64
+ import hashlib
+ from html.entities import name2codepoint
+ from email import message_from_string as Message
+ from urllib.parse import urlparse
+ from pathlib import Path
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ Irish web corpus, crawled with Corpus Crawler.
+
+ Uses a list of URLs, collected by the crawler, to
+ retrieve the files from the crawler's cache.
+ """
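+ # A minimal usage sketch (paths are placeholders; the config names come from
+ # _SCRAPES below, and data_dir must point at an existing Corpus Crawler
+ # cache directory):
+ #
+ #   import datasets
+ #   ds = datasets.load_dataset("corpuscrawler-irish.py", name="20210810",
+ #                              data_dir="/path/to/cache", split="train")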
+
+ #_SCRAPES = ["20180911", "20191117", "20210810"]
+ _SCRAPES = ["20191117", "20210810"]
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+ _DATA_URL = 'https://gist.githubusercontent.com/jimregan/66612f4ecb88ed96d41d43266e6d0872/raw/26bd05f11b4c1c31e33d36528ac53dea587be8ef/crawled-{}.txt'
+
+
+ class CorpusCrawlerIrishConfig(datasets.BuilderConfig):
+     """BuilderConfig for CorpusCrawlerIrish."""
+
+     def __init__(self, **kwargs):
+         super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)
+
+ class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
+     """Corpus Crawler crawled text dataset."""
+
+     BUILDER_CONFIGS = [
+         CorpusCrawlerIrishConfig(name=scrape) for scrape in _SCRAPES
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "url": datasets.Value("string"),
+                     "genre": datasets.Value("string"),
+                     "publication_date": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "video_url": datasets.Value("string"),
+                 }
+             ),
+         )
+
+     def _split_generators(self, dl_manager):
+         if not self.config.data_dir:
+             raise ValueError(f"Path to Corpus Crawler cache directory must be specified, but got data_dir={self.config.data_dir}")
+         cc_cache = self.config.data_dir
+
+         if not self.config.name:
+             raise ValueError(f"Scrape set must be specified, but got name={self.config.name}")
+         scrape_set = self.config.name
+         dl_path = dl_manager.download(_DATA_URL.format(self.config.name))
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "name": scrape_set,
+                     "data_dir": cc_cache,
+                     "data_file": dl_path,
+                 })
+         ]
+
+     def _generate_examples(self, name, data_dir, data_file):
+         """Generate examples from a Corpus Crawler cache."""
+         logger.info("generating examples from = %s", name)
+         links = _get_links(data_file)
+         if not self.config.data_dir:
+             self.config.data_dir = data_dir
+         dd_path = Path(data_dir)
+         if not dd_path.is_dir():
+             raise Exception('No directory: ' + data_dir)
+
+         _id = 1
+         for link in links:
+             res = self._fetch_page(link, data_dir)
+             # _fetch_page returns None for hosts without an extractor, and
+             # an extractor may return a dict without 'text' for empty pages.
+             if not res or not res.get('text'):
+                 continue
+             for para in res['text']:
+                 example = {
+                     "genre": res.get('genre', ''),
+                     "url": res['location'],
+                     "publication_date": res.get('publication-date', ''),
+                     "video_url": res.get('video', ''),
+                     "title": res.get('title', ''),
+                     "text": para
+                 }
+                 yield _id, example
+                 _id += 1
+
+     def _fetch_page(self, url, data_dir):
+         _EXTRACTORS = {
+             'www.unicode.org': do_udhr,
+             'tuairisc.ie': do_tuairisc_ie,
+             'www.rte.ie': do_nuachtrte,
+             'www.irishtimes.com': do_irishtimes,
+             'www.chg.gov.ie': do_chg,
+             'www.ainm.ie': do_ainm_ie,
+             'gaeltacht21.blogspot.com': do_blogspot,
+             'aonghus.blogspot.com': do_blogspot,
+             'nimill.blogspot.com': do_blogspot,
+             'turasailse.blogspot.com': do_blogspot,
+             'caomhach.blogspot.com': do_blogspot,
+             'breacleabhar.blogspot.com': do_blogspot,
+             'gearoid.blogspot.com': do_blogspot,
+             'philo-celtic.blogspot.com': do_blogspot,
+             'iomhannablag.blogspot.com': do_blogspot,
+             'smaointefanacha.blogspot.com': do_blogspot,
+             'imeall.blogspot.com': do_blogspot,
+             'coislife.ie': do_coislife_ie,
+             'meoneile.ie': do_meoneile_ie,
+             'peig.ie': do_peig_ie,
+             'www.forasnagaeilge.ie': do_forasnagaeilge_ie,
+         }
+         parsed_url = urlparse(url)
+         host = parsed_url.netloc
+         # A name that does not shadow the module-level extract() helper.
+         extractor = _EXTRACTORS.get(host)
+         if extractor:
+             fr = fetch(data_dir, url)
+             if fr is None:
+                 raise Exception("Failed to fetch " + url + " from " + data_dir)
+             return extractor(fr)
+         return None
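+ # To cover a new site, add its hostname to _EXTRACTORS above and write a
+ # do_* extractor that returns the same dict shape: 'location' and 'text'
+ # (a list of paragraph strings), plus optional 'genre', 'title',
+ # 'publication-date' and 'video'.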
+
+
+ # Corpus Crawler: utils.py
+ _TAG_REGEX = re.compile(r'<.+?>', flags=re.DOTALL)
+ def striptags(s):
+     return _TAG_REGEX.sub('', s)
+
+
+ def unichar(i):
+     try:
+         return chr(i)
+     except ValueError:
+         # non-BMP codepoint in narrow Python build
+         return struct.pack('i', i).decode('utf-32')
+
+
+ def replace_html_entities(html):
+     entities = name2codepoint
+     html = re.sub(r'&#([0-9]+);',
+                   lambda z: unichar(int(z.group(1))), html)
+     html = re.sub(r'&#[xX]([0-9a-fA-F]+);',
+                   lambda z: unichar(int(z.group(1), 16)), html)
+     html = re.sub(r'&([a-zA-Z]+);',
+                   lambda z: unichar(entities.get(z.group(1).lower(), 0x20)), html)
+     return html
+
+
+ def cleantext(html):
+     html = re.sub(r'<script.+?</script>', ' ', html, flags=re.DOTALL)
+     html = replace_html_entities(striptags(html))
+     # Some web sites insert zero-width spaces, possibly as byte order marks
+     # (from Microsoft Notepad) which their scripts failed to recognize as such.
+     html = html.replace('\u200B', '')
+     return unicodedata.normalize('NFC', ' '.join(html.split()))
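+ # For example, cleantext('<p>Caf&eacute;  ar maidin</p>') returns
+ # 'Café ar maidin': tags stripped, entities decoded, and whitespace
+ # collapsed before NFC normalisation.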
+
+
+ def clean_paragraphs(html):
+     text = html.replace('\n', ' ')
+     text = re.sub(r'</(?:div|DIV|p|P|[hH][1-6]|table|TABLE|tr|td|article)>',
+                   '\n', text)
+     text = re.sub(r'<(?:br|BR)\s*/?>', '\n', text)
+     return list(filter(None, [cleantext(p) for p in text.split('\n')]))
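+ # For example, clean_paragraphs('<p>Dia duit</p><p>Slán</p>') returns
+ # ['Dia duit', 'Slán'].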
+
+
+ def extract(before, after, html):
+     s = html.split(before, 1)
+     return s[1].split(after)[0] if len(s) == 2 else None
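+ # For example, extract('<b>', '</b>', 'a<b>x</b>c') returns 'x'; when the
+ # 'before' marker is absent it returns None.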
+
+
+ FetchResult = collections.namedtuple('FetchResult',
+                                      ['headers', 'content', 'url', 'filepath'])
+
+
+ def fetch(cache_dir, url):
+     logger.info("fetching url %s from cache %s", url, cache_dir)
+     try:
+         digest = hashlib.sha256(url.encode('utf-8')).digest()
+     except (AttributeError, UnicodeError):
+         # url may already be bytes
+         digest = hashlib.sha256(url).digest()
+     filepath = os.path.join(cache_dir,
+                             "f" + base64.urlsafe_b64encode(digest).decode('utf-8'))
+
+     fp = Path(filepath)
+     if not fp.is_file():
+         raise Exception("No such file: " + filepath)
+
+     try:
+         with open(filepath, 'r', encoding='utf-8-sig', newline='') as f:
+             file_content = f.read()
+         if '\r\n\r\n\r\n' in file_content:
+             splitter = '\r\n\r\n\r\n'
+         else:
+             splitter = '\n\n\n'
+         cached = file_content.split(splitter, 1)
+         if len(cached) == 2:
+             headers, content = cached
+             try:
+                 content = content.encode('utf-8')
+             except AttributeError:
+                 # already encoded as bytes
+                 pass
+             headers = Message(headers)
+             return FetchResult(headers, content, url, filepath)
+     except IOError:
+         raise Exception("fetch() failed: " + filepath)
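+ # Cache files, as written by Corpus Crawler, hold the HTTP headers and the
+ # page body in one file, separated by three consecutive newlines; the file
+ # name is "f" followed by the urlsafe base64 of the URL's SHA-256 digest,
+ # which is what fetch() reconstructs above.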
+
+ def do_udhr(fetchresult):
+     out = {}
+     text = fetchresult.content.decode('utf-8').split('---', 1)[1]
+     out['location'] = fetchresult.url
+     out['genre'] = 'Legal'
+     paras = []
+     for paragraph in text.splitlines():
+         paragraph = paragraph.strip()
+         if len(paragraph) > 0:
+             paras.append(paragraph)
+     out['text'] = paras
+     return out
+
+
+ # corpuscrawler: crawl_ga.py
+ _ENGLISH_MONTHS = {
+     'january': 1,
+     'february': 2,
+     'march': 3,
+     'april': 4,
+     'may': 5,
+     'june': 6,
+     'july': 7,
+     'august': 8,
+     'september': 9,
+     'october': 10,
+     'november': 11,
+     'december': 12,
+ }
+
+
+ def _byline_to_pubdate(byline):
+     date = re.search(r'(\d{1,2}) ([^ ]+?) (\d{4})', byline)
+     if not date:
+         return None
+     day = int(date.group(1))
+     year = int(date.group(3))
+     # .get() so an unrecognised month name yields None rather than KeyError
+     month = _ENGLISH_MONTHS.get(date.group(2).lower())
+     if not month:
+         return None
+     return "{}-{:0>2d}-{:0>2d}".format(year, month, day)
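+ # For example, _byline_to_pubdate('Aoine 1 March 2019') returns '2019-03-01'.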
+
+
+ def _rte_writable_paragraph(text):
+     if text == '':
+         return False
+     if text.startswith('© RTÉ '):
+         return False
+     if text.startswith('By using this website, you consent'):
+         return False
+     if text.startswith('RTÉ.ie is the website of Raidió Teilifís Éireann'):
+         return False
+     if text.find('is not responsible for the content') >= 0:
+         return False
+     if text.find('RTÉ uses cookies in accordance with our Cookie Policy') >= 0:
+         return False
+     if re.match(r'^[*+]+$', text):
+         return False
+     return True
+
+
+ def _rte_cleanall(html):
+     section_article_regex = re.compile(r'<section[^>]+itemprop="articleBody"[^>]*>')
+     search = section_article_regex.search(html)
+     out = []
+     if search:
+         body = extract(search.group(0), '</section>', html)
+         for para in clean_paragraphs(body):
+             if _rte_writable_paragraph(para):
+                 out.append(para)
+         # A list of paragraphs, like the fallback branch below; callers
+         # put this straight into the 'text' field.
+         return out
+     for paragraph in re.findall(r'<p>(.+?)</p>', html):
+         cleaned = cleantext(paragraph)
+         if _rte_writable_paragraph(cleaned):
+             out.append(cleaned)
+     return out
+
+
+ def _sceala_clean(paras):
+     # RTÉ /sceala/ pages end in a footer introduced by a run of
+     # underscores; drop the separator and everything after it.
+     out = []
+     for para in paras:
+         if para.startswith('____'):
+             break
+         if '\n____' in para:
+             out.append(para.split('\n____')[0])
+             break
+         out.append(para)
+     return out
+
+
+ def do_nuachtrte(fetchresult):
+     out = {}
+     pubdate_regex = re.compile(r'name="DC.date" (?:scheme="DCTERMS.URI" )?content="([0-9T:+\-]{19,25})"')
+     html = fetchresult.content.decode('utf-8')
+     pubdate_match = pubdate_regex.search(html)
+     pubdate = pubdate_match.group(1) if pubdate_match else None
+     if pubdate is None:
+         pubdate = fetchresult.headers.get('Last-Modified')
+     out['location'] = fetchresult.url
+     if 'nuacht' in fetchresult.url:
+         out['genre'] = 'News'
+     if pubdate:
+         out['publication-date'] = pubdate
+     title = re.search(r'<title>(.+?)</title>', html)
+     if title:
+         title = striptags(title.group(1).split('- RTÉ')[0]).strip()
+     if title:
+         out['title'] = cleantext(title)
+     cleaned = _rte_cleanall(html)
+     if '/sceala/' in fetchresult.url:
+         cleaned = _sceala_clean(cleaned)
+     out['text'] = cleaned
+     return out
+
+
+ def do_meoneile_ie(fetchresult):
+     out = {}
+     html = fetchresult.content.decode('utf-8')
+     # extract() may return None, so guard before stripping
+     title = extract('<title>', '</title>', html) or ''
+     title = title.split('&lt;')[0].strip()
+     video = re.search(r"<iframe.*src='(//player\.vimeo\.com/video/[0-9]+)[^>]*></iframe>", html)
+     body = extract("<div class='article-content'>", '</article>', html) or ''
+     byline = extract("<div class='byline'>", '</span>', html) or ''
+     byline = _byline_to_pubdate(byline)
+     if title:
+         out['title'] = title
+     paras = clean_paragraphs(body)
+     if paras:
+         out['location'] = fetchresult.url
+         out['genre'] = 'News'
+         if video:
+             out['video'] = f'https:{video.group(1)}'
+         if byline:
+             out['publication-date'] = byline
+         # drop the stray "Roinn" (share) widget label
+         out['text'] = [para for para in paras if para != 'Roinn']
+     return out
+
+
+ def do_irishtimes(fetchresult):
+     out = {}
+     html = fetchresult.content.decode('utf-8')
+     pubdatere1 = re.compile(r'<meta itemprop="datePublished" content="([^"]*)"/>')
+     pubdatere2 = re.compile(r'"datePublished": "([^"]+)"')
+     out['location'] = fetchresult.url
+     out['genre'] = 'News'
+     title = re.search(r'<title>(.+?)</title>', html)
+     pubdate_match = pubdatere1.search(html)
+     pubdate_match = pubdate_match if pubdate_match else pubdatere2.search(html)
+     pubdate = pubdate_match.group(1) if pubdate_match else None
+     if pubdate is None:
+         pubdate = fetchresult.headers.get('Last-Modified')
+     if pubdate:
+         out['publication-date'] = pubdate
+     if title:
+         out['title'] = cleantext(title.group(1))
+     paras = []
+     for paragraph in re.findall(
+             r'<p class="no_name">(.+?)</p>',
+             html.split('<div class="article_bodycopy">')[1]):
+         cleaned = cleantext(paragraph)
+         paras.append(cleaned)
+     out['text'] = paras
+     return out
+
+
+ def do_blogspot(fetchresult):
+     out = {}
+     pubdate_regex = re.compile(
+         r"<abbr class='published' title='([^']*)'>[^<]*</abbr>")
+     html = fetchresult.content.decode('utf-8')
+     pubdate_match = pubdate_regex.search(html)
+     pubdate = pubdate_match.group(1) if pubdate_match else None
+     if pubdate is None:
+         pubdate = fetchresult.headers.get('Last-Modified')
+     title = re.search(r"<meta content='([^']+)' property='og:title'/>",
+                       html)
+     title = title.group(1) if title else ''
+     # Blogspot templates differ; try each known post container in turn.
+     post = extract("<div class='post-body entry-content'>",
+                    "<div class='post-footer'>", html)
+     if post is None:
+         post = extract("<div class='post-header'>",
+                        "<div class='post-footer'>", html)
+     if post is None:
+         post = extract('<div class="post-body">',
+                        '<p class="post-footer">', html)
+     paras = clean_paragraphs(post) if post else []
+     if paras:
+         out['title'] = title
+         out['location'] = fetchresult.url
+         out['genre'] = 'News'
+         if pubdate:
+             out['publication-date'] = pubdate
+         out['text'] = paras
+     return out
+
+
+ def do_ainm_ie(fetchresult):
+     out = {}
+     html = fetchresult.content.decode('utf-8')
+     title = re.search(r'<title>(.+?)</title>', html)
+     title = title.group(1).split('|')[0] if title else ''
+     body = extract('<div class="article">',
+                    '<!-- .contentWrapper-->', html) or ''
+     body = body.split('<div id="machines"')[0]
+     paras = clean_paragraphs(body)
+     pubdate = fetchresult.headers.get('Last-Modified')
+     if paras:
+         out['title'] = title
+         out['location'] = fetchresult.url
+         out['genre'] = 'Biography'
+         if pubdate:
+             out['publication-date'] = pubdate
+         out['text'] = paras
+     return out
+
+
+ def do_tuairisc_ie(fetchresult):
+     out = {}
+     pubdate_regex = re.compile(
+         r'<time datetime="(20\d\d-\d\d-\d\d)\s+(\d\d:\d\d)" '
+         r'itemprop="datePublished">')
+     html = fetchresult.content.decode('utf-8')
+     title = extract('<h1 class="title article--full__title">', '</h1>',
+                     html) or ''
+     pubdate = None
+     pubdate_match = pubdate_regex.search(html)
+     if pubdate_match:
+         pubdate = '%sT%s:00Z' % (
+             pubdate_match.group(1), pubdate_match.group(2))
+     body = extract(
+         '<div class="article--full__content" itemprop="articleBody">',
+         '</article>', html)
+     if body:
+         paras = clean_paragraphs(body)
+         if paras:
+             out['title'] = title
+             out['location'] = fetchresult.url
+             out['genre'] = 'News'
+             if pubdate:
+                 out['publication-date'] = pubdate
+             out['text'] = paras
+     return out
+
+
+ def do_coislife_ie(fetchresult):
+     out = {}
+     html = fetchresult.content.decode('utf-8')
+     title = re.search(r'<title>(.+?)</title>', html)
+     title = title.group(1).split('&#8211;')[0].strip() if title else ''
+     desc = re.search(r'<meta property="og:description" content="([^"]+?)"', html)
+     desc = cleantext(desc.group(1)) if desc else ''
+     body = extract('<div class="tab-content">',
+                    '<div class="entry-content in fade tab-pane" id="tab-additional_information">', html) or ''
+     paras = clean_paragraphs(title + '<br/>' + body)
+     pubdate = fetchresult.headers.get('Last-Modified')
+     if paras:
+         out['title'] = title
+         out['location'] = fetchresult.url
+         out['genre'] = 'Commerce'
+         if desc:
+             out['description'] = desc
+         if pubdate:
+             out['publication-date'] = pubdate
+         # skip the "Léigh sliocht as an leabhar" (read an excerpt) link
+         out['text'] = [para for para in paras
+                        if 'Léigh sliocht as an leabhar' not in para]
+     return out
+
+
+ def do_chg(fetchresult):
+     out = {}
+     def _chg_content(page):
+         return page.split('<div class="container" id="article">')[1].split('<!-- /.right columns -->')[0]
+     phtml = fetchresult.content.decode('utf-8')
+     ptext = _chg_content(phtml)
+     title = re.search(r'<title>(.+?)</title>', phtml)
+     if title:
+         title = striptags(title.group(1).split('|')[0]).strip()
+     pubdate = fetchresult.headers.get('Last-Modified')
+     out['location'] = fetchresult.url
+     out['genre'] = 'Government'
+     if pubdate:
+         out['publication-date'] = pubdate
+     if title:
+         out['title'] = title
+     paras = []
+     for paragraph in re.findall(r'<p>(.+?)</p>', ptext):
+         cleaned = cleantext(paragraph)
+         paras.append(cleaned)
+     out['text'] = paras
+     return out
+
+
+ def do_peig_ie(fetchresult):
+     out = {}
+     def peig_cat(page):
+         if page.find('/imeachtai/') >= 0:
+             return 'Events'
+         elif page.find('peig.ie/20') >= 0:
+             return 'News'
+         elif page.find('/fol%C3%BAntais/') >= 0:
+             return 'Job listings'
+         else:
+             return ''
+     # Peig.ie has a lot of posts from other sites
+     html = fetchresult.content.decode('utf-8')
+     title = re.search(r'<title>(.+?)</title>', html)
+     title = title.group(1).split('|')[0].strip() if title else ''
+     if '<meta property="article:modified_time"' in html:
+         date = re.search(r'<meta property="article:modified_time" content="([^"]+)"', html)
+     else:
+         date = re.search(r'"datePublished":"([^"]+)"', html)
+     date = date.group(1) if date else None
+     body = extract('<div class="uk-margin-medium-top" property="text">', '<ul class="uk-pagination', html) or ''
+     paras = clean_paragraphs(body)
+     genre = peig_cat(fetchresult.url)
+     if paras:
+         out['location'] = fetchresult.url
+         if title:
+             out['title'] = title
+         if genre:
+             out['genre'] = genre
+         if date:
+             out['publication-date'] = date
+         out['text'] = paras
+     return out
+
+
+ def do_forasnagaeilge_ie(fetchresult):
+     out = {}
+     pubdate_regex = re.compile(r'"datePublished":"([^"]+)",')
+     html = fetchresult.content.decode('utf-8')
+     # skip pages whose <html> tag is marked lang="en"
+     if '<html class="no-js" lang="en">' in html:
+         return {}
+     title = extract('<title>', ' - www.forasnagaeilge.ie</title>',
+                     html) or ''
+     pubdate = None
+     pubdate_match = pubdate_regex.search(html)
+     if pubdate_match:
+         pubdate = pubdate_match.group(1)
+     body = extract(
+         '<div id="main" class="container">',
+         '</div><!-- /.content -->', html)
+     if body:
+         paras = clean_paragraphs(body)
+         if paras:
+             out['location'] = fetchresult.url
+             out['genre'] = 'News'
+             if title:
+                 out['title'] = title
+             if pubdate:
+                 out['publication-date'] = pubdate
+             out['text'] = paras
+     return out
+
+
+ def _get_links(scrape):
+     links = set()
+     with open(scrape) as f:
+         for url in f:
+             links.add(url.rstrip())
+     return list(links)