# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
# Copyright 2021 Phonetics and Speech Laboratory, Trinity College, Dublin
#
# Based on Corpus Crawler (utils.py):
# Copyright 2017 Google Inc. All rights reserved.
#
# Based on Corpus Crawler's Irish crawler (crawl_ga.py):
# Copyright 2017 Google Inc. All rights reserved.
# Copyright 2017 Jim O'Regan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Corpus Crawler Irish web text dataset."""

import base64
import collections
import hashlib
import os
import re
import struct
import unicodedata
from email import message_from_string as Message
from html.entities import name2codepoint
from pathlib import Path
from urllib.parse import urlparse

import datasets


_DESCRIPTION = """\
Irish web corpus, crawled with Corpus Crawler. Uses a list of URLs, collected
by the crawler, to retrieve the files from the crawler's cache.
"""

# _SCRAPES = ["20180911", "20191117", "20210810"]
_SCRAPES = ["20191117", "20210810"]

logger = datasets.utils.logging.get_logger(__name__)

_DATA_URL = 'https://huggingface.co/datasets/phonlab-tcd/corpuscrawler-ga/raw/main/crawled-{}.txt'
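
# Example usage (an illustrative sketch; the cache path is a placeholder): the
# crawler cache must be supplied manually via `data_dir`, and the config name
# pairs a scrape date from _SCRAPES with either "documents" or "paragraphs".
#
#   import datasets
#   ds = datasets.load_dataset(
#       "phonlab-tcd/corpuscrawler-ga",           # or a local path to this script
#       "20210810_documents",
#       data_dir="/path/to/corpuscrawler/cache",  # hypothetical cache location
#   )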


class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
    """Corpus Crawler crawled text dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"{scrape}_{cfg}")
        for scrape in _SCRAPES
        for cfg in ["documents", "paragraphs"]
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "genre": datasets.Value("string"),
                    "publication_date": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "video_url": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        scrape_set = self.config.name
        sset = self.config.name.split('_')[0]
        dl_path = dl_manager.download(_DATA_URL.format(sset))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "name": scrape_set,
                    "data_dir": manual_dir,
                    "data_file": dl_path,
                },
            )
        ]

    def _generate_examples(self, name, data_dir, data_file):
        """Generate examples from a Corpus Crawler cache."""
        logger.info("generating examples from = %s", name)
        scfg = self.config.name.split('_')[1]
        links = _get_links(data_file)
        if not self.config.data_dir:
            self.config.data_dir = data_dir
        dd_path = Path(data_dir)
        if not dd_path.is_dir():
            raise Exception('No directory: ' + data_dir)
        _id = 1
        for link in links:
            if not link:
                continue
            res = self._fetch_page(link, data_dir)
            if res is None:
                raise Exception("Failed to read " + link + " from " + data_dir)
            if scfg == "documents":
                text = ["\n".join(res.get('text', []))]
            else:
                text = res.get('text', [])
            for para in text:
                example = {
                    "genre": res.get('genre', ''),
                    "url": res.get('location', link),
                    "publication_date": res.get('publication-date', ''),
                    "video_url": res.get('video', ''),
                    "title": res.get('title', ''),
                    "text": para,
                }
                yield _id, example
                _id += 1

    def _fetch_page(self, url, data_dir):
        # Dispatch to a site-specific extractor based on the URL's hostname.
        _EXTRACTORS = {
            'www.unicode.org': do_udhr,
            'tuairisc.ie': do_tuairisc_ie,
            'www.rte.ie': do_nuachtrte,
            'www.irishtimes.com': do_irishtimes,
            'www.chg.gov.ie': do_chg,
            'www.ainm.ie': do_ainm_ie,
            'gaeltacht21.blogspot.com': do_blogspot,
            'aonghus.blogspot.com': do_blogspot,
            'nimill.blogspot.com': do_blogspot,
            'turasailse.blogspot.com': do_blogspot,
            'caomhach.blogspot.com': do_blogspot,
            'breacleabhar.blogspot.com': do_blogspot,
            'gearoid.blogspot.com': do_blogspot,
            'philo-celtic.blogspot.com': do_blogspot,
            'iomhannablag.blogspot.com': do_blogspot,
            'smaointefanacha.blogspot.com': do_blogspot,
            'imeall.blogspot.com': do_blogspot,
            'coislife.ie': do_coislife_ie,
            'meoneile.ie': do_meoneile_ie,
            'peig.ie': do_peig_ie,
            'www.forasnagaeilge.ie': do_forasnagaeilge_ie,
        }
        parsed_url = urlparse(url)
        host = parsed_url.netloc
        extractor = _EXTRACTORS.get(host)
        if extractor:
            fr = fetch(data_dir, url)
            if fr is None:
                raise Exception("Failed to fetch " + url + " from " + data_dir)
            return extractor(fr)


# Corpus Crawler: utils.py
_TAG_REGEX = re.compile(r'\<.+?\>', flags=re.DOTALL)


def striptags(s):
    return _TAG_REGEX.sub('', s)


def unichar(i):
    try:
        return chr(i)
    except ValueError:
        # non-BMP codepoint in narrow Python build
        return struct.pack('i', i).decode('utf-32')


def replace_html_entities(html):
    entities = name2codepoint
    html = re.sub(r'&#([0-9]+);', lambda z: unichar(int(z.group(1))), html)
    html = re.sub(r'&#[xX]([0-9a-fA-F]+);',
                  lambda z: unichar(int(z.group(1), 16)), html)
    html = re.sub(r'&([a-zA-Z]+);',
                  lambda z: unichar(entities.get(z.group(1).lower(), 0x20)),
                  html)
    return html


def cleantext(html):
    # The pattern below originally contained markup that was lost from this
    # copy of the script; stripping <script>/<style> blocks is a reconstruction.
    html = re.sub(r'<(?:script|style).*?</(?:script|style)>', ' ', html,
                  flags=re.DOTALL)
    html = replace_html_entities(striptags(html))
    # Some web sites insert zero-width spaces, possibly as byte order marks
    # (from Microsoft Notepad) which their scripts failed to recognize as such.
    html = html.replace('\u200B', '')
    return unicodedata.normalize('NFC', ' '.join(html.split()))


def clean_paragraphs(html):
    text = html.replace('\n', ' ')
    # The closing-tag pattern was also lost; treating closing block-level tags
    # as paragraph breaks is a reconstruction.
    text = re.sub(r'</(?:p|div|h[1-6]|td|li)>', '\n', text, flags=re.IGNORECASE)
    text = re.sub(r'<(?:br|BR)\s*/?>', '\n', text)
    return list(filter(None, [cleantext(p) for p in text.split('\n')]))


def extract(before, after, html):
    s = html.split(before, 1)
    return s[1].split(after)[0] if len(s) == 2 else None


FetchResult = collections.namedtuple('FetchResult',
                                     ['headers', 'content', 'url', 'filepath'])


def fetch(cache_dir, url):
    """Read a page from the Corpus Crawler cache rather than the network."""
    logger.info("fetching url %s from cache %s", url, cache_dir)
    try:
        digest = hashlib.sha256(url.encode('utf-8')).digest()
    except AttributeError:
        # url was already passed in as bytes
        digest = hashlib.sha256(url).digest()
    filepath = os.path.join(
        cache_dir, "f" + base64.urlsafe_b64encode(digest).decode('utf-8'))
    fp = Path(filepath)
    if not fp.is_file():
        raise Exception("No such file: " + str(fp))
    try:
        with open(filepath, 'r', encoding='utf-8-sig', newline='') as f:
            file_content = f.read()
        if '\r\n\r\n\r\n' in file_content:
            splitter = '\r\n\r\n\r\n'
        else:
            splitter = '\n\n\n'
        cached = file_content.split(splitter, 1)
        if len(cached) == 2:
            headers, content = cached
            try:
                content = content.encode('utf-8')
            except AttributeError:
                # already encoded as bytes
                pass
            headers = Message(headers)
            if not content:
                raise Exception("empty content")
            return FetchResult(headers, content, url, filepath)
        else:
            raise Exception("splitting headers and content failed")
    except IOError:
        raise Exception("fetch() failed")
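
# For reference, the cache filename scheme used by fetch() can be reproduced
# like this (a sketch; CACHE_DIR stands in for the manually supplied data_dir):
#
#   import base64, hashlib, os
#   digest = hashlib.sha256('https://tuairisc.ie/'.encode('utf-8')).digest()
#   path = os.path.join(CACHE_DIR,
#                       'f' + base64.urlsafe_b64encode(digest).decode('utf-8'))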


def do_udhr(fetchresult):
    out = {}
    text = fetchresult.content.decode('utf-8').split('---', 1)[1]
    out['location'] = fetchresult.url
    out['genre'] = 'Legal'
    paras = []
    for paragraph in text.splitlines():
        paragraph = paragraph.strip()
        if len(paragraph) > 0:
            paras.append(paragraph)
    out['text'] = paras
    return out


# corpuscrawler: crawl_ga.py
_ENGLISH_MONTHS = {
    'january': 1,
    'february': 2,
    'march': 3,
    'april': 4,
    'may': 5,
    'june': 6,
    'july': 7,
    'august': 8,
    'september': 9,
    'october': 10,
    'november': 11,
    'december': 12,
}


def _byline_to_pubdate(byline):
    date = re.search(r'(\d{1,2}) ([^ ]+?) (\d{4})', byline)
    if not date:
        return None
    day = int(date.group(1))
    year = int(date.group(3))
    # Use .get() so unrecognised month names return None instead of raising.
    month = _ENGLISH_MONTHS.get(date.group(2).lower())
    if not month:
        return None
    return "{}-{:0>2d}-{:0>2d}".format(year, month, day)
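
# Illustrative examples (assumed inputs, not from the original script):
#   _byline_to_pubdate("Tuairisceoir, 3 August 2021")  ->  "2021-08-03"
#   _byline_to_pubdate("gan dáta")                      ->  None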


def _rte_writable_paragraph(text):
    if text == '':
        return False
    if text.startswith('© RTÉ '):
        return False
    if text.startswith('By using this website, you consent'):
        return False
    if text.startswith('RTÉ.ie is the website of Raidió Teilifís Éireann'):
        return False
    if text.find('is not responsible for the content') >= 0:
        return False
    if text.find('RTÉ uses cookies in accordance with our Cookie Policy') >= 0:
        return False
    if re.match(r'^[*+]+$', text):
        return False
    return True


def _rte_cleanall(html):
    # The opening-tag part of this pattern was lost from this copy of the
    # script; matching the article-body <section> is a reconstruction, as is
    # the '</section>' end marker below.
    section_article_regex = re.compile(
        r'<section[^>]+itemprop="articleBody"[^>]*>')
    search = section_article_regex.search(html)
    out = []
    if search:
        body = extract(search.group(0), '</section>', html)
        for para in clean_paragraphs(body):
            if _rte_writable_paragraph(para):
                out.append(para)
        # Return a list of paragraphs; the dataset code and _sceala_clean()
        # both expect a list rather than a joined string.
        return out
    # Fallback: the paragraph pattern here is also a reconstruction.
    for paragraph in re.findall(r'<p>(.+?)</p>', html):
        cleaned = cleantext(paragraph)
        if _rte_writable_paragraph(cleaned):
            out.append(cleaned)
    return out


def _sceala_clean(paras):
    out = []
    for para in paras:
        if '\n____' not in para:
            out.append(para)
        else:
            out.append(para.split('\n____')[0])
            break
    return out


def do_nuachtrte(fetchresult):
    out = {}
    pubdate_regex = re.compile(
        r'name="DC.date" (?:scheme="DCTERMS.URI" )?content="([0-9T:+\-]{19,25})"')
    html = fetchresult.content.decode('utf-8')
    pubdate_match = pubdate_regex.search(html)
    pubdate = pubdate_match.group(1) if pubdate_match else None
    if pubdate is None:
        pubdate = fetchresult.headers.get('Last-Modified')
    out['location'] = fetchresult.url
    if 'nuacht' in fetchresult.url:
        out['genre'] = 'News'
    if pubdate:
        out['publication-date'] = pubdate
    # '<title>'/'</title>' is a reconstruction of a pattern whose markup was lost.
    title = re.search(r'<title>(.+?)</title>', html)
    if title:
        title = striptags(title.group(1).split('- RTÉ')[0]).strip()
        if title:
            out['title'] = cleantext(title)
    cleaned = _rte_cleanall(html)
    if '/sceala/' in fetchresult.url:
        cleaned = _sceala_clean(cleaned)
    out['text'] = cleaned
    return out


def do_meoneile_ie(fetchresult):
    out = {}
    html = fetchresult.content.decode('utf-8')
    # The markup markers in the calls below were lost from this copy of the
    # script; the title/video/body strings here are stand-in guesses, not the
    # original values.
    title = (extract('<title>', '</title>', html) or '').strip()
    title = title.split('<')[0].strip() if title else ''
    video = re.search(r"<video[^>]*>", html)
    body = extract("<div class='article-content'>", '</div>', html) or ''
    byline = extract("