{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7301572a-4803-4a16-b262-74e41e25803e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "\n",
    "import pandas as pd\n",
    "from datasets import Dataset, load_dataset\n",
    "from huggingface_hub import Repository, create_repo\n",
    "from selectolax.parser import HTMLParser"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a898baed-3640-4ca8-9d3d-b88f6c85a428",
   "metadata": {},
   "outputs": [],
   "source": [
    "def _parse_start_end(node):\n",
    "    # EXMARaLDA events reference timeline anchors such as \"T12\"; strip the \"T\".\n",
    "    return int(node.attrs[\"start\"][1:]), int(node.attrs[\"end\"][1:])\n",
    "\n",
    "\n",
    "def get_original_text(sent_toks) -> str:\n",
    "    # Join the learner tokens, skipping whitespace-only events.\n",
    "    return \" \".join(t.text() for t in sent_toks if t.text().strip())\n",
    "\n",
    "\n",
    "def get_corrected_text(toks_cor, last_end, sent_end) -> tuple[int, str]:\n",
    "    # Collect target-hypothesis (TH1) tokens lying between the end of the\n",
    "    # previous sentence's corrections and the end of the current sentence.\n",
    "    cor_toks = []\n",
    "    for tok in toks_cor:\n",
    "        tok_start, tok_end = _parse_start_end(tok)\n",
    "        if tok_start >= last_end and tok_end <= sent_end:\n",
    "            cor_toks.append(tok.text())\n",
    "            # Advance only past consumed tokens, so the next sentence picks\n",
    "            # up exactly where this one stopped.\n",
    "            last_end = tok_end\n",
    "    return last_end, \" \".join(cor_toks)\n",
    "\n",
    "\n",
    "def process_doc(doc, path):\n",
    "    toks = doc.select('tier[category=\"tok\"] event').matches\n",
    "    toks_cor = doc.select('tier[category=\"TH1\"] event').matches\n",
    "    sents = doc.select('tier[category=\"sentence\"] event').matches\n",
    "\n",
    "    last_end = 0\n",
    "    for sent_no, org_sent in enumerate(sents):\n",
    "        sent_start, sent_end = _parse_start_end(org_sent)\n",
    "        # Each \"tok\" event spans a single timeline step, so timeline indices\n",
    "        # double as indices into the token list.\n",
    "        sent_toks = toks[sent_start:sent_end]\n",
    "        original_text = get_original_text(sent_toks)\n",
    "        last_end, corrected_text = get_corrected_text(toks_cor, last_end, sent_end)\n",
    "\n",
    "        yield {\n",
    "            \"original\": original_text,\n",
    "            \"corrected\": corrected_text,\n",
    "            \"id\": f\"{path.stem}-{sent_no}\",\n",
    "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "191fe2e3-8a4e-47e2-9316-5b6028662c02",
   "metadata": {},
   "outputs": [],
   "source": [
    "DATASET_NAME = \"merlin\"\n",
    "dataset_path = Path.home() / DATASET_NAME\n",
    "\n",
    "# exist_ok makes re-running this cell safe; Repository reuses an existing\n",
    "# local clone, so `repo` is always bound for the push below.\n",
    "repo_url = create_repo(DATASET_NAME, repo_type=\"dataset\", exist_ok=True)\n",
    "repo = Repository(local_dir=str(dataset_path), clone_from=repo_url)\n",
    "repo.lfs_track(\"*.jsonl\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "005d059f-5de4-4f14-bde3-0cc9ff2435c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "MERLIN_EXMARALDA_BASE = Path.home() / (\n",
    "    \"Downloads/MERLIN Written Learner Corpus for Czech, German, Italian 1.1/merlin-exmaralda-v1.1\"\n",
    ")\n",
    "\n",
    "for lang in (\"german\", \"czech\", \"italian\"):\n",
    "    lang_docs = []\n",
    "    # Sort for deterministic sentence ids across runs.\n",
    "    for path in sorted((MERLIN_EXMARALDA_BASE / lang).glob(\"*.exb\")):\n",
    "        with open(path, encoding=\"utf-8\") as fp:\n",
    "            xml = HTMLParser(fp.read())\n",
    "        lang_docs.extend(process_doc(xml, path))\n",
    "    # to_json writes JSON Lines by default, matching the *.jsonl LFS pattern.\n",
    "    Dataset.from_pandas(pd.DataFrame(lang_docs)).to_json(dataset_path / f\"{lang}.jsonl\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9d396b2-98dc-4c04-950f-0332a3a6d751",
   "metadata": {},
   "outputs": [],
   "source": [
    "repo.push_to_hub(commit_message=\"Add MERLIN original/corrected sentence pairs\")"
   ]
  }
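  ,
  {
   "cell_type": "markdown",
   "id": "b3a1c5d2-0f4e-4c6a-9e2b-1a7d8c3f5e90",
   "metadata": {},
   "source": [
    "As a quick check, the exported JSON Lines files can be reloaded with `load_dataset`. This is a minimal sketch that reads from the local clone in `dataset_path` and uses the three language names as split names; it only assumes the files written above exist."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4d2e6f3-1a5b-4d7c-8f3a-2b8e9d4a6f01",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch: reload the exported files to verify the round trip.\n",
    "merlin = load_dataset(\n",
    "    \"json\",\n",
    "    data_files={\n",
    "        lang: str(dataset_path / f\"{lang}.jsonl\") for lang in (\"german\", \"czech\", \"italian\")\n",
    "    },\n",
    ")\n",
    "print(merlin)\n",
    "print(merlin[\"german\"][0])"
   ]
  }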
"file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.7" } }, "nbformat": 4, "nbformat_minor": 5 }