{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "8f719117-0591-4f80-a071-16b8b1a0a829", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2024-05-10 12:21:50.698143: I external/local_tsl/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n", "2024-05-10 12:21:50.701405: I external/local_tsl/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n", "2024-05-10 12:21:50.745330: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "2024-05-10 12:21:51.413922: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n" ] } ], "source": [ "import flair\n", "\n", "from flair.datasets.sequence_labeling import ColumnCorpus\n", "from flair.file_utils import cached_path\n", "\n", "from pathlib import Path\n", "from typing import Optional, Union" ] }, { "cell_type": "code", "execution_count": 2, "id": "0ddcffce-b914-4250-848a-e8cb5b9cc049", "metadata": {}, "outputs": [], "source": [ "class NER_HISTNERO(ColumnCorpus):\n", " def __init__(\n", " self,\n", " base_path: Optional[Union[str, Path]] = None,\n", " in_memory: bool = True,\n", " **corpusargs,\n", " ) -> None:\n", " base_path = flair.cache_root / \"datasets\" if not base_path else Path(base_path)\n", " dataset_name = self.__class__.__name__.lower()\n", " data_folder = base_path / dataset_name\n", " data_path = flair.cache_root / \"datasets\" / dataset_name\n", "\n", " column_format = {0: \"text\", 1: \"ner\"}\n", "\n", " hf_download_path = \"https://huggingface.co/datasets/stefan-it/histnero/resolve/main\"\n", "\n", " for split in [\"train\", \"dev\", \"test\"]:\n", " cached_path(f\"{hf_download_path}/{split}.tsv\", data_path)\n", " \n", " super().__init__(\n", " data_folder,\n", " column_format = {0: \"text\", 1: \"ner\"},\n", " column_delimiter=\"\\t\",\n", " document_separator_token=\"-DOCSTART-\",\n", " in_memory=in_memory,\n", " comment_symbol=\"# \",\n", " **corpusargs,\n", " )" ] }, { "cell_type": "code", "execution_count": 3, "id": "caf404fb-b4f1-4688-81ef-abbd950beb0a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2024-05-10 12:21:56,117 Reading data from /home/stefan/.flair/datasets/ner_histnero\n", "2024-05-10 12:21:56,118 Train: /home/stefan/.flair/datasets/ner_histnero/train.tsv\n", "2024-05-10 12:21:56,119 Dev: /home/stefan/.flair/datasets/ner_histnero/dev.tsv\n", "2024-05-10 12:21:56,120 Test: /home/stefan/.flair/datasets/ner_histnero/test.tsv\n" ] } ], "source": [ "corpus = NER_HISTNERO()" ] }, { "cell_type": "code", "execution_count": 4, "id": "c38074b8-36c6-47f8-9a9c-b9685514e4bf", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Corpus: 8149 train + 1114 dev + 1117 test sentences\n" ] } ], "source": [ "print(str(corpus))" ] }, { "cell_type": "markdown", "id": "7f1bfac4-d09f-477a-b1d8-0543d2e4368c", "metadata": {}, "source": [ "# Tests\n", "\n", "We now check the number of parsed sentences and compare with the reference values from the HistNERo paper." ] }, { "cell_type": "code", "execution_count": 5, "id": "f1dc9c90-c14c-44c6-9716-5e341c73d370", "metadata": {}, "outputs": [], "source": [ "gold_training_split_sentences = {\n", " \"train\": 8_020,\n", " \"dev\": 1_003,\n", " \"test\": 1_003,\n", "}\n", "\n", "flair_corpus_mapping = {\n", " \"train\": corpus.train,\n", " \"dev\": corpus.dev,\n", " \"test\": corpus.test,\n", "}" ] }, { "cell_type": "code", "execution_count": 8, "id": "957921b4-53f6-4c49-a916-339ff0296eee", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✔️ Number of parsed sentences for train split\n", "✔️ Number of parsed sentences for dev split\n", "✔️ Number of parsed sentences for test split\n", "✔️ Number of parsed sentences for complete dataset\n" ] } ], "source": [ "actual_total_sentences = 0\n", "gold_total_sentences = sum(gold_training_split_sentences.values())\n", "\n", "for dataset_split in [\"train\", \"dev\", \"test\"]:\n", " gold_sentences = gold_training_split_sentences[dataset_split]\n", "\n", " actual_sentences = 0\n", " \n", " for sentence in flair_corpus_mapping[dataset_split]:\n", " # We do not count document marker as sentences!\n", " if sentence[0].text.startswith(\"-DOCSTART-\"):\n", " continue\n", " actual_sentences += 1\n", "\n", " actual_total_sentences += actual_sentences\n", "\n", " assert gold_sentences == actual_sentences, f\"Mismatch of parsed sentences for {dataset_split} split!\"\n", "\n", " print(f\"✔️ Number of parsed sentences for {dataset_split} split\")\n", "\n", "assert actual_total_sentences == gold_total_sentences, f\"Mismatch in total parsed sentences!\"\n", "\n", "print(f\"✔️ Number of parsed sentences for complete dataset\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.6" } }, "nbformat": 4, "nbformat_minor": 5 }