{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "3ab2e823-50c9-40d4-9401-3ed7869da6e2", "metadata": {}, "outputs": [], "source": [ "from datasets import load_dataset" ] }, { "cell_type": "code", "execution_count": 2, "id": "241ea0f7-02bf-4a3e-845c-e262b1d32031", "metadata": {}, "outputs": [], "source": [ "# Use specific revision for reproducibility!\n", "# See https://huggingface.co/datasets/avramandrei/histnero\n", "revision = \"433ca166efac28c952813c0e78bf301643cf5af3\"\n", "\n", "ds = load_dataset(\"avramandrei/histnero\", revision=revision)" ] }, { "cell_type": "code", "execution_count": 4, "id": "66878e9e-83e8-4010-b81c-cefbc2ef0da7", "metadata": {}, "outputs": [], "source": [ "# We are grouping together documents together first!\n", "def perform_document_grouping(dataset_split):\n", " # Document identifier -> Training example\n", " document_mapping = {}\n", "\n", " for document in dataset_split:\n", " doc_id = document[\"doc_id\"]\n", " if doc_id in document_mapping:\n", " document_mapping[doc_id].append(document)\n", " else:\n", " document_mapping[doc_id] = [document]\n", " return document_mapping\n", "\n", "def export_to_conll(grouped_dataset_split, export_filename):\n", " dataset_labels = ds[\"train\"].features[\"ner_tags\"].feature.names\n", " dataset_label_id_to_string = {idx: label_string for idx, label_string in enumerate(dataset_labels)}\n", "\n", " with open(export_filename, \"wt\") as f_out:\n", " for document_name, training_examples in grouped_dataset_split.items():\n", " f_out.write(\"-DOCSTART-\\tO\\n\\n\")\n", "\n", " for training_example in training_examples:\n", " tokens = training_example[\"tokens\"]\n", " ner_label_ids = training_example[\"ner_tags\"]\n", " ner_label_iobs = [dataset_label_id_to_string[ner_label_id] for ner_label_id in ner_label_ids]\n", "\n", " assert len(tokens) == len(ner_label_iobs)\n", "\n", " # Write some metadata first\n", " metadata = [\n", " {\"id\": training_example[\"id\"]},\n", " {\"doc_id\": training_example[\"doc_id\"]},\n", " {\"region\": training_example[\"region\"]},\n", " ]\n", "\n", " for metadata_entry in metadata:\n", " for metadata_name, metadata_value in metadata_entry.items():\n", " f_out.write(f\"# histnero:{metadata_name} = {metadata_value}\\n\")\n", " \n", " for token, ner_label_iob in zip(tokens, ner_label_iobs):\n", " f_out.write(f\"{token}\\t{ner_label_iob}\\n\")\n", "\n", " f_out.write(\"\\n\")" ] }, { "cell_type": "code", "execution_count": 7, "id": "afb1dc77-1cde-43d5-9d9a-e7b458c08bb5", "metadata": {}, "outputs": [], "source": [ "for dataset_split in [\"train\", \"valid\", \"test\"]:\n", " grouped_dataset = perform_document_grouping(ds[dataset_split])\n", "\n", " split_filename = \"dev\" if dataset_split == \"valid\" else dataset_split\n", " export_to_conll(grouped_dataset, f\"{split_filename}.tsv\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.6" } }, "nbformat": 4, "nbformat_minor": 5 }