Upload xlmr-ner.ipynb
xlmr-ner.ipynb (ADDED)
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.14","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[],"dockerImageVersionId":30787,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","execution":{"iopub.status.busy":"2024-10-10T09:28:17.789573Z","iopub.execute_input":"2024-10-10T09:28:17.789974Z","iopub.status.idle":"2024-10-10T09:28:17.796087Z","shell.execute_reply.started":"2024-10-10T09:28:17.789935Z","shell.execute_reply":"2024-10-10T09:28:17.795140Z"},"trusted":true},"execution_count":70,"outputs":[]},{"cell_type":"code","source":"import torch\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoTokenizer, XLMRobertaForTokenClassification, AdamW, get_linear_schedule_with_warmup\nfrom datasets import DatasetDict\nfrom seqeval.metrics import classification_report","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:31:09.927528Z","iopub.execute_input":"2024-10-10T09:31:09.928160Z","iopub.status.idle":"2024-10-10T09:31:09.933799Z","shell.execute_reply.started":"2024-10-10T09:31:09.928120Z","shell.execute_reply":"2024-10-10T09:31:09.932931Z"},"trusted":true},"execution_count":77,"outputs":[]},{"cell_type":"code","source":"from datasets import load_dataset\ndataset = load_dataset('masakhane/masakhaner2', 'kin') ","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:31:10.622463Z","iopub.execute_input":"2024-10-10T09:31:10.623463Z","iopub.status.idle":"2024-10-10T09:31:11.330817Z","shell.execute_reply.started":"2024-10-10T09:31:10.623419Z","shell.execute_reply":"2024-10-10T09:31:11.329841Z"},"trusted":true},"execution_count":78,"outputs":[]},{"cell_type":"code","source":"print(\"Original dataset sizes:\")\nprint(f\"Train: {len(dataset['train'])}\")\nprint(f\"Validation: {len(dataset['validation'])}\")\nprint(f\"Test: 
{len(dataset['test'])}\")","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:31:13.439874Z","iopub.execute_input":"2024-10-10T09:31:13.440252Z","iopub.status.idle":"2024-10-10T09:31:13.445759Z","shell.execute_reply.started":"2024-10-10T09:31:13.440213Z","shell.execute_reply":"2024-10-10T09:31:13.444745Z"},"trusted":true},"execution_count":79,"outputs":[{"name":"stdout","text":"Original dataset sizes:\nTrain: 7825\nValidation: 1118\nTest: 2235\n","output_type":"stream"}]},{"cell_type":"code","source":"def tokenize_and_align_labels(examples, tokenizer, max_length=128):\n tokenized_inputs = tokenizer(examples[\"tokens\"], truncation=True, is_split_into_words=True, \n max_length=max_length, padding=\"max_length\")\n\n labels = []\n for i, label in enumerate(examples[\"ner_tags\"]):\n word_ids = tokenized_inputs.word_ids(batch_index=i)\n previous_word_idx = None\n label_ids = []\n for word_idx in word_ids:\n if word_idx is None:\n label_ids.append(-100)\n elif word_idx != previous_word_idx:\n label_ids.append(label[word_idx])\n else:\n label_ids.append(-100)\n previous_word_idx = word_idx\n labels.append(label_ids)\n\n tokenized_inputs[\"labels\"] = labels\n return tokenized_inputs","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:31:13.983275Z","iopub.execute_input":"2024-10-10T09:31:13.983670Z","iopub.status.idle":"2024-10-10T09:31:13.990882Z","shell.execute_reply.started":"2024-10-10T09:31:13.983632Z","shell.execute_reply":"2024-10-10T09:31:13.989970Z"},"trusted":true},"execution_count":80,"outputs":[]},{"cell_type":"code","source":"model_name = 'Davlan/afro-xlmr-base'\ntokenizer = AutoTokenizer.from_pretrained(model_name)","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:31:37.056557Z","iopub.execute_input":"2024-10-10T09:31:37.056927Z","iopub.status.idle":"2024-10-10T09:31:38.513591Z","shell.execute_reply.started":"2024-10-10T09:31:37.056894Z","shell.execute_reply":"2024-10-10T09:31:38.512784Z"},"trusted":true},"execution_count":83,"outputs":[]},{"cell_type":"code","source":"tokenized_datasets = dataset.map(tokenize_and_align_labels, batched=True, \n fn_kwargs={\"tokenizer\": tokenizer})","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:31:39.503200Z","iopub.execute_input":"2024-10-10T09:31:39.503601Z","iopub.status.idle":"2024-10-10T09:31:42.720584Z","shell.execute_reply.started":"2024-10-10T09:31:39.503563Z","shell.execute_reply":"2024-10-10T09:31:42.719623Z"},"trusted":true},"execution_count":84,"outputs":[{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/7825 [00:00<?, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"36a483105dcb49cd9ce7cace72882e97"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/1118 [00:00<?, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"360560558589415e841b515b795a9f99"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/2235 [00:00<?, ? 
examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"4b625e4edd0840d69be9396f75f5f1a9"}},"metadata":{}}]},{"cell_type":"code","source":"tokenized_datasets","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:31:51.951371Z","iopub.execute_input":"2024-10-10T09:31:51.951818Z","iopub.status.idle":"2024-10-10T09:31:51.958632Z","shell.execute_reply.started":"2024-10-10T09:31:51.951770Z","shell.execute_reply":"2024-10-10T09:31:51.957717Z"},"trusted":true},"execution_count":85,"outputs":[{"execution_count":85,"output_type":"execute_result","data":{"text/plain":"DatasetDict({\n train: Dataset({\n features: ['id', 'tokens', 'ner_tags', 'input_ids', 'attention_mask', 'labels'],\n num_rows: 7825\n })\n validation: Dataset({\n features: ['id', 'tokens', 'ner_tags', 'input_ids', 'attention_mask', 'labels'],\n num_rows: 1118\n })\n test: Dataset({\n features: ['id', 'tokens', 'ner_tags', 'input_ids', 'attention_mask', 'labels'],\n num_rows: 2235\n })\n})"},"metadata":{}}]},{"cell_type":"code","source":"print(\"Dataset sizes after tokenization:\")\nprint(f\"Train: {len(tokenized_datasets['train'])}\")\nprint(f\"Validation: {len(tokenized_datasets['validation'])}\")\nprint(f\"Test: {len(tokenized_datasets['test'])}\")\n\ncolumns_to_keep = ['input_ids', 'attention_mask', 'labels']\ntokenized_datasets = tokenized_datasets.remove_columns([col for col in tokenized_datasets['train'].column_names if col not in columns_to_keep])\n\nprint(\"Dataset sizes after column selection:\")\nprint(f\"Train: {len(tokenized_datasets['train'])}\")\nprint(f\"Validation: {len(tokenized_datasets['validation'])}\")\nprint(f\"Test: {len(tokenized_datasets['test'])}\")\n\n# Set the format of the datasets to PyTorch tensors\ntokenized_datasets = tokenized_datasets.with_format(\"torch\")","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:32:10.895341Z","iopub.execute_input":"2024-10-10T09:32:10.896459Z","iopub.status.idle":"2024-10-10T09:32:10.915439Z","shell.execute_reply.started":"2024-10-10T09:32:10.896381Z","shell.execute_reply":"2024-10-10T09:32:10.914449Z"},"trusted":true},"execution_count":86,"outputs":[{"name":"stdout","text":"Dataset sizes after tokenization:\nTrain: 7825\nValidation: 1118\nTest: 2235\nDataset sizes after column selection:\nTrain: 7825\nValidation: 1118\nTest: 2235\n","output_type":"stream"}]},{"cell_type":"code","source":"train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, batch_size=16)\neval_dataloader = DataLoader(tokenized_datasets[\"validation\"], batch_size=16)\ntest_dataloader = DataLoader(tokenized_datasets[\"test\"], batch_size=16)\n\nprint(\"DataLoader sizes:\")\nprint(f\"Train: {len(train_dataloader)}\")\nprint(f\"Validation: {len(eval_dataloader)}\")\nprint(f\"Test: {len(test_dataloader)}\")","metadata":{"execution":{"iopub.status.busy":"2024-10-10T09:32:23.871960Z","iopub.execute_input":"2024-10-10T09:32:23.872305Z","iopub.status.idle":"2024-10-10T09:32:23.878803Z","shell.execute_reply.started":"2024-10-10T09:32:23.872272Z","shell.execute_reply":"2024-10-10T09:32:23.877879Z"},"trusted":true},"execution_count":87,"outputs":[{"name":"stdout","text":"DataLoader sizes:\nTrain: 490\nValidation: 70\nTest: 140\n","output_type":"stream"}]},{"cell_type":"code","source":"label_list = dataset[\"train\"].features[\"ner_tags\"].feature.names\nnum_labels = len(label_list)\nid2label = {i: label for i, label in enumerate(label_list)}\nlabel2id = {v: k for k, v in id2label.items()}\n\nmodel = 
In [88]:
label_list = dataset["train"].features["ner_tags"].feature.names
num_labels = len(label_list)
id2label = {i: label for i, label in enumerate(label_list)}
label2id = {v: k for k, v in id2label.items()}

model = XLMRobertaForTokenClassification.from_pretrained(model_name,
                                                         num_labels=num_labels,
                                                         id2label=id2label,
                                                         label2id=label2id)

# Setup training
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

Out [88] (stderr):
Some weights of XLMRobertaForTokenClassification were not initialized from the model checkpoint at Davlan/afro-xlmr-base and are newly initialized: ['classifier.bias', 'classifier.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.

Out [88]: (full module repr abridged) XLMRobertaForTokenClassification with a 12-layer, 768-hidden XLM-R encoder and a freshly initialized Linear(768 -> 9) classifier head.

In [89]:
optimizer = AdamW(model.parameters(), lr=5e-5)

num_epochs = 3
num_training_steps = num_epochs * len(train_dataloader)
lr_scheduler = get_linear_schedule_with_warmup(optimizer,
                                               num_warmup_steps=0,
                                               num_training_steps=num_training_steps)
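With num_warmup_steps=0 the schedule is a pure linear decay from 5e-5 to zero. A common variant (an assumption, not used in this run) warms the learning rate up over the first 10% of steps before decaying:

# Variant: linear warmup over the first 10% of training steps
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=int(0.1 * num_training_steps),
    num_training_steps=num_training_steps)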
In [90]:
# Training loop
def train_loop(dataloader, model, optimizer, lr_scheduler, epoch):
    model.train()
    for batch in dataloader:
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
    print(f"Epoch {epoch+1}/{num_epochs} completed")

# Evaluation loop
def eval_loop(dataloader, model):
    model.eval()
    predictions, true_labels = [], []
    for batch in dataloader:
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        logits = outputs.logits
        predictions.extend(torch.argmax(logits, dim=2).cpu().numpy())
        true_labels.extend(batch["labels"].cpu().numpy())

    # Drop -100 positions so seqeval scores only real word-level tags
    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, true_labels)
    ]
    true_labels = [
        [label_list[l] for l in label if l != -100]
        for label in true_labels
    ]
    return true_predictions, true_labels

In [91]:
for epoch in range(num_epochs):
    train_loop(train_dataloader, model, optimizer, lr_scheduler, epoch)
    predictions, labels = eval_loop(eval_dataloader, model)
    print(classification_report(labels, predictions))

# Final evaluation on test set
test_predictions, test_labels = eval_loop(test_dataloader, model)
print("Final Test Results:")
print(classification_report(test_labels, test_predictions))

Out [91]:
Epoch 1/3 completed
              precision    recall  f1-score   support

        DATE       0.65      0.78      0.71       373
         LOC       0.85      0.88      0.87       524
         ORG       0.69      0.88      0.78       512
         PER       0.95      0.91      0.93       667

   micro avg       0.79      0.87      0.83      2076
   macro avg       0.79      0.86      0.82      2076
weighted avg       0.81      0.87      0.84      2076

Epoch 2/3 completed
              precision    recall  f1-score   support

        DATE       0.72      0.81      0.76       373
         LOC       0.87      0.90      0.88       524
         ORG       0.77      0.88      0.82       512
         PER       0.95      0.94      0.94       667

   micro avg       0.84      0.89      0.86      2076
   macro avg       0.83      0.88      0.85      2076
weighted avg       0.84      0.89      0.87      2076

Epoch 3/3 completed
              precision    recall  f1-score   support

        DATE       0.73      0.82      0.77       373
         LOC       0.87      0.91      0.89       524
         ORG       0.80      0.88      0.84       512
         PER       0.95      0.95      0.95       667

   micro avg       0.85      0.90      0.87      2076
   macro avg       0.84      0.89      0.86      2076
weighted avg       0.85      0.90      0.87      2076

Final Test Results:
              precision    recall  f1-score   support

        DATE       0.73      0.79      0.76       791
         LOC       0.91      0.91      0.91      1592
         ORG       0.75      0.81      0.78       685
         PER       0.92      0.98      0.95       712

   micro avg       0.84      0.88      0.86      3780
   macro avg       0.83      0.87      0.85      3780
weighted avg       0.84      0.88      0.86      3780
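The run above always keeps the weights from the last epoch. A small variant of the loop (a sketch, not in the original notebook; the output directory name is hypothetical) would track validation micro-F1 and keep the best checkpoint instead:

from seqeval.metrics import f1_score

# Sketch: checkpoint on best validation F1 rather than the final epoch
best_f1 = 0.0
for epoch in range(num_epochs):
    train_loop(train_dataloader, model, optimizer, lr_scheduler, epoch)
    predictions, labels = eval_loop(eval_dataloader, model)
    f1 = f1_score(labels, predictions)  # entity-level micro F1 (y_true, y_pred)
    if f1 > best_f1:
        best_f1 = f1
        model.save_pretrained("afro_xlmr_ner_best")  # hypothetical output dir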
In [92]:
model.save_pretrained('afro_xlmr_ner')
tokenizer.save_pretrained('afro_xlmr_ner')

Out [92]:
('afro_xlmr_ner/tokenizer_config.json',
 'afro_xlmr_ner/special_tokens_map.json',
 'afro_xlmr_ner/sentencepiece.bpe.model',
 'afro_xlmr_ner/added_tokens.json',
 'afro_xlmr_ner/tokenizer.json')
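Once saved, the fine-tuned model can be reloaded for inference. A minimal sketch (not part of the upload; the example sentence is illustrative):

from transformers import pipeline

# Reload the saved checkpoint and merge B-/I- pieces into entity spans
ner = pipeline("token-classification",
               model="afro_xlmr_ner", tokenizer="afro_xlmr_ner",
               aggregation_strategy="simple")
print(ner("Perezida Paul Kagame yavugiye i Kigali."))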