{ "cells": [ { "cell_type": "code", "execution_count": 65, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\Admin\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\transformers\\optimization.py:429: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n", " warnings.warn(\n", "Epoch 1/3: 100%|██████████| 1293/1293 [35:14<00:00, 1.64s/batch, Loss=0.000204]\n", "Epoch 2/3: 100%|██████████| 1293/1293 [35:04<00:00, 1.63s/batch, Loss=0.000154]\n", "Epoch 3/3: 100%|██████████| 1293/1293 [34:39<00:00, 1.61s/batch, Loss=7.12e-5] \n" ] } ], "source": [ "import torch\n", "from torch.utils.data import Dataset, DataLoader\n", "from transformers import AutoTokenizer, AutoModelForMaskedLM, AdamW\n", "import pandas as pd\n", "from tqdm import tqdm\n", "\n", "class GrammarCorrectionDataset(Dataset):\n", " def __init__(self, sentences, corrected_sentences, tokenizer, max_length=128):\n", " self.sentences = sentences\n", " self.corrected_sentences = corrected_sentences\n", " self.tokenizer = tokenizer\n", " self.max_length = max_length\n", "\n", " def __len__(self):\n", " return len(self.sentences)\n", " \n", " def __getitem__(self, idx):\n", " input_sentence = self.sentences[idx]\n", " corrected_sentence = self.corrected_sentences[idx]\n", "\n", " # Tokenize the input and corrected sentences separately\n", " inputs = self.tokenizer(\n", " [input_sentence], # Pass as a list\n", " [corrected_sentence], # Pass as a list\n", " return_tensors=\"pt\",\n", " padding=\"max_length\",\n", " truncation=True,\n", " max_length=self.max_length\n", " )\n", "\n", " return {\n", " \"input_ids\": inputs[\"input_ids\"].flatten(),\n", " \"attention_mask\": inputs[\"attention_mask\"].flatten(),\n", " \"labels\": inputs[\"input_ids\"].flatten() # Use input_ids as labels for MLM\n", " }\n", "\n", "\n", "\n", "def pad_collate(batch):\n", " # Find the length of the longest sentence in the batch\n", " max_len = max(len(batch_item[\"input_ids\"]) for batch_item in batch)\n", " \n", " # Pad each input to the length of the longest sentence in the batch\n", " for batch_item in batch:\n", " input_ids = batch_item[\"input_ids\"]\n", " attention_mask = batch_item[\"attention_mask\"]\n", " labels = batch_item[\"labels\"]\n", " \n", " padded_input_ids = torch.nn.functional.pad(input_ids, (0, max_len - len(input_ids)), value=tokenizer.pad_token_id)\n", " padded_attention_mask = torch.nn.functional.pad(attention_mask, (0, max_len - len(attention_mask)), value=0)\n", " padded_labels = torch.nn.functional.pad(labels, (0, max_len - len(labels)), value=tokenizer.pad_token_id)\n", " \n", " batch_item[\"input_ids\"] = padded_input_ids\n", " batch_item[\"attention_mask\"] = padded_attention_mask\n", " batch_item[\"labels\"] = padded_labels\n", " \n", " return {\n", " \"input_ids\": torch.stack([batch_item[\"input_ids\"] for batch_item in batch]),\n", " \"attention_mask\": torch.stack([batch_item[\"attention_mask\"] for batch_item in batch]),\n", " \"labels\": torch.stack([batch_item[\"labels\"] for batch_item in batch])\n", " }\n", "\n", "\n", "data = pd.read_csv(r'D:\\Thesis\\test_bert_data.csv')\n", "data = data.dropna()\n", "sentences = [str(i) for i in data['wrong'].values] # Convert Series to list\n", "corrected_sentences = [str(i) for i in data['right1'].values]\n", "\n", "# Initialize tokenizer and model\n", "tokenizer = AutoTokenizer.from_pretrained(\"jcblaise/roberta-tagalog-base\")\n", "model = AutoModelForMaskedLM.from_pretrained(\"jcblaise/roberta-tagalog-base\")\n", "\n", "# Create dataset and dataloader\n", "dataset = GrammarCorrectionDataset(sentences, corrected_sentences, tokenizer)\n", "dataloader = DataLoader(dataset, batch_size=4, shuffle=True, collate_fn=pad_collate)\n", "\n", "\n", "# Define optimizer and loss function\n", "optimizer = AdamW(model.parameters(), lr=5e-5)\n", "\n", "num_epochs = 3\n", "\n", "model.train()\n", "for epoch in range(num_epochs):\n", " tqdm_dataloader = tqdm(dataloader, desc=f\"Epoch {epoch + 1}/{num_epochs}\", unit=\"batch\")\n", " for batch in tqdm_dataloader:\n", " input_ids = batch[\"input_ids\"]\n", " attention_mask = batch[\"attention_mask\"]\n", " labels = batch[\"labels\"]\n", "\n", " outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)\n", " loss = outputs.loss\n", "\n", " optimizer.zero_grad()\n", " loss.backward()\n", " optimizer.step()\n", "\n", " # Update tqdm progress bar description with current loss\n", " tqdm_dataloader.set_postfix({\"Loss\": loss.item()})\n", "\n", " # Close the tqdm progress bar for the epoch\n", " tqdm_dataloader.close()\n", "\n", "# Save the fine-tuned model\n", "#model.save_pretrained(\"fine_tuned_model\")\n" ] }, { "cell_type": "code", "execution_count": 66, "metadata": {}, "outputs": [], "source": [ "model.save_pretrained(\"fine_tuned_model\")" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Original: Takbo takbo ang mga bata sa labas\n", "Corrected: Takbo ng takbo ang mga bata sa labas\n", "\n" ] } ], "source": [ "import torch\n", "from transformers import AutoTokenizer, AutoModelForMaskedLM\n", "\n", "# Load the fine-tuned model\n", "model = AutoModelForMaskedLM.from_pretrained(\"fine_tuned_model\")\n", "\n", "# Initialize tokenizer\n", "tokenizer = AutoTokenizer.from_pretrained(\"fine_tuned_model\")\n", "\n", "# Example new data\n", "new_data = [\n", " \"Takbo takbo ang mga bata sa labas\"\n", "]\n", "\n", "# Tokenize the new data\n", "tokenized_data = tokenizer(new_data, return_tensors=\"pt\", padding=True, truncation=True)\n", "\n", "# Pass the tokenized data through the model to get predictions\n", "with torch.no_grad():\n", " outputs = model(**tokenized_data)\n", "\n", "# Decode the predicted token IDs back to text\n", "predicted_texts = tokenizer.batch_decode(outputs.logits.argmax(dim=-1))\n", "\n", "# Print the original sentences and their corrected versions\n", "for original, corrected in zip(new_data, predicted_texts):\n", " print(f\"Original: {original}\")\n", " print(f\"Corrected: {corrected}\")\n", " print()\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.1" } }, "nbformat": 4, "nbformat_minor": 2 }