{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "0b473da4-70d7-482d-a44a-445835be7d6e",
   "metadata": {},
   "source": [
    "### Test using finetuned Llama model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6f6f816-07eb-430b-979f-60e91b009dd0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from trl import SFTTrainer\n",
    "from datasets import load_dataset\n",
    "from transformers import TrainingArguments, TextStreamer\n",
    "from unsloth.chat_templates import get_chat_template\n",
    "from unsloth import FastLanguageModel, is_bfloat16_supported\n",
    "import json\n",
    "import re\n",
    "from tqdm import tqdm\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import ast"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c13e95c0-4b35-4a41-bc7c-dfd0f0493f74",
   "metadata": {},
   "source": [
    "#### Define path, system, and model path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c2ab96b-2a19-4666-84bf-29789f5b9ef6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dictionary defining the system role and its instruction for categorization;\n",
    "# it is prepended to every prompt by make_input_data.\n",
    "role_dict = {\n",
    "    'role': 'system',\n",
    "    'content': 'Categorize the paragraph given. Select one or more categories from \"synthesis condition\" and \"property\". If the paragraph does not fit into either of these categories, choose \"else\". \"property\" paragraph must include specific numerical value of the property. ex) surface area of 2500 m2/g'\n",
    "}\n",
    "\n",
    "# Directory containing the fine-tuned categorization checkpoint\n",
    "model_path = \"text_categorize_1/\"\n",
    "\n",
    "# Load the CSV files containing categorized results\n",
    "# NOTE(review): absolute, user-specific paths -- consider a configurable DATA_DIR\n",
    "d1 = pd.read_csv('/home/users/seunghh/l2m3_revision/data/categorize_results_else.csv', encoding='utf-8')\n",
    "d2 = pd.read_csv('/home/users/seunghh/l2m3_revision/data/categorize_results.csv', encoding='utf-8')\n",
    "# Align d2's header names to d1's so the two frames can be concatenated\n",
    "d2.columns = d1.columns\n",
    "df = pd.concat([d1, d2])\n",
    "\n",
    "# Extract the classification column (reference labels) as a list\n",
    "true_list = df['Classification'].tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49167921-9aae-47d3-aa07-9ec435ccf9e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def read_files(file_name):\n",
    "    \"\"\"\n",
    "    Read a JSONL file line-by-line and parse each line into a JSON object.\n",
    "\n",
    "    Returns a list of the parsed objects; non-empty lines that fail to\n",
    "    parse are reported and skipped.\n",
    "    \"\"\"\n",
    "    data_list = []\n",
    "    # JSON text is UTF-8 by spec; be explicit instead of relying on the\n",
    "    # platform default encoding (e.g. cp1252 on Windows).\n",
    "    with open(file_name, 'r', encoding='utf-8') as g:\n",
    "        for line_number, line in enumerate(g, start=1):\n",
    "            line = line.strip()  # Remove whitespace\n",
    "            if line:  # Skip empty lines\n",
    "                try:\n",
    "                    json_obj = json.loads(line)\n",
    "                    data_list.append(json_obj)\n",
    "                except json.JSONDecodeError as e:\n",
    "                    # Report the offending line but keep going\n",
    "                    print(f\"Error parsing JSON on line {line_number}: {e}\")\n",
    "    return data_list\n",
    "\n",
    "def parse_template_to_dicts(template_text):\n",
    "    \"\"\"\n",
    "    Split a chat transcript on <|im_start|> markers and convert each\n",
    "    segment into a {'role': ..., 'content': ...} dict. The first line of\n",
    "    a segment is the role; everything after it is the content.\n",
    "    \"\"\"\n",
    "    parsed = []\n",
    "    for segment in re.split(r\"<\\|im_start\\|>\", template_text):\n",
    "        segment = segment.strip()\n",
    "        if not segment:\n",
    "            continue\n",
    "        # partition() yields an empty tail when the segment has no newline\n",
    "        role, _, body = segment.partition(\"\\n\")\n",
    "        parsed.append({\"role\": role.strip(), \"content\": body.strip()})\n",
    "    return parsed\n",
    "\n",
    "def make_input_data(df):\n",
    "    \"\"\"\n",
    "    Build the chat payload for every dataframe row: the shared system\n",
    "    prompt (role_dict) followed by the row's Clean_Text as the user turn.\n",
    "    \"\"\"\n",
    "    return [\n",
    "        [role_dict, {'role': 'user', 'content': row['Clean_Text']}]\n",
    "        for _, row in df.iterrows()\n",
    "    ]\n",
    "\n",
    "def make_clean_data(true_list):\n",
    "    \"\"\"\n",
    "    Normalize raw classification strings into lists of clean label strings.\n",
    "\n",
    "    Each entry is split on commas; every piece is stripped of list/quote\n",
    "    punctuation and escape characters, re-quoted, and parsed with\n",
    "    ast.literal_eval. Empty results are dropped.\n",
    "    \"\"\"\n",
    "    cleaned = []\n",
    "    for raw in tqdm(true_list):\n",
    "        labels = []\n",
    "        for piece in raw.split(','):\n",
    "            token = piece.strip()\n",
    "            # Peel off surrounding list brackets and quote characters\n",
    "            token = token.strip('[\"').strip('\"]')\n",
    "            token = token.replace(\"',\", \"'\").replace(\"\\\\'\", \"'\")\n",
    "            # Drop trailing commas and any remaining wrapping quotes\n",
    "            token = token.rstrip(',').strip(\"'\\\"\")\n",
    "            # Re-wrap so literal_eval yields a plain string\n",
    "            label = ast.literal_eval(f\"'{token}'\")\n",
    "            if label:\n",
    "                labels.append(label)\n",
    "        cleaned.append(labels)\n",
    "    return cleaned\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1f016e65-7b77-4d73-a40f-3237a15d7655",
   "metadata": {},
   "source": [
    "#### Create Model and infer for test dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aa07c851-c1e6-431e-a146-fde17345e5a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the inference payloads from the combined dataframe\n",
    "data = make_input_data(df)\n",
    "\n",
    "# Restore the fine-tuned checkpoint; it was saved in 16-bit precision,\n",
    "# so 4-bit loading is disabled and the dtype is auto-detected\n",
    "model, tokenizer = FastLanguageModel.from_pretrained(\n",
    "    model_path,\n",
    "    load_in_4bit=False,\n",
    "    dtype=None,\n",
    ")\n",
    "\n",
    "# Switch the model into inference mode\n",
    "inference_model = FastLanguageModel.for_inference(model)\n",
    "\n",
    "predicted_list = []\n",
    "for messages in tqdm(data):\n",
    "    # Render the chat messages into model-ready token ids on the GPU\n",
    "    inputs = tokenizer.apply_chat_template(\n",
    "        messages,\n",
    "        tokenize=True,\n",
    "        add_generation_prompt=True,\n",
    "        return_tensors=\"pt\",\n",
    "    ).to(\"cuda\")\n",
    "\n",
    "    # Stream generated tokens to stdout as they are produced\n",
    "    text_streamer = TextStreamer(tokenizer)\n",
    "\n",
    "    outputs = inference_model.generate(\n",
    "        input_ids=inputs,\n",
    "        streamer=text_streamer,\n",
    "        max_new_tokens=128,\n",
    "        use_cache=True,\n",
    "    )\n",
    "\n",
    "    # Decode, re-parse the transcript, and keep the assistant's final reply\n",
    "    decoded_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
    "    parsed_messages = parse_template_to_dicts(decoded_text)\n",
    "    predicted_list.append(parsed_messages[-1]['content'])\n",
    "\n",
    "# Normalize both the reference and the predicted label strings\n",
    "true_clean_list = make_clean_data(true_list)\n",
    "predicted_clean_list = make_clean_data(predicted_list)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
