{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Assess the performance of a LoRAHub model on RE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:42:53.291342Z",
     "iopub.status.busy": "2024-05-18T12:42:53.291022Z",
     "iopub.status.idle": "2024-05-18T12:42:56.227183Z",
     "shell.execute_reply": "2024-05-18T12:42:56.226700Z",
     "shell.execute_reply.started": "2024-05-18T12:42:53.291321Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "from lorahub.algorithm import *\n",
    "import json\n",
    "\n",
    "import sys, os \n",
     "def get_ds_list(root):\n",
     "    \"\"\"List dataset/adapter directories under `root`, grouped by task.\n",
     "\n",
     "    Expects `root` to contain 'ner/', 're/' and 'ee/' subdirectories and\n",
     "    returns three lists of full paths: (ner, re, ee), in that order.\n",
     "    \"\"\"\n",
     "    task = [\"ner\", \"re\", \"ee\"]\n",
     "\n",
     "    # NOTE(review): the local name 're' shadows the stdlib regex module,\n",
     "    # but only inside this function -- harmless here; rename if regexes\n",
     "    # are ever needed in this scope.\n",
     "    ner = []\n",
     "    re = []\n",
     "    ee = []\n",
     "\n",
     "    for t in task:\n",
     "        path = root + t + \"/\"\n",
     "        for ds in os.listdir(path):\n",
     "            if t == \"ner\":\n",
     "                ner.append(path + ds)\n",
     "            elif t == \"re\":\n",
     "                re.append(path + ds)\n",
     "            elif t == \"ee\":\n",
     "                ee.append(path + ds)\n",
     "    return ner, re, ee\n",
    "\n",
    "def get_lora_list(root, task=\"all\"):\n",
    "    ner, re, ee = get_ds_list(root)\n",
    "\n",
    "    if task == \"ner\":\n",
    "        return ner\n",
    "    elif task == \"re\":\n",
    "        return re\n",
    "    elif task == \"ee\":\n",
    "        return ee\n",
    "    else:\n",
    "        return ner + re + ee\n",
    "\n",
    "# print(get_lora_list(\"/mnt/workspace/save/t5_xl/\"))\n",
    "\n",
    "\n",
    "\n",
    "def get_examples_for_learning(p):\n",
    "    res = []\n",
    "    for path in p:\n",
    "        with open(path, 'r') as f:\n",
    "              js = json.load(f)\n",
    "        res.extend(js)\n",
    "    return res\n",
    "\n",
    "\n",
    "def get_examples_for_inference(p):\n",
    "    res = []\n",
    "    for path in p:\n",
    "        with open(path, 'r') as f:\n",
    "              js = json.load(f)\n",
    "        res.extend(js)\n",
    "    return res\n",
    "\n",
    "\n",
    "def get_lora_module_list(task):\n",
    "    pefix = \"/mnt/save/t_xl/\"\n",
    "\n",
    "    all_loras = ['re_task/duie_re', 'ee_task/ace05_tuple_ee', 'ee_task/casie_tuple_ee', 'ee_task/duee_tuple_ee', 'ee_task/genia_tuple_ee',\n",
    "                 'ee_task/phee_tuple_ee', 'ner_task/ace_ner', 'ner_task/cnerta_ner', 'ner_task/conll_ner',\n",
    "                 'ner_task/multinerd_ner', 're_task/conll04_re', 're_task/gids_re',\n",
    "                 're_task/nyt11_re']\n",
    "\n",
    "    ner_loras = [\"ace05\",  \"conll03\",  \"mit-movie\",  \"mit-restaurant\",  \"ontonotes\"  \"wnut17\"]\n",
    "\n",
    "    re_loras = ['re_task/duie_re', 're_task/conll04_re',  're_task/gids_re', 're_task/nyt11_re']\n",
    "\n",
    "    ee_loras = ['ee_task/duee_tuple_ee', 'ee_task/ace05_tuple_ee', 'ee_task/casie_tuple_ee', 'ee_task/genia_tuple_ee',\n",
    "                'ee_task/phee_tuple_ee']\n",
    "\n",
    "    if task == 'ner':\n",
    "        p = ner_loras\n",
    "    elif task == 're':\n",
    "        p = re_loras\n",
    "    elif task == 'ee':\n",
    "        p = ee_loras\n",
    "    elif task == 'all':\n",
    "        p = all_loras\n",
    "    else:\n",
    "        raise ValueError(\"task must be one of 'ner', 're', 'ee', 'all'\")\n",
    "\n",
    "    return [pefix + x for x in p]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecutionIndicator": {
     "show": false
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:42:56.869890Z",
     "iopub.status.busy": "2024-05-18T12:42:56.869436Z",
     "iopub.status.idle": "2024-05-18T12:50:38.436103Z",
     "shell.execute_reply": "2024-05-18T12:50:38.435468Z",
     "shell.execute_reply.started": "2024-05-18T12:42:56.869870Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "modules: ['/mnt/workspace/save/t5_xl/ner/wnut17', '/mnt/workspace/save/t5_xl/ner/mit-restaurant', '/mnt/workspace/save/t5_xl/ner/ace05', '/mnt/workspace/save/t5_xl/ner/conll03', '/mnt/workspace/save/t5_xl/ner/mit-movie', '/mnt/workspace/save/t5_xl/re/conll04', '/mnt/workspace/save/t5_xl/re/scierc', '/mnt/workspace/save/t5_xl/re/nyt11', '/mnt/workspace/save/t5_xl/re/gids', '/mnt/workspace/save/t5_xl/ee/phee', '/mnt/workspace/save/t5_xl/ee/casie', '/mnt/workspace/save/t5_xl/ee/ace05', '/mnt/workspace/save/t5_xl/ee/genia']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████| 2/2 [00:01<00:00,  1.54it/s]\n",
      "The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. \n",
      "The tokenizer class you load from this checkpoint is 'T5Tokenizer'. \n",
      "The class this function is called from is 'PreTrainedTokenizerFast'.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Begin to load lora modules\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "0it [00:00, ?it/s]/usr/local/lib/python3.10/dist-packages/peft/utils/save_and_load.py:195: UserWarning: Could not find a config file in /mnt/workspace/model_save/t5_xl - will assume that the vocabulary was not modified.\n",
      "  warnings.warn(\n",
      "1it [00:00,  5.25it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Loading /mnt/workspace/save/t5_xl/ner/wnut17 ...\n",
      "> Loading /mnt/workspace/save/t5_xl/ner/mit-restaurant ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "3it [00:00,  5.15it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Loading /mnt/workspace/save/t5_xl/ner/ace05 ...\n",
      "> Loading /mnt/workspace/save/t5_xl/ner/conll03 ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "5it [00:00,  5.26it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Loading /mnt/workspace/save/t5_xl/ner/mit-movie ...\n",
      "> Loading /mnt/workspace/save/t5_xl/re/conll04 ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "7it [00:01,  5.28it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Loading /mnt/workspace/save/t5_xl/re/scierc ...\n",
      "> Loading /mnt/workspace/save/t5_xl/re/nyt11 ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "9it [00:01,  5.29it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Loading /mnt/workspace/save/t5_xl/re/gids ...\n",
      "> Loading /mnt/workspace/save/t5_xl/ee/phee ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "11it [00:02,  5.22it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Loading /mnt/workspace/save/t5_xl/ee/casie ...\n",
      "> Loading /mnt/workspace/save/t5_xl/ee/ace05 ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "12it [00:02,  5.19it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Loading /mnt/workspace/save/t5_xl/ee/genia ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "13it [00:02,  4.99it/s]\n",
      "Running tokenizer on dataset: 100%|██████████| 288/288 [00:00<00:00, 3533.30 examples/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset({\n",
      "    features: ['input_ids', 'attention_mask', 'labels'],\n",
      "    num_rows: 288\n",
      "})\n",
      "> Begin to perform gradient-free optimization ...\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.10it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.2503526475694444\n",
      "39 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:10,  3.27it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.18713086772168802\n",
      "38 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.12328517294337607\n",
      "37 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.10152660924145299\n",
      "36 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.1229538094284188\n",
      "35 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.11375784588675214\n",
      "34 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.06978540665064104\n",
      "33 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.06511329585670406\n",
      "32 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.0604683117988782\n",
      "31 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.059539690588274574\n",
      "30 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.06990935496794873\n",
      "29 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.06450435279780983\n",
      "28 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.06209346412593483\n",
      "27 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.08347272302350428\n",
      "26 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.07673233116976515\n",
      "25 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.05958557185431207\n",
      "24 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.06119212097382846\n",
      "23 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.05650175718833396\n",
      "22 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.05325216484074662\n",
      "21 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.05285196455771238\n",
      "20 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.061063304566804616\n",
      "19 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.05361484959313244\n",
      "18 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.05788866642638028\n",
      "17 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04942860356407017\n",
      "16 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04500116396990451\n",
      "15 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.046837576854731076\n",
      "14 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.05032005641959365\n",
      "13 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04622112800490771\n",
      "12 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04700946964151044\n",
      "11 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.044269031567428785\n",
      "10 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04288419716250924\n",
      "9 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04162772031680161\n",
      "8 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04244469165455694\n",
      "7 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04267351245969233\n",
      "6 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.04081348078076749\n",
      "5 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.039296401796606525\n",
      "4 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.03982258059007773\n",
      "3 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.03759653327845071\n",
      "2 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.03765283573563638\n",
      "1 remaining budget and 0 running jobs\n",
      "Launching 1 jobs with new suggestions\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "36it [00:11,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updating fitness with value 0.037391934995628165\n",
      "0 remaining budget and 0 running jobs\n",
      "module_weights: [ 0.03110189  0.22246495 -0.0247352   0.01564039  0.02715719  1.04962912\n",
      "  0.37049855  0.12635716  0.13081292  0.02203337 -0.02234231  0.13655625\n",
      "  0.00429119]\n"
     ]
    }
   ],
   "source": [
     "# modules = get_lora_module_list(\"all\")\n",
     "# modules = [\n",
     "#     \"/mnt/workspace/t5_chat/save/t5-large/re_task/mix\",\n",
     "#     \"/mnt/workspace/t5_chat/save/t5-large/re_task/conll04\",\n",
     "#     \"/mnt/workspace/t5_chat/save/t5-large/ner_task/mix\",\n",
     "#     \"/mnt/workspace/t5_chat/save/t5-large/ner_task/mix-else/checkpoint-14000/\"\n",
     "# ]\n",
     "\n",
     "# Discover every saved LoRA adapter under the t5_xl tree; \"all\" pools the\n",
     "# NER, RE and EE adapters together as composition candidates.\n",
     "modules = get_lora_list(\"/mnt/workspace/save/t5_xl/\",\"all\")\n",
     "\n",
     "print(\"modules:\", modules)\n",
     "\n",
     "ds_train_path = [\"/mnt/workspace/data/RE/conll04_RE/train.json\"]\n",
     "ds_test_path = [\"/mnt/workspace/data/RE/conll04_RE/test.json\"]\n",
     "\n",
     "# construct input list and output list\n",
     "# (each example is a dict with \"prompt\" and \"response\" keys)\n",
     "example_inputs, examples_outputs = [], []\n",
     "for example in get_examples_for_learning(ds_train_path):\n",
     "    example_inputs.append(example[\"prompt\"])\n",
     "    examples_outputs.append(example[\"response\"])\n",
     "\n",
     "# perform LoRAHub learning\n",
     "# (gradient-free search for one scalar weight per module, per the output\n",
     "# log above: 40-step budget, batch size 8)\n",
     "module_weights, model, tokenizer = lorahub_learning(lora_module_list=modules,\n",
     "                                                    example_inputs=example_inputs,\n",
     "                                                    example_outputs=examples_outputs,\n",
     "                                                    max_inference_step=40,\n",
     "                                                    batch_size=8,\n",
     "                                                   )\n",
     "\n",
     "print(\"module_weights:\", module_weights)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:50:38.438071Z",
     "iopub.status.busy": "2024-05-18T12:50:38.437471Z",
     "iopub.status.idle": "2024-05-18T12:50:38.441956Z",
     "shell.execute_reply": "2024-05-18T12:50:38.441492Z",
     "shell.execute_reply.started": "2024-05-18T12:50:38.438039Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/mnt/workspace/save/t5_xl/ner/wnut17 : \t\t\t0.0311018884332118\n",
      "/mnt/workspace/save/t5_xl/ner/mit-restaurant : \t\t\t0.22246494958674728\n",
      "/mnt/workspace/save/t5_xl/ner/ace05 : \t\t\t-0.02473519870602166\n",
      "/mnt/workspace/save/t5_xl/ner/conll03 : \t\t\t0.015640390831999917\n",
      "/mnt/workspace/save/t5_xl/ner/mit-movie : \t\t\t0.02715719242751352\n",
      "/mnt/workspace/save/t5_xl/re/conll04 : \t\t\t1.049629122882777\n",
      "/mnt/workspace/save/t5_xl/re/scierc : \t\t\t0.37049855324822817\n",
      "/mnt/workspace/save/t5_xl/re/nyt11 : \t\t\t0.12635715585938673\n",
      "/mnt/workspace/save/t5_xl/re/gids : \t\t\t0.13081291570959402\n",
      "/mnt/workspace/save/t5_xl/ee/phee : \t\t\t0.022033371670271883\n",
      "/mnt/workspace/save/t5_xl/ee/casie : \t\t\t-0.0223423113075498\n",
      "/mnt/workspace/save/t5_xl/ee/ace05 : \t\t\t0.13655625356007614\n",
      "/mnt/workspace/save/t5_xl/ee/genia : \t\t\t0.0042911883031399\n"
     ]
    }
   ],
   "source": [
     "# Pair each LoRA module path with the weight LoRAHub assigned to it.\n",
     "for m, w in zip(modules, module_weights):\n",
     "    print(f\"{m} : \\t\\t\\t{w}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:01:10.108506Z",
     "iopub.status.busy": "2024-05-18T12:01:10.108168Z",
     "iopub.status.idle": "2024-05-18T12:01:23.334198Z",
     "shell.execute_reply": "2024-05-18T12:01:23.333662Z",
     "shell.execute_reply.started": "2024-05-18T12:01:10.108487Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2024-05-18 20:01:10,137] [INFO] [real_accelerator.py:203:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n",
      "\u001b[93m [WARNING] \u001b[0m async_io requires the dev libaio .so object and headers but these were not found.\n",
      "\u001b[93m [WARNING] \u001b[0m async_io: please install the libaio-dev package with apt\n",
      "\u001b[93m [WARNING] \u001b[0m If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found.\n",
      "\u001b[93m [WARNING] \u001b[0m Please specify the CUTLASS repo directory as environment variable $CUTLASS_PATH\n",
      "\u001b[93m [WARNING] \u001b[0m sparse_attn requires a torch version >= 1.5 and < 2.0 but detected 2.2\n",
      "\u001b[93m [WARNING] \u001b[0m using untested triton version (2.2.0), only 1.0.0 is known to be compatible\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/bin/ld: cannot find -laio: No such file or directory\n",
      "collect2: error: ld returned 1 exit status\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "('/mnt/workspace/save/comb_conll_re/tokenizer_config.json',\n",
       " '/mnt/workspace/save/comb_conll_re/special_tokens_map.json',\n",
       " '/mnt/workspace/save/comb_conll_re/tokenizer.json')"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Persist the composed (weight-merged) model and its tokenizer so the\n",
     "# result can be reloaded without re-running the optimization.\n",
     "save_path = \"/mnt/workspace/save/comb_conll_re\"\n",
     "model.save_pretrained(save_path)\n",
     "tokenizer.save_pretrained(save_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## Performance of the LoRAHub model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:50:38.443227Z",
     "iopub.status.busy": "2024-05-18T12:50:38.442755Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Running tokenizer on dataset: 100%|██████████| 288/288 [00:00<00:00, 4178.14 examples/s]\n",
      "  0%|          | 0/18 [00:00<?, ?it/s]/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:2707: UserWarning: `max_length` is ignored when `padding`=`True` and there is no truncation strategy. To pad to max length, use `padding='max_length'`.\n",
      "  warnings.warn(\n",
      " 67%|██████▋   | 12/18 [00:15<00:08,  1.35s/it]"
     ]
    }
   ],
   "source": [
     "# Evaluate the composed model on the conll04 RE test split.\n",
     "# NOTE(review): this rebinds example_inputs/examples_outputs from the\n",
     "# training cell above to the *test* examples.\n",
     "example_inputs, examples_outputs = [], []\n",
     "with open(\"/mnt/workspace/data/RE/conll04_RE/test.json\", \"r\", encoding=\"utf-8\") as f:\n",
     "        js = json.load(f)\n",
     "for example in js:\n",
     "    example_inputs.append(example[\"prompt\"])\n",
     "    examples_outputs.append(example[\"response\"])\n",
     "\n",
     "# Score predictions with the task-specific metric for RE.\n",
     "example_predictions1, perf = assess_task_performance(example_inputs=example_inputs,\n",
     "                                                    model_or_name_path=model,\n",
     "                                                    tokenizer_or_tokenizer_path=tokenizer,\n",
     "                                                    batch_size=16,\n",
     "                                                    # can set as None if you do not have the ground truth\n",
     "                                                    example_outputs=examples_outputs,\n",
     "                                                    task = \"re\"\n",
     "                                                    )\n",
     "# print(\"example_predictions:\", example_predictions)\n",
     "print(\"task accuracy:\", perf)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
     "def ee_calculate_metrics(predicted: List[str], actual: List[str]):\n",
     "    \"\"\"Micro precision/recall/F1 over '(a, b, c)' tuple strings.\n",
     "\n",
     "    A predicted tuple matches a gold tuple when every field of one is a\n",
     "    substring of the corresponding field of the other. Returns a dict\n",
     "    with keys 'precision', 'recall' and 'f1'.\n",
     "    \"\"\"\n",
     "    def ee_parse_tuple_string(tuple_string: str) -> List[Tuple[str, str, str]]:\n",
     "        # Normalize full-width CJK punctuation, strip all spaces, and drop\n",
     "        # the substring \"the\" before extracting parenthesized groups.\n",
     "        # NOTE(review): replace(\"the\", \"\") deletes \"the\" anywhere, even\n",
     "        # inside words (\"theory\" -> \"ory\") -- confirm this is intended.\n",
     "        tuple_string = tuple_string.replace('，',',').replace('（','(').replace('）', ')').replace(\"：\", \":\").replace(\" \", \"\").replace(\"the\", \"\")\n",
     "        # Each regex match yields a 3-tuple; missing fields come back as ''.\n",
     "        pattern = re.compile(r'\\(([^,]*),? ?([^,]*)?,? ?([^)]*)?\\)')\n",
     "        tuples = pattern.findall(tuple_string)\n",
     "        # Despite the List annotation above, a *set* of 3-tuples is\n",
     "        # returned (duplicates collapse); '()' becomes an EMPTY sentinel.\n",
     "        tuples = set(('EMPTY', 'EMPTY', 'EMPTY') if t == ('', '', '') else t for t in tuples)\n",
     "        return tuples\n",
     "\n",
     "    # Micro-averaged counts accumulated over the whole corpus.\n",
     "    tp = 0\n",
     "    fp = 0\n",
     "    fn = 0\n",
     "\n",
     "    for p, a in zip(predicted, actual):\n",
     "        predicted_tuples = ee_parse_tuple_string(p)\n",
     "        actual_tuples = ee_parse_tuple_string(a)\n",
     "\n",
     "        # Count each predicted tuple at most once (break after its first\n",
     "        # matching gold tuple); substring containment in either direction\n",
     "        # counts as a match.\n",
     "        tp_temp = 0\n",
     "        for pt in predicted_tuples:\n",
     "            for at in actual_tuples:\n",
     "                if (pt[0] in at[0] and pt[1] in at[1] and pt[2] in at[2]) or (at[0] in pt[0] and at[1] in pt[1] and at[2] in pt[2]):\n",
     "                    tp_temp += 1\n",
     "                    break\n",
     "\n",
     "        tp += tp_temp\n",
     "        fp += len(predicted_tuples) - tp_temp\n",
     "        fn += len(actual_tuples) - tp_temp\n",
     "\n",
     "    # Degenerate denominators (no predictions / no gold tuples) score 1.\n",
     "    precision = tp / (tp + fp) if (tp + fp) > 0 else 1\n",
     "    recall = tp / (tp + fn) if (tp + fn) > 0 else 1\n",
     "    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 1\n",
     "    \n",
     "    # print(tp, fp, fn)\n",
     "\n",
     "    return {\"precision\": precision, \"recall\": recall, \"f1\": f1}\n",
     "print(ee_calculate_metrics(example_predictions1, examples_outputs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:07:46.652395Z",
     "iopub.status.busy": "2024-05-18T12:07:46.652062Z",
     "iopub.status.idle": "2024-05-18T12:07:47.992714Z",
     "shell.execute_reply": "2024-05-18T12:07:47.992209Z",
     "shell.execute_reply.started": "2024-05-18T12:07:46.652369Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Free cached GPU memory before loading the next model.\n",
    "import torch\n",
    "torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Performance of the single LoRA model (no LoRAHub merging)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:32:00.228514Z",
     "iopub.status.busy": "2024-05-18T12:32:00.228028Z",
     "iopub.status.idle": "2024-05-18T12:32:26.201977Z",
     "shell.execute_reply": "2024-05-18T12:32:26.201489Z",
     "shell.execute_reply.started": "2024-05-18T12:32:00.228494Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████| 2/2 [00:01<00:00,  1.47it/s]\n",
      "Running tokenizer on dataset: 100%|██████████| 288/288 [00:00<00:00, 4295.26 examples/s]\n",
      "  0%|          | 0/9 [00:00<?, ?it/s]/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:2707: UserWarning: `max_length` is ignored when `padding`=`True` and there is no truncation strategy. To pad to max length, use `padding='max_length'`.\n",
      "  warnings.warn(\n",
      "100%|██████████| 9/9 [00:18<00:00,  2.08s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "task accuracy: {'precision': 0.5533980582524272, 'recall': 0.4453125, 'f1': 0.4935064935064935}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "from lorahub.algorithm import *\n",
    "import json\n",
    "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n",
    "import peft\n",
    "import torch\n",
    "\n",
    "# Baseline: the single conll04 LoRA adapter on its own (no LoRAHub merging).\n",
    "example_inputs, examples_outputs = [], []\n",
    "with open(\"/mnt/workspace/data/RE/conll04_RE/test.json\", \"r\", encoding=\"utf-8\") as f:\n",
    "    js = json.load(f)\n",
    "for example in js:\n",
    "    example_inputs.append(example[\"prompt\"])\n",
    "    examples_outputs.append(example[\"response\"])\n",
    "\n",
    "lora_path = \"/mnt/workspace/save/t5_xl/re/conll04/\"\n",
    "model_path = \"/mnt/workspace/model_save/t5_xl\"\n",
    "device = \"cuda\"\n",
    "\n",
    "# BUGFIX: use the `device` constant instead of a second hard-coded \"cuda\".\n",
    "model = AutoModelForSeq2SeqLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).to(device)\n",
    "tokenizer = AutoTokenizer.from_pretrained(lora_path)\n",
    "\n",
    "# NOTE(review): the adapter loaded here is conll04 but is registered under the\n",
    "# name \"duie\"; the name is only a local handle, but a matching name would be clearer.\n",
    "model.load_adapter(lora_path, adapter_name=\"duie\")\n",
    "model.set_adapter(\"duie\")\n",
    "\n",
    "# `example_predictions2` / `examples_outputs` are reused by the metrics cell below.\n",
    "example_predictions2, perf = assess_task_performance(example_inputs=example_inputs,\n",
    "                                                    model_or_name_path=model,\n",
    "                                                    tokenizer_or_tokenizer_path=tokenizer,\n",
    "                                                    batch_size=32,\n",
    "                                                    # can set as None if you do not have the ground truth\n",
    "                                                    example_outputs=examples_outputs,\n",
    "                                                    task = \"re\"\n",
    "                                                    )\n",
    "# print(\"example_predictions:\", example_predictions)\n",
    "print(\"task accuracy:\", perf)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-05-18T12:32:29.017388Z",
     "iopub.status.busy": "2024-05-18T12:32:29.017027Z",
     "iopub.status.idle": "2024-05-18T12:32:29.025608Z",
     "shell.execute_reply": "2024-05-18T12:32:29.025061Z",
     "shell.execute_reply.started": "2024-05-18T12:32:29.017368Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'precision': 0.6636363636363637, 'recall': 0.538083538083538, 'f1': 0.5943012211668929}\n"
     ]
    }
   ],
   "source": [
    "# NOTE: duplicate of the metrics function defined in an earlier cell;\n",
    "# consider defining it once (or moving it to a module) and reusing it.\n",
    "def ee_calculate_metrics(predicted: List[str], actual: List[str]):\n",
    "    \"\"\"Tuple-level precision / recall / F1 between predicted and gold strings.\n",
    "\n",
    "    Each string may contain several \"(a, b, c)\" tuples. Matching is fuzzy:\n",
    "    two tuples count as equal when every field of one is a substring of the\n",
    "    corresponding field of the other (in either direction).\n",
    "    \"\"\"\n",
    "    def ee_parse_tuple_string(tuple_string: str) -> set:\n",
    "        # Normalize full-width punctuation, strip spaces and the literal \"the\",\n",
    "        # then pull out every \"(x,y,z)\" group with a permissive regex.\n",
    "        tuple_string = tuple_string.replace('，',',').replace('（','(').replace('）', ')').replace(\"：\", \":\").replace(\" \", \"\").replace(\"the\", \"\")\n",
    "        pattern = re.compile(r'\\(([^,]*),? ?([^,]*)?,? ?([^)]*)?\\)')\n",
    "        tuples = pattern.findall(tuple_string)\n",
    "        # Map an all-empty match to a sentinel so \"no relation\" answers compare equal.\n",
    "        tuples = set(('EMPTY', 'EMPTY', 'EMPTY') if t == ('', '', '') else t for t in tuples)\n",
    "        return tuples\n",
    "\n",
    "    tp = 0\n",
    "    fp = 0\n",
    "    fn = 0\n",
    "\n",
    "    for p, a in zip(predicted, actual):\n",
    "        predicted_tuples = ee_parse_tuple_string(p)\n",
    "        actual_tuples = ee_parse_tuple_string(a)\n",
    "\n",
    "        # Count each predicted tuple at most once as a true positive.\n",
    "        tp_temp = 0\n",
    "        for pt in predicted_tuples:\n",
    "            for at in actual_tuples:\n",
    "                if (pt[0] in at[0] and pt[1] in at[1] and pt[2] in at[2]) or (at[0] in pt[0] and at[1] in pt[1] and at[2] in pt[2]):\n",
    "                    tp_temp += 1\n",
    "                    break\n",
    "\n",
    "        tp += tp_temp\n",
    "        fp += len(predicted_tuples) - tp_temp\n",
    "        fn += len(actual_tuples) - tp_temp\n",
    "\n",
    "    # Degenerate denominators: an empty prediction/gold set is treated as perfect.\n",
    "    precision = tp / (tp + fp) if (tp + fp) > 0 else 1\n",
    "    recall = tp / (tp + fn) if (tp + fn) > 0 else 1\n",
    "    # BUGFIX: when precision + recall == 0 (nothing matched), F1 must be 0, not 1.\n",
    "    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0\n",
    "\n",
    "    return {\"precision\": precision, \"recall\": recall, \"f1\": f1}\n",
    "print(ee_calculate_metrics(example_predictions2, examples_outputs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
