{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import wandb\n",
    "from datetime import datetime\n",
    "import shutil\n",
    "import importlib\n",
    "import os\n",
    "from importlib.machinery import SourceFileLoader\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\n",
    "import random\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "from torch import nn\n",
    "\n",
    "from GPUtil import showUtilization as gpu_usage\n",
    "from numba import cuda"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import (\n",
    "    MODEL_WITH_LM_HEAD_MAPPING,\n",
    "    WEIGHTS_NAME,\n",
    "    AdamW,\n",
    "    AutoConfig,\n",
    "    AutoModelWithLMHead,\n",
    "    AutoTokenizer,\n",
    "    PreTrainedModel,\n",
    "    PreTrainedTokenizer,\n",
    "    AutoModel,\n",
    "    GPT2LMHeadModel,\n",
    "    AutoModelForCausalLM,\n",
    "    get_linear_schedule_with_warmup,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "6"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "SPECIAL_TOKENS = { \n",
    "    \"<sp_1>\": \"<sp_1>\",\n",
    "    \"</sp_1>\": \"</sp_1>\",\n",
    "    \"<sp_2>\": \"<sp_2>\",\n",
    "    \"</sp_2>\": \"</sp_2>\",\n",
    "    \"<persona>\": \"<persona>\",\n",
    "    \"</persona>\": \"</persona>\",\n",
    "}\n",
    "config = AutoConfig.from_pretrained(\"microsoft/DialoGPT-medium\")\n",
    "# config.n_positions = 512 \n",
    "# config.n_embd = 1024\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\n",
    "tokenizer.add_tokens(list(SPECIAL_TOKENS.values()), special_tokens=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BaseExperiment:\n",
    "    def __init__(self, \n",
    "        model=None, \n",
    "        tokenizer=None, \n",
    "        dataloader_train=None,\n",
    "        dataloader_valid=None,\n",
    "        dataloader_test=None,\n",
    "        loss_func_class=None,\n",
    "        estimate_func_class=None,\n",
    "        experiment_config=None,\n",
    "        optimizer_class=None,\n",
    "        sheduler_class=None,\n",
    "        project_name=None,\n",
    "        notebook_name=None,\n",
    "        name_run=\"\",\n",
    "        model_description=\"\",\n",
    "        do_unit_tests=True,\n",
    "        pretrained_model_name=None\n",
    "        ): \n",
     "        assert notebook_name is not None, f\"notebook_name should be valid filename, but get {notebook_name}\"\n",
    "\n",
    "        # datasets\n",
    "        self.dataloader_train = dataloader_train\n",
    "        self.dataloader_valid = dataloader_valid\n",
    "        self.dataloader_test = dataloader_test\n",
    "        \n",
    "        # wandb\n",
    "        self.notebook_name = notebook_name\n",
    "        self.project_name = project_name \n",
    "        self.experiment_config = experiment_config\n",
    "        self.wandb_run = None\n",
    "        self.name_run = name_run\n",
    "        self.model_description = model_description\n",
    "        self.model_name = \"pytorch_model\"\n",
    "        self.pure_model_name = \"pytorch_model\"\n",
    "        self.model_artifact = None\n",
    "        self.pretrained_model_name = pretrained_model_name\n",
    "\n",
    "        self.optimizer_class = optimizer_class\n",
    "        self.sheduler_class = sheduler_class\n",
    "        self.loss_func_class = loss_func_class\n",
    "        self.estimate_func_class = estimate_func_class\n",
    "\n",
    "        self.model = model\n",
    "        self.tokenizer = tokenizer\n",
    "        self.optimizer = None\n",
    "        self.sheduler = None\n",
    "        self.loss_func = None\n",
    "        self.estimate_func = None\n",
    "        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "        # self.device = torch.device('cpu')\n",
    "        print(f\"Using device {self.device}\")\n",
    "\n",
    "        # prepare for experiment\n",
    "        self.setup()\n",
    "        if do_unit_tests:\n",
    "            self.unit_tests()\n",
    "\n",
    "    def setup(self):\n",
    "        self.model.to(self.device)\n",
    "        # - Freeze selective layers:\n",
    "        # - Freeze all layers except last n:\n",
    "        if self.experiment_config['freeze_layers'] > 0:\n",
    "            for parameter in self.model.parameters():\n",
    "                parameter.requires_grad = False\n",
    "\n",
    "            for i, m in enumerate(self.model.transformer.h):        \n",
    "                #Only un-freeze the last n transformer blocks\n",
     "                if i+1 > len(self.model.transformer.h) - self.experiment_config['freeze_layers']:\n",
    "                    for parameter in m.parameters():\n",
    "                        parameter.requires_grad = True \n",
    "\n",
    "            for parameter in self.model.transformer.ln_f.parameters():        \n",
    "                parameter.requires_grad = True\n",
    "\n",
    "            for parameter in self.model.lm_head.parameters():        \n",
    "                parameter.requires_grad = True\n",
    "        if self.experiment_config['do_weight_decay']:\n",
    "            # Prepare optimizer and schedule (linear warmup and decay)\n",
    "            no_decay = [\"bias\", \"LayerNorm.weight\"]\n",
    "            optimizer_grouped_parameters = [\n",
    "                {\n",
    "                    \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n",
    "                    \"weight_decay\": self.experiment_config['weight_decay'],\n",
    "                },\n",
    "                {\"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n",
    "            ]\n",
    "            self.optimizer = self.optimizer_class(optimizer_grouped_parameters, **self.experiment_config['optimizer'])\n",
    "        else:\n",
    "            self.optimizer = self.optimizer_class(self.model.parameters(), **self.experiment_config['optimizer'])\n",
    "\n",
     "        if self.sheduler_class is not None:\n",
    "            # num_training_steps = len(self.dataloader_train) // self.experiment_config[\"sheduler\"] * self.experiment_config['epochs']\n",
    "            self.sheduler = self.sheduler_class(\n",
    "                self.optimizer, \n",
    "                **self.experiment_config['sheduler']\n",
    "                )\n",
    "\n",
    "        self.loss_func = self.loss_func_class()\n",
    "        self.estimate_func = self.estimate_func_class()\n",
    "\n",
    "        # setup wandb\n",
    "        # save model structure and weights to wandb\n",
    "        self.model_artifact = wandb.Artifact(\n",
    "            self.name_run, type=\"model\",\n",
    "            description=self.model_description,\n",
    "            metadata=self.experiment_config)\n",
    "\n",
    "\n",
    "    def get_date(self):\n",
    "        now = datetime.now()\n",
    "        date_time = now.strftime(\"%m_%d_%Y__%H:%M:%S\")\n",
    "        return date_time\n",
    "\n",
    "    def unit_tests(self):\n",
    "        # test training\n",
    "        X, y = next(iter(self.dataloader_train))\n",
    "        X, y = X.to(self.device), y.to(self.device)\n",
    "\n",
    "        loss = self.model(X, labels=X).loss\n",
    "        self.optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        self.optimizer.step()\n",
    "\n",
    "        # test valid\n",
    "        X, y = next(iter(self.dataloader_valid))\n",
    "        X, y = X.to(self.device), y.to(self.device)\n",
    "        test_loss = self.model(X, labels=X).loss\n",
    "\n",
    "        # initial validation\n",
    "        self.model.eval()\n",
    "        test_loss, correct = 0, 0\n",
    "        num_batches = len(self.dataloader_valid)\n",
    "        size = len(self.dataloader_valid.dataset)\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for X, y in self.dataloader_valid:\n",
    "                X, y = X.to(self.device), y.to(self.device)\n",
    "                valid_loss = self.model(X, labels=X).loss\n",
     "                test_loss += valid_loss.item()\n",
    "                break\n",
    "\n",
    "        test_loss /= num_batches\n",
    "\n",
    "        print(\"tests ok\")\n",
    "\n",
    "\n",
    "    def train(self):\n",
    "        with wandb.init(project=self.project_name, entity=\"dimweb\",\n",
    "                        settings=wandb.Settings(\n",
    "                            ),\n",
    "                        name=self.name_run,\n",
    "                        config=self.experiment_config,\n",
    "                        ) as run:\n",
    "\n",
    "            self.run = run\n",
    "\n",
    "            # start train\n",
    "            epochs = self.experiment_config['epochs']\n",
    "            for i in range(epochs):\n",
    "                print(f\"Epoch: {i}\")\n",
    "                self.train_steps()\n",
    "                self.valid_steps()\n",
    "            \n",
    "            # sync model\n",
    "            self.wandb_save_model()\n",
    "            \n",
    "            print(f\"train end\")\n",
    "    \n",
    "    def save_model_class(self):\n",
    "        # save class\n",
    "        model_class_name = self.experiment_config['model_class_name']\n",
    "        class_script_path_dest = f\"{os.path.join(wandb.run.dir, model_class_name)}.py\"\n",
    "        class_script_path_src = f\"./models/{model_class_name}.py\"\n",
    "        shutil.copy2(class_script_path_src, class_script_path_dest)\n",
    "        self.model_artifact.add_file(class_script_path_dest)\n",
    "        wandb.save(class_script_path_dest)\n",
    "\n",
    "    def wandb_save_model(self):\n",
    "\n",
    "        saved_path = str(wandb.run.dir).replace(\"/files\", \"_local\")\n",
    "        self.model.save_pretrained(saved_path)\n",
    "        self.tokenizer.save_pretrained(saved_path)\n",
    "\n",
    "        self.experiment_config['saved_path'] = saved_path \n",
    "\n",
    "        # save notebook\n",
    "        notebook_path = os.path.join(wandb.run.dir, self.notebook_name)\n",
    "        shutil.copy2(self.notebook_name, notebook_path)\n",
    "        self.model_artifact.add_file(notebook_path)\n",
    "        wandb.save(notebook_path)\n",
    "\n",
    "        wandb.log_artifact(self.model_artifact)\n",
    "    \n",
    "    def train_steps(self):\n",
    "        raise NotImplementedError(\"You need specify training steps\")\n",
    "\n",
    "    def valid_steps(self):\n",
    "        raise NotImplementedError(\"You need specify valid steps\")\n",
    "    \n",
    "    def load_model(self, artifact_name=\"\"):\n",
    "        with wandb.init(project=\"gpt_persona_bot\", job_type=\"inference\"):\n",
    "            model_artifact = wandb.use_artifact(artifact_name)\n",
    "            model_config = model_artifact.metadata\n",
    "            model_folder = model_config['saved_path'] \n",
    "            self.model = AutoModelForCausalLM.from_pretrained(model_folder)\n",
    "            self.tokenizer = AutoTokenizer.from_pretrained(model_folder)\n",
    "            self.model.to(self.device)\n",
    "            self.free_gpu_cache()\n",
    "    \n",
    "    def free_gpu_cache(self):\n",
    "        print(\"Initial GPU Usage\")\n",
    "        gpu_usage()                             \n",
    "\n",
    "        torch.cuda.empty_cache()\n",
    "\n",
    "        cuda.select_device(0)\n",
    "        cuda.close()\n",
    "        cuda.select_device(0)\n",
    "\n",
    "        print(\"GPU Usage after emptying the cache\")\n",
    "        gpu_usage()\n",
    "\n",
    "    def test(self, artifact_name=\"\"):\n",
    "        raise NotImplementedError(\"You need specify test steps\")\n",
    "\n",
    "\n",
    "class Experiment(BaseExperiment):\n",
    "    def __init__(self, **kwargs): \n",
    "        super(Experiment, self).__init__(**kwargs)\n",
    "    \n",
    "    def train_steps(self):\n",
    "        self.model.train()\n",
    "        interval = self.experiment_config['check_interval']\n",
    "        \n",
    "        for batch, (X, y) in enumerate(self.dataloader_train):\n",
    "            # Send data to training device\n",
    "            X, y = X.to(self.device), y.to(self.device)\n",
    "            \n",
    "            # Compute prediction error\n",
    "            loss = self.model(X, labels=X).loss\n",
    "            perplexity = torch.exp(torch.tensor(loss.item()))\n",
    "            # Backpropagation\n",
    "            self.optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            self.optimizer.step()\n",
    "            \n",
     "            if self.sheduler is not None:\n",
    "                self.sheduler.step()\n",
    "            \n",
    "            # Progress output\n",
    "            if batch % interval == 0:\n",
    "                wandb.log({\"train_loss\": loss.item()})\n",
    "                wandb.log({\"train_perplexity\": perplexity})\n",
    "\n",
    "    def valid_steps(self):\n",
    "        self.model.eval()\n",
    "        test_loss, correct = 0, 0\n",
    "        num_batches = len(self.dataloader_valid)\n",
    "        size = len(self.dataloader_valid.dataset)\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for X, y in self.dataloader_valid:\n",
    "                X, y = X.to(self.device), y.to(self.device)\n",
    "                loss = self.model(X, labels=y).loss\n",
    "                test_loss += loss.item()\n",
    "\n",
    "        test_loss /= num_batches\n",
    "        # correct /= size\n",
    "        perplexity = torch.exp(torch.tensor(test_loss))\n",
    "        \n",
    "        wandb.log({\"val_loss\": test_loss})\n",
    "        wandb.log({\"valid_perplexity\": perplexity})\n",
    "    \n",
    "    @staticmethod\n",
    "    def last_index(array, elem):\n",
    "        return len(array) - 1 - array[::-1].index(elem)\n",
    "\n",
    "    @staticmethod\n",
    "    def test(artifact_name=\"\", persona=\"\", user_inputs=None, interact=False, cuda=False):\n",
    "        with wandb.init(project=\"gpt_persona_bot\", job_type=\"inference\"):\n",
    "            model_folder = \"\"\n",
    "            if ':' in artifact_name:\n",
    "                model_artifact = wandb.use_artifact(artifact_name)\n",
    "                model_dir = model_artifact.download()\n",
    "                model_config = model_artifact.metadata\n",
    "                model_folder = model_config['saved_path'] \n",
    "            else:\n",
    "                model_folder = artifact_name\n",
    "            print(model_folder)\n",
    "            model = AutoModelForCausalLM.from_pretrained(model_folder)\n",
    "            model.eval()\n",
    "            tokenizer = AutoTokenizer.from_pretrained(model_folder)\n",
    "            device = torch.device(\"cuda\") if cuda else torch.device(\"cpu\")\n",
    "            model = model.to(device)\n",
    "            print(\"Start conversation\")\n",
    "            print(f\"Persona: {persona}\")\n",
    "            persona = f\"{SPECIAL_TOKENS['<persona>']}{persona}{SPECIAL_TOKENS['</persona>']}\"\n",
    "            # persona = f\"{SPECIAL_TOKENS['<persona>']}{persona}\"\n",
    "            persona_ids = tokenizer.encode(persona, return_tensors='pt')\n",
    "            persona_ids = persona_ids.to(device)\n",
    "            VOCAB_TOKENS = tokenizer.get_added_vocab()\n",
    "\n",
    "            last_index = 0\n",
    "            steps = len(user_inputs)\n",
    "            history = []\n",
    "            \n",
    "            if interact:\n",
    "                steps = 15\n",
    "            # global_step\n",
    "            for step in range(steps):\n",
    "                # print(\"-\"*40,step)\n",
    "                if interact:\n",
    "                    user_input = input()\n",
    "                else:\n",
    "                    user_input = user_inputs[step]\n",
    "                print(f\"User: {user_input}\")\n",
    "\n",
    "                user_input = f\"{SPECIAL_TOKENS['<sp_1>']}{user_input}{SPECIAL_TOKENS['</sp_1>']}{SPECIAL_TOKENS['<sp_2>']}\"\n",
    "                history.append(user_input)\n",
    "                new_user_input_ids = tokenizer.encode(user_input, return_tensors='pt')\n",
    "                new_user_input_ids = new_user_input_ids.to(device)\n",
    "                history_chat = \"\".join(history[-3:])\n",
    "                \n",
    "                history_ids = tokenizer.encode(history_chat, return_tensors='pt')\n",
    "                history_ids = history_ids.to(device)\n",
    "                bot_input_ids = torch.cat([persona_ids, history_ids], dim=-1)\n",
    "\n",
    "                # generated a response while limiting the total chat history to 1000 tokens, \n",
    "                model_response = model.generate(\n",
    "                    bot_input_ids, \n",
    "                    max_length=350,\n",
    "                    pad_token_id=tokenizer.eos_token_id,  \n",
    "                    do_sample=True, \n",
    "                    num_beams=2, \n",
    "                    temperature = 0.95,\n",
    "                    top_k=100, \n",
    "                    top_p=0.95,\n",
    "                )\n",
    "\n",
    "                model_response = model_response.to(device)\n",
    "                model_response_list = list(model_response[0])\n",
    "                \n",
    "                end_speaker_index = Experiment.last_index(model_response_list, VOCAB_TOKENS['</sp_2>'])\n",
    "                model_response = model_response[:, :end_speaker_index+1]\n",
    "\n",
    "                chat_history_ids = model_response\n",
    "                bot_response_decode = tokenizer.decode(chat_history_ids[0][len(bot_input_ids[0])-1:], skip_special_tokens=True) \n",
    "                last_history = history[-1]\n",
    "                last_history = f\"{last_history}{bot_response_decode}{SPECIAL_TOKENS['</sp_2>']}\"\n",
    "                \n",
    "                history[-1] = last_history\n",
    "                print(f\"Bot: {bot_response_decode}\")\n",
    "                # print(history)\n",
    "    @staticmethod\n",
    "    def test_with_ranking(\n",
    "            artifact_name=\"\", \n",
    "            persona=\"\", \n",
    "            user_inputs=None, \n",
    "            interact=False, \n",
    "            cuda=False,\n",
    "            sentence_ranker=None,\n",
    "            threshhold=0.35,\n",
    "            persona_amount_sentences=3,\n",
    "            beam=2\n",
    "        ):\n",
    "        \n",
    "        model_folder = artifact_name\n",
    "        print(model_folder)\n",
    "        model = AutoModelForCausalLM.from_pretrained(model_folder)\n",
    "        model.eval()\n",
    "        tokenizer = AutoTokenizer.from_pretrained(model_folder)\n",
    "        device = torch.device(\"cuda\") if cuda else torch.device(\"cpu\")\n",
    "        model = model.to(device)\n",
    "        print(\"Start conversation\")\n",
    "        VOCAB_TOKENS = tokenizer.get_added_vocab()\n",
    "\n",
    "        steps = len(user_inputs)\n",
    "        history = []\n",
    "        \n",
    "        if interact:\n",
    "            steps = 15\n",
    "        for step in range(steps):\n",
    "            if interact:\n",
    "                user_input = input()\n",
    "            else:\n",
    "                user_input = user_inputs[step]\n",
    "            # get more relevant persona pieces\n",
    "            persona, max_prob = sentence_ranker.rank_sentences([user_input], k=persona_amount_sentences)\n",
    "            persona = \" \".join(persona)\n",
    "\n",
    "            print(f\"Persona: {persona}\")\n",
    "            print(f\"Dreaming: {True if max_prob < threshhold else False} - {max_prob} \")\n",
    "            persona = f\"{SPECIAL_TOKENS['<persona>']}{persona}{SPECIAL_TOKENS['</persona>']}\"\n",
    "\n",
    "            persona_ids = tokenizer.encode(persona, return_tensors='pt')\n",
    "            persona_ids = persona_ids.to(device)\n",
    "            \n",
    "            print(f\"User: {user_input}\")\n",
    "\n",
    "            user_input = f\"{SPECIAL_TOKENS['<sp_1>']}{user_input}{SPECIAL_TOKENS['</sp_1>']}{SPECIAL_TOKENS['<sp_2>']}\"\n",
    "            history.append(user_input)\n",
    "\n",
    "            new_user_input_ids = tokenizer.encode(user_input, return_tensors='pt')\n",
    "            new_user_input_ids = new_user_input_ids.to(device)\n",
    "\n",
    "            history_chat = \"\".join(history[-3:])\n",
    "            print()\n",
    "            history_ids = tokenizer.encode(history_chat, return_tensors='pt')\n",
    "            history_ids = history_ids.to(device)\n",
    "\n",
    "            bot_input_ids = torch.cat([persona_ids, history_ids], dim=-1)\n",
    "            if max_prob > threshhold:\n",
    "                model_response = model.generate(\n",
    "                    bot_input_ids, \n",
    "                    max_length=250,\n",
    "                    pad_token_id=tokenizer.eos_token_id,  \n",
    "                    do_sample=True, \n",
    "                    num_beams=beam, \n",
    "                    temperature = 0.95,\n",
    "                    top_k=100, \n",
    "                    top_p=0.95,\n",
    "                )\n",
    "            else:\n",
    "                model_response = model.generate(\n",
    "                    bot_input_ids, \n",
    "                    max_length=250,\n",
    "                    pad_token_id=tokenizer.eos_token_id,  \n",
    "                    do_sample=True, \n",
    "                    # num_beams=3, \n",
    "                    temperature = 0.95,\n",
    "                    top_k=100, \n",
    "                    top_p=0.95,\n",
    "                )\n",
    "\n",
    "            model_response = model_response.to(device)\n",
    "            model_response_list = list(model_response[0])\n",
     "            end_speaker_index = len(bot_input_ids[0]) + 1 + model_response_list[len(bot_input_ids[0])+1:].index(VOCAB_TOKENS['</sp_2>'])\n",
    "\n",
    "            bot_response_decode = tokenizer.decode(model_response[0][len(bot_input_ids[0])-1:end_speaker_index], skip_special_tokens=True) \n",
    "            last_history = history[-1]\n",
    "            last_history = f\"{last_history}{bot_response_decode}{SPECIAL_TOKENS['</sp_2>']}\"\n",
    "            \n",
    "            history[-1] = last_history\n",
    "            print(f\"Bot: {bot_response_decode}\")\n",
    "            print()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Создаем датасет"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "persona_chat_original = pd.read_csv(\"./persona_chat.csv\")\n",
    "# persona_chat_original = persona_chat_original\n",
    "# persona_chat_original = persona_chat_original[:3000]\n",
    "# persona_chat_original.head(3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PersonaChatGenerator:\n",
    "\tdef __init__(self, \n",
    "\t\tinitial_dataset=None,\n",
    "\t\ttokenizer=None\n",
    "\t):\n",
    "\t\tself.initial_dataset = initial_dataset\n",
    "\t\tself.processed_dataset = []\n",
    "\t\tself.tokenizer = tokenizer\n",
     "\t\tself.processed_dataset = self.process_dataset()\n",
    "\n",
    "\tdef process_dataset(self):\n",
    "\t\tprocessed_dataset = {\n",
    "\t\t\t\"persona\": [],\n",
    "\t\t\t\"history\": [],\n",
    "\t\t\t# \"target\": []\n",
    "\t\t}\n",
    "\n",
    "\t\tsp_1_start = SPECIAL_TOKENS['<sp_1>']\n",
    "\t\tsp_1_end = SPECIAL_TOKENS['</sp_1>']\n",
    "\t\tsp_2_start = SPECIAL_TOKENS['<sp_2>']\n",
    "\t\tsp_2_end = SPECIAL_TOKENS['</sp_2>']\n",
    "\t\tpersona_start = SPECIAL_TOKENS['<persona>']\n",
    "\t\tpersona_end = SPECIAL_TOKENS['</persona>']\n",
    "\t\trelu = lambda x: x if x > 0 else 0 \n",
    "\t\tfor i in range(len(self.initial_dataset)):\n",
    "\t\t\tpersona = self.initial_dataset['Persona'].iloc[i]\n",
    "\t\t\tchat = self.initial_dataset['chat'].iloc[i].split(\"\\n\")\n",
    "\t\t\tchat = chat[:-1]\n",
    "\t\t\thistory = []\n",
    "\t\t\tfor j in range(len(chat)):\n",
    "\t\t\t\treply = chat[j]\n",
    "\t\t\t\tif (j+1) % 2 == 0:\n",
    "\t\t\t\t\treply = f\"{sp_2_start}{reply}{sp_2_end}\"\n",
    "\t\t\t\t\thistory.append(reply)\n",
    "\t\t\t\t\ttemp_history = history[relu(j-4):j+1]\n",
    "\t\t\t\t\ttemp_history = \"\".join(temp_history)\n",
    "\t\t\t\t\tprocessed_dataset['persona'].append(persona)\n",
    "\t\t\t\t\tprocessed_dataset['history'].append(temp_history)\n",
    "\n",
    "\t\t\t\telse:\n",
    "\t\t\t\t\treply = f\"{sp_1_start}{reply}{sp_1_end}\"\n",
    "\t\t\t\t\thistory.append(reply)\n",
    "\n",
    "\t\tdataset = pd.DataFrame(data=processed_dataset)\n",
    "\t\treturn dataset\n",
    "\n",
    "train_dataset_csv, valid_dataset_csv = train_test_split(persona_chat_original, test_size=0.01)\n",
    "train_dataset_csv, valid_dataset_csv = train_dataset_csv.reset_index(), valid_dataset_csv.reset_index()\n",
    "train_dataset_generator = PersonaChatGenerator(\n",
    "\tinitial_dataset=train_dataset_csv,\n",
    "\ttokenizer=tokenizer\n",
    ")\n",
    "\n",
    "valid_dataset_generator = PersonaChatGenerator(\n",
    "\tinitial_dataset=valid_dataset_csv,\n",
    "\ttokenizer=tokenizer\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PersonaChatDataset(Dataset):\n",
    "\tdef __init__(self, \n",
    "\t\tinitial_dataset=None,\n",
    "\t\ttokenizer=None,\n",
    "\t\tis_validation=False\n",
    "\t):\n",
    "\t\tself.initial_dataset = initial_dataset\n",
    "\t\tself.tokenizer = tokenizer\n",
    "\t\tself.is_validation = is_validation\n",
    "\t\n",
    "\tdef __len__(self):\n",
    "\t\treturn len(self.initial_dataset)\n",
    "\t\n",
    "\tdef __getitem__(self, idx):\n",
    "\t\trow = self.initial_dataset.iloc[idx]\n",
    "\t\tpersona = [item.strip() for item in row['persona'].split(\".\") if len(item) > 0 ]\n",
    "\t\tif not self.is_validation:\n",
    "\t\t\trandom.shuffle(persona)\n",
    "\t\tpersona = [item+\". \" for item in persona]\n",
    "\t\tpersona[-1] = persona[-1][:-1]\n",
    "\t\tpersona = [SPECIAL_TOKENS['<persona>']] + persona + [SPECIAL_TOKENS['</persona>']]\n",
    "\t\t\n",
    "\t\tpersona = [torch.tensor(self.tokenizer.encode(item)).flatten() for item in persona]\n",
    "\t\tpersona = torch.cat([*persona])\n",
    "\n",
    "\t\thistory = row['history']\n",
    "\t\t\n",
    "\t\thistory = self.tokenizer.encode(history)\n",
    "\t\thistory = torch.tensor(history).flatten()\n",
    "\t\n",
    "\t\tfeature = torch.cat([persona, history])\n",
    "\n",
    "\t\treturn {\n",
    "\t\t\t\"feature\": feature,\n",
    "\t\t\t\"target\": feature \n",
    "\t\t}\n",
    "\n",
    "train_dataset = PersonaChatDataset(\n",
    "\tinitial_dataset=train_dataset_generator.process_dataset(),\n",
    "\ttokenizer=tokenizer\n",
    ")\n",
    "\n",
    "valid_dataset = PersonaChatDataset(\n",
    "\tinitial_dataset=valid_dataset_generator.process_dataset(),\n",
    "\ttokenizer=tokenizer,\n",
    "\tis_validation=True\n",
    ")\n",
    "\n",
    "def collate(examples):\n",
    "\tfeatures = [item['feature'] for item in examples]\n",
    "\tfeatures = pad_sequence(features, batch_first=True)\n",
    "\t\n",
    "\ttarget = [item['target'] for item in examples]\n",
     "\ttarget = pad_sequence(target, batch_first=True)\n",
    "\treturn features.to(torch.long), target.to(torch.long) \n",
    "\n",
    "train_dataloader = DataLoader(\n",
    "    train_dataset, \n",
    "\tbatch_size=4, \n",
    "\tcollate_fn=collate, \n",
    "\tdrop_last = True,\n",
    "\tshuffle=True\n",
    ")\n",
    "\n",
    "valid_dataloader = DataLoader(\n",
    "    valid_dataset, \n",
    "\tbatch_size=8, \n",
    "\tcollate_fn=collate, \n",
    "\tdrop_last=False,\n",
    "\tshuffle=False\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'<persona>i am bald with a thick beard. i enjoy comedies. i exercise often and have nice muscles. i work as an attorney. i wear nice clothes.</persona><sp_2>do you have a lot of them?</sp_2><sp_1>i do actually! i have 7. i am only 19 years old.</sp_1><sp_2>that is a lot. i do not have any.</sp_2><sp_1>do you like junk food? i like pizza puffs.</sp_1><sp_2>i do not. i am on a diet, gotta maintain my muscles.</sp_2>'"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer.decode(train_dataset[4]['feature'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## тренируем модель"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using device cuda\n",
      "tests ok\n"
     ]
    }
   ],
   "source": [
    "exp_config = {\n",
    "    \"batch_size\": 4,\n",
    "    \"check_interval\": 100,\n",
    "    \"epochs\": 1,\n",
    "    \"optimizer\": {\n",
    "        \"lr\": 5e-5\n",
    "    },\n",
    "    \"model_name\": \"pytorch_model\",\n",
    "    \"saved_path\": \"\",\n",
    "    \"do_weight_decay\": False,\n",
    "    \"weight_decay\": 0.0,\n",
    "    \"freeze_layers\": 2\n",
    "}\n",
    "\n",
    "exp_config[\"sheduler\"] = {\n",
    "    # \"max_lr\": 0.01, \n",
    "    # \"steps_per_epoch\": len(train_dataloader), \n",
    "    # \"epochs\": exp_config[\"epochs\"]\n",
    "    # \"step_size\": 25\n",
    "    \"num_warmup_steps\": 1000,\n",
    "    \"num_training_steps\": len(train_dataloader)\n",
    "\n",
    "}\n",
    "model =  AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\", config=config, ignore_mismatched_sizes=True)\n",
    "# model =  AutoModelForCausalLM.from_pretrained(\"/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220708_230130-aurwatvq_local\")\n",
    "model.resize_token_embeddings(len(tokenizer))\n",
    "# model = GPT2LMModel(**exp_config['model_args'])\n",
    "\n",
    "# не хочу создавать глобальные переменные \n",
    "exp_params = {\n",
    "    \"model\": model, \n",
    "    \"tokenizer\": tokenizer,\n",
    "    \"dataloader_train\": train_dataloader,\n",
    "    \"dataloader_valid\": valid_dataloader,\n",
    "    \"dataloader_test\": valid_dataloader,\n",
    "    \"loss_func_class\": nn.CrossEntropyLoss,\n",
    "    \"estimate_func_class\": nn.CrossEntropyLoss,\n",
    "    \"experiment_config\": exp_config,\n",
    "    \"optimizer_class\": torch.optim.Adam,\n",
    "    \"sheduler_class\": None,\n",
    "    \"notebook_name\": \"gpt_persona_v1.ipynb\",\n",
    "    \"project_name\": \"gpt_persona_bot\",\n",
    "    \"name_run\": \"persona_gpt\",\n",
    "    \"model_description\": \"Уменьшил до 2 блоков\",\n",
    "    \"do_unit_tests\": True,\n",
    "}\n",
    "\n",
    "experiment_test = Experiment(**exp_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "16264"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(train_dataloader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mdimweb\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "Tracking run with wandb version 0.12.21"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Run data is saved locally in <code>/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220716_201842-1ezhlysz</code>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Syncing run <strong><a href=\"https://wandb.ai/dimweb/gpt_persona_bot/runs/1ezhlysz\" target=\"_blank\">persona_gpt</a></strong> to <a href=\"https://wandb.ai/dimweb/gpt_persona_bot\" target=\"_blank\">Weights & Biases</a> (<a href=\"https://wandb.me/run\" target=\"_blank\">docs</a>)<br/>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m Saving files without folders. If you want to preserve sub directories pass base_path to wandb.save, i.e. wandb.save(\"/mnt/folder/file.h5\", base_path=\"/mnt\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train end\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "Waiting for W&B process to finish... <strong style=\"color:green\">(success).</strong>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "8e222f3ca1724593be0f3c87416b5f8d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "VBox(children=(Label(value='0.048 MB of 0.048 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<style>\n",
       "    table.wandb td:nth-child(1) { padding: 0 10px; text-align: left ; width: auto;} td:nth-child(2) {text-align: left ; width: 100%}\n",
       "    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; justify-content: flex-start; width: 100% }\n",
       "    .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
       "    </style>\n",
       "<div class=\"wandb-row\"><div class=\"wandb-col\"><h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>train_loss</td><td>█▅▇▅▅▅▆▆▃▄▅▅▆▆▆▅▅▄▅▄▆▅▆▄▃▄▄▄▂▃▄▅▅▄▁▅▄▂▄▃</td></tr><tr><td>train_perplexity</td><td>█▄▆▃▄▄▅▅▂▃▄▄▄▅▅▄▄▃▄▃▅▃▄▃▂▃▂▃▂▂▃▄▃▃▁▄▃▁▃▂</td></tr><tr><td>val_loss</td><td>▁</td></tr><tr><td>valid_perplexity</td><td>▁</td></tr></table><br/></div><div class=\"wandb-col\"><h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>train_loss</td><td>1.53061</td></tr><tr><td>train_perplexity</td><td>4.621</td></tr><tr><td>val_loss</td><td>1.38318</td></tr><tr><td>valid_perplexity</td><td>3.98755</td></tr></table><br/></div></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Synced <strong style=\"color:#cdcd00\">persona_gpt</strong>: <a href=\"https://wandb.ai/dimweb/gpt_persona_bot/runs/1ezhlysz\" target=\"_blank\">https://wandb.ai/dimweb/gpt_persona_bot/runs/1ezhlysz</a><br/>Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 1 other file(s)"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Find logs at: <code>./wandb/run-20220716_201842-1ezhlysz/logs</code>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "experiment_test.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "what are you up to this evening ?\n",
      "air in my area is fresh that is why i play sports\n",
      "did you just come in from playing sports ?\n",
      "i am working out now , and drinking water\n",
      "i just got done sewing a new shirt\n",
      "no , i am working out now , and watching tv\n",
      "i have no time for tv being an art teacher\n",
      "that is nice of you , i am getting a degree at school now\n",
      "oh really what are you getting your degree in\n",
      "it is good to be busy sometimes , keeps you focus\n",
      "it really does which is why i love to sew my own clothing\n",
      "i am working on psychology because i had 2 car crashes in past\n",
      "oh wow do you like to visit thrift shops ?\n",
      "it affects my driving now , because of fear .\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(valid_dataset_csv.iloc[0]['chat'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220710_230742-8ryub57u_local\n",
      "Start conversation\n",
      "Persona: I would love to have a dog in future. I love dogs so much. Dogs are the best friends. I do not have a dog yet.\n",
      "Dreaming: True - 0.4889436960220337 \n",
      "User: What do you think about dogs?\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "50260 is not in list",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32m/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb Cell 16\u001b[0m in \u001b[0;36m<cell line: 62>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=50'>51</a>\u001b[0m user_inputs \u001b[39m=\u001b[39m [\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=51'>52</a>\u001b[0m \t\u001b[39m\"\u001b[39m\u001b[39mWhat do you think about dogs?\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=52'>53</a>\u001b[0m     \u001b[39m\"\u001b[39m\u001b[39mHi. What is your name?\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=56'>57</a>\u001b[0m \t\u001b[39m\"\u001b[39m\u001b[39mWhere is your dad?\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=57'>58</a>\u001b[0m ]\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=59'>60</a>\u001b[0m \u001b[39m# user_inputs = [item for i, item in enumerate(valid_dataset_csv.iloc[number_example]['chat'].split(\"\\n\")) if len(item) and (i+1) % 2 != 0]\u001b[39;00m\n\u001b[0;32m---> <a 
href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=61'>62</a>\u001b[0m Experiment\u001b[39m.\u001b[39;49mtest_with_ranking(\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=62'>63</a>\u001b[0m \t\u001b[39m# artifact_name=\"/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220716_201842-1ezhlysz_local\",\u001b[39;49;00m\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=63'>64</a>\u001b[0m \tartifact_name\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39m/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220710_230742-8ryub57u_local\u001b[39;49m\u001b[39m\"\u001b[39;49m,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=64'>65</a>\u001b[0m \t\u001b[39m# artifact_name=\"/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220711_171905-u49i12fh_local\",\u001b[39;49;00m\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=65'>66</a>\u001b[0m \tpersona\u001b[39m=\u001b[39;49mpersona,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=66'>67</a>\u001b[0m \tuser_inputs\u001b[39m=\u001b[39;49muser_inputs,\n\u001b[1;32m     <a 
href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=67'>68</a>\u001b[0m \tinteract\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=68'>69</a>\u001b[0m \tcuda\u001b[39m=\u001b[39;49m\u001b[39mTrue\u001b[39;49;00m,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=69'>70</a>\u001b[0m \tsentence_ranker\u001b[39m=\u001b[39;49msentence_ranker,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=70'>71</a>\u001b[0m \tthreshhold\u001b[39m=\u001b[39;49m\u001b[39m0.5\u001b[39;49m,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=71'>72</a>\u001b[0m \tpersona_amount_sentences\u001b[39m=\u001b[39;49m\u001b[39m3\u001b[39;49m,\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=72'>73</a>\u001b[0m \tbeam\u001b[39m=\u001b[39;49m\u001b[39m1\u001b[39;49m\n\u001b[1;32m     <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=73'>74</a>\u001b[0m )\n",
      "\u001b[1;32m/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb Cell 16\u001b[0m in \u001b[0;36mExperiment.test_with_ranking\u001b[0;34m(artifact_name, persona, user_inputs, interact, cuda, sentence_ranker, threshhold, persona_amount_sentences, beam)\u001b[0m\n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=499'>500</a>\u001b[0m model_response_list \u001b[39m=\u001b[39m \u001b[39mlist\u001b[39m(model_response[\u001b[39m0\u001b[39m])\n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=500'>501</a>\u001b[0m \u001b[39m# print(tokenizer.decode(model_response[0], skip_special_tokens=False))\u001b[39;00m\n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=501'>502</a>\u001b[0m \u001b[39m# end_speaker_index = Experiment.last_index(model_response_list, VOCAB_TOKENS['</sp_2>'])\u001b[39;00m\n\u001b[0;32m--> <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=502'>503</a>\u001b[0m end_speaker_index \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39m(bot_input_ids[\u001b[39m0\u001b[39m]) \u001b[39m+\u001b[39m model_response_list[\u001b[39mlen\u001b[39;49m(bot_input_ids[\u001b[39m0\u001b[39;49m])\u001b[39m+\u001b[39;49m\u001b[39m1\u001b[39;49m:]\u001b[39m.\u001b[39;49mindex(VOCAB_TOKENS[\u001b[39m'\u001b[39;49m\u001b[39m</sp_2>\u001b[39;49m\u001b[39m'\u001b[39;49m])\n\u001b[1;32m    <a 
href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=503'>504</a>\u001b[0m \u001b[39m# print(end_speaker_index, len(bot_input_ids), len(model_response_list))\u001b[39;00m\n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=504'>505</a>\u001b[0m \n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=505'>506</a>\u001b[0m \u001b[39m# model_response = model_response[:, :end_speaker_index+1]\u001b[39;00m\n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=506'>507</a>\u001b[0m \n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=507'>508</a>\u001b[0m \u001b[39m# chat_history_ids = model_response\u001b[39;00m\n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=508'>509</a>\u001b[0m \u001b[39m# bot_response_decode = tokenizer.decode(chat_history_ids[0][len(bot_input_ids[0])-1:], skip_special_tokens=True) \u001b[39;00m\n\u001b[1;32m    <a href='vscode-notebook-cell://ssh-remote%2Blnsigo.mipt.ru/home/dimweb/sandbox/persona_bot/gpt_persona_v1/gpt_persona_v1_experiments.ipynb#ch0000015vscode-remote?line=509'>510</a>\u001b[0m bot_response_decode \u001b[39m=\u001b[39m 
tokenizer\u001b[39m.\u001b[39mdecode(model_response[\u001b[39m0\u001b[39m][\u001b[39mlen\u001b[39m(bot_input_ids[\u001b[39m0\u001b[39m])\u001b[39m-\u001b[39m\u001b[39m1\u001b[39m:end_speaker_index], skip_special_tokens\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m) \n",
      "\u001b[0;31mValueError\u001b[0m: 50260 is not in list"
     ]
    }
   ],
   "source": [
    "from sentence_transformers import SentenceTransformer\n",
    "sentence_model = SentenceTransformer('nli-distilroberta-base-v2')\n",
    "\n",
    "persona = open(\"./persona_sentences_2.txt\").read()\n",
    "\n",
    "class SentenceRanker:\n",
    "\t\"\"\"Ranks persona sentences by cosine similarity to a user query.\"\"\"\n",
    "\tdef __init__(self, persona_sentences=None, sentence_model=None):\n",
    "\t\tself.persona_sentences = persona_sentences\n",
    "\t\tself.sentence_model = sentence_model\n",
    "\t\t# Pre-compute persona embeddings once; queries are compared against these.\n",
    "\t\tself.sentence_embeddings = self.sentence_model.encode(\n",
    "\t\t\tpersona_sentences, \n",
    "\t\t\tconvert_to_tensor=True\n",
    "\t\t)\n",
    "\t\t# Cache: \"{query}_{k}\" -> (similar_sentences, max_prob)\n",
    "\t\tself.ranked_sentences = {}\n",
    "\t\n",
    "\tdef rank_sentences(self, query, k=5):\n",
    "\t\t\"\"\"Return (top-k persona sentences, best cosine score) for `query`.\"\"\"\n",
    "\t\tkey = f\"{query}_{k}\"\n",
    "\t\t# Fix: the cache now stores the same (sentences, max_prob) tuple the\n",
    "\t\t# cold path returns, so cache hits no longer change the return type.\n",
    "\t\tif key in self.ranked_sentences:\n",
    "\t\t\treturn self.ranked_sentences[key]\n",
    "\t\t# Fix: use the injected model instead of the global `sentence_model`.\n",
    "\t\tuser_sentence_embeddings = self.sentence_model.encode(query, convert_to_tensor=True)\n",
    "\n",
    "\t\tcos_sim_ranks = self.cos_sim(\n",
    "\t\t\tuser_sentence_embeddings,\n",
    "\t\t\tself.sentence_embeddings\n",
    "\t\t)\n",
    "\t\t\n",
    "\t\ttop_indices = torch.argsort(cos_sim_ranks, descending=True)\n",
    "\t\tmax_prob = cos_sim_ranks[top_indices][0]\n",
    "\t\ttop_indices = list(top_indices[:k].cpu().numpy())\n",
    "\t\tsimilar_sentences = [self.persona_sentences[idx] for idx in top_indices]\n",
    "\t\tresult = (similar_sentences, max_prob)\n",
    "\t\tself.ranked_sentences[key] = result\n",
    "\t\treturn result\n",
    "\t\n",
    "\tdef cos_sim(self, a, b):\n",
    "\t\t\"\"\"Row-wise cosine similarity between a and b (a broadcasts over b's rows).\"\"\"\n",
    "\t\ta_norm = torch.nn.functional.normalize(a, p=2, dim=1)\n",
    "\t\tb_norm = torch.nn.functional.normalize(b, p=2, dim=1)\n",
    "\t\treturn torch.sum(a_norm * b_norm, dim=1)\n",
    "\n",
    "\n",
    "persona_sentences = persona.split(\"\\n\")\n",
    "persona_sentences = [item.strip() for item in persona_sentences if len(item) > 0]\n",
    "sentence_ranker = SentenceRanker(\n",
    "\tpersona_sentences=persona_sentences,\n",
    "\tsentence_model=sentence_model\n",
    ")\n",
    "\n",
    "\n",
    "number_example = 16\n",
    "persona = valid_dataset_csv['Persona'][number_example]\n",
    "\n",
    "user_inputs = [\n",
    "\t\"What do you think about dogs?\",\n",
    "    \"Hi. What is your name?\",\n",
    "    \"What do you like?\",\n",
    "    \"What is your job?\",\n",
    "\t\"Tell me about yourself please.\",\n",
    "\t\"Where is your dad?\",\n",
    "]\n",
    "\n",
    "# user_inputs = [item for i, item in enumerate(valid_dataset_csv.iloc[number_example]['chat'].split(\"\\n\")) if len(item) and (i+1) % 2 != 0]\n",
    "\n",
    "Experiment.test_with_ranking(\n",
    "\t# artifact_name=\"/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220716_201842-1ezhlysz_local\",\n",
    "\tartifact_name=\"/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220710_230742-8ryub57u_local\",\n",
    "\t# artifact_name=\"/data/home/dimweb/sandbox/persona_bot/gpt_persona_v1/wandb/run-20220711_171905-u49i12fh_local\",\n",
    "\tpersona=persona,\n",
    "\tuser_inputs=user_inputs,\n",
    "\tinteract=False,\n",
    "\tcuda=True,\n",
    "\tsentence_ranker=sentence_ranker,\n",
    "\tthreshhold=0.5,\n",
    "\tpersona_amount_sentences=3,\n",
    "\tbeam=2\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Ranking paragraphs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(['If I could have any superpower, it would be to speak any language so I could connect with anyone in the world.',\n",
       "  'One of my hidden talents is negotiating with others, which I believe is what makes me a strong sales representative.',\n",
       "  'My favourite subject in school was English, which is why I decided to become a writer.',\n",
       "  \"When I was younger, I wanted to be a pilot, which led me to later earn my pilot's license.\",\n",
       "  'My favourite part about my job is pitching unique advertising campaign ideas that help small businesses stand out from competitors.'],\n",
       " tensor([ 0.3229,  0.2916,  0.2554,  0.2393,  0.2238,  0.2046,  0.1786,  0.1779,\n",
       "          0.1743,  0.1632,  0.1603,  0.1571,  0.1568,  0.1431,  0.1418,  0.1247,\n",
       "          0.1121,  0.1099,  0.0970,  0.0930,  0.0860,  0.0735,  0.0626,  0.0546,\n",
       "          0.0409,  0.0323,  0.0272, -0.0027, -0.0079, -0.0209], device='cuda:0'))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sentence_transformers import SentenceTransformer\n",
    "sentence_model = SentenceTransformer('nli-distilroberta-base-v2')\n",
    "\n",
    "persona = open(\"./persona_sentences.txt\").read()\n",
    "\n",
    "class SentenceRanker:\n",
    "\t\"\"\"Ranks persona sentences by cosine similarity to a user query.\n",
    "\n",
    "\tNOTE(review): this duplicates the SentenceRanker defined in an earlier\n",
    "\tcell; the later definition silently shadows the earlier one. Consider\n",
    "\tkeeping a single definition in a shared module.\n",
    "\t\"\"\"\n",
    "\tdef __init__(self, persona_sentences=None, sentence_model=None):\n",
    "\t\tself.persona_sentences = persona_sentences\n",
    "\t\tself.sentence_model = sentence_model\n",
    "\t\t# Pre-compute persona embeddings once; queries are compared against these.\n",
    "\t\tself.sentence_embeddings = self.sentence_model.encode(\n",
    "\t\t\tpersona_sentences, \n",
    "\t\t\tconvert_to_tensor=True\n",
    "\t\t)\n",
    "\t\t# Cache: \"{query}_{k}\" -> (similar_sentences, max_prob)\n",
    "\t\tself.ranked_sentences = {}\n",
    "\t\n",
    "\tdef rank_sentences(self, query, k=5):\n",
    "\t\t\"\"\"Return (top-k persona sentences, best cosine score) for `query`.\"\"\"\n",
    "\t\tkey = f\"{query}_{k}\"\n",
    "\t\t# Fix: the cache now stores the same (sentences, max_prob) tuple the\n",
    "\t\t# cold path returns, so cache hits no longer change the return type.\n",
    "\t\tif key in self.ranked_sentences:\n",
    "\t\t\treturn self.ranked_sentences[key]\n",
    "\t\t# Fix: use the injected model instead of the global `sentence_model`.\n",
    "\t\tuser_sentence_embeddings = self.sentence_model.encode(query, convert_to_tensor=True)\n",
    "\n",
    "\t\tcos_sim_ranks = self.cos_sim(\n",
    "\t\t\tuser_sentence_embeddings,\n",
    "\t\t\tself.sentence_embeddings\n",
    "\t\t)\n",
    "\t\t\n",
    "\t\ttop_indices = torch.argsort(cos_sim_ranks, descending=True)\n",
    "\t\t# Fix: keep only the best score (previously the whole sorted tensor was\n",
    "\t\t# kept), matching the name `max_prob` and the sibling definition.\n",
    "\t\tmax_prob = cos_sim_ranks[top_indices][0]\n",
    "\t\ttop_indices = list(top_indices[:k].cpu().numpy())\n",
    "\t\tsimilar_sentences = [self.persona_sentences[idx] for idx in top_indices]\n",
    "\t\tresult = (similar_sentences, max_prob)\n",
    "\t\tself.ranked_sentences[key] = result\n",
    "\t\treturn result\n",
    "\t\n",
    "\tdef cos_sim(self, a, b):\n",
    "\t\t\"\"\"Row-wise cosine similarity between a and b (a broadcasts over b's rows).\"\"\"\n",
    "\t\t# Removed leftover debug print of tensor shapes.\n",
    "\t\ta_norm = torch.nn.functional.normalize(a, p=2, dim=1)\n",
    "\t\tb_norm = torch.nn.functional.normalize(b, p=2, dim=1)\n",
    "\t\treturn torch.sum(a_norm * b_norm, dim=1)\n",
    "\n",
    "\n",
    "persona_sentences = persona.split(\"\\n\")\n",
    "persona_sentences = [item for item in persona_sentences if len(item) > 0]\n",
    "sentence_ranker = SentenceRanker(\n",
    "\tpersona_sentences=persona_sentences,\n",
    "\tsentence_model=sentence_model\n",
    ")\n",
    "\n",
    "user_sentence = [\n",
    "\t\"Tell me about yourself please.\"\n",
    "]\n",
    "\n",
    "sentence_ranker.rank_sentences(\n",
    "\tuser_sentence, 5\n",
    ")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
