{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "4eacc22c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Dict, List, Union\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.nn import CrossEntropyLoss,BCELoss\n",
    "from sentence_transformers import SentenceTransformer\n",
    "\n",
    "from torch.utils.data import  DataLoader,Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "382bc045",
   "metadata": {},
   "outputs": [],
   "source": [
     "import os\n",
     "# Pin this process to physical GPU 3; with CUDA_VISIBLE_DEVICES set, that\n",
     "# card is addressed as \"cuda:0\" below. NOTE(review): this must run before\n",
     "# torch initializes CUDA — confirm no earlier cell touched the GPU.\n",
     "os.environ[\"CUDA_VISIBLE_DEVICES\"] = '3'\n",
     "# Fall back to CPU when no GPU is available.\n",
     "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ffefba71",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MLP(torch.nn.Module):\n",
    "    \"\"\"\n",
    "    A simple feedforward layer or a stack of multiple ones\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, feature_model_out, feature_dense_in, label_num=1, dropout: float = 0.1):\n",
    "        super(MLP, self).__init__()\n",
    "        self.activation_relu = nn.ReLU()\n",
    "        self.activation_sigmoid = nn.Sigmoid()\n",
    "        self.dense_one =  nn.Linear(in_features=feature_model_out, out_features=feature_dense_in)\n",
    "        self.dense_two =  nn.Linear(in_features=feature_dense_in, out_features=label_num)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.activation_relu(self.dense_one(self.dropout(x)))\n",
    "        x = self.activation_sigmoid(self.dense_two(self.dropout(x)))   #使用sigmoid的做法\n",
    "#         x = self.dense_two(self.dropout(x)) #使用多分类的做法\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "5c77f040",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SbertClassifier(nn.Module):\n",
    "    \"\"\"\n",
    "    \"\"\"\n",
    "    def __init__(self, pretrained_model_name, hidden_num, label_num) -> None:\n",
    "        super().__init__()\n",
    "        self.encoder = SentenceTransformer(pretrained_model_name)\n",
    "        st_dim = self.encoder.get_sentence_embedding_dimension()\n",
    "        self.clf = MLP(st_dim*2,hidden_num,label_num)\n",
    "        \n",
    "    #保证两个输入句子\n",
    "    def forward(self, text_a, text_b) -> torch.Tensor:\n",
    "        with torch.no_grad():\n",
    "            embedded_a = self.encoder.encode(text_a, convert_to_numpy=False, convert_to_tensor=True)\n",
    "            embedded_a.to(device)\n",
    "            embedded_b = self.encoder.encode(text_b, convert_to_numpy=False, convert_to_tensor=True)\n",
    "            embedded_b.to(device)\n",
    "            embedded = torch.cat((embedded_a,embedded_b),dim=-1)\n",
    "        with torch.enable_grad():\n",
    "            return self.clf(embedded).squeeze()\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e51613d4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SbertClassificationModel(nn.Module):\n",
    "    #初始化模型\n",
    "    def __init__(self, sbert_classifier: SbertClassifier,lr: float = 0.001, weight_decay: float = 0.01):\n",
    "        super().__init__()\n",
    "        self.sbert_classifier = sbert_classifier\n",
    "        self.lr = lr\n",
    "        self.weight_decay = weight_decay\n",
    "#         self.loss =  CrossEntropyLoss()\n",
    "        self.loss = BCELoss()\n",
    "        self.trainable_params = list(self.sbert_classifier.clf.parameters())\n",
    "      \n",
    "        self.max_len = 512\n",
    "    \n",
    "    #预测标签概率\n",
    "#     def predict_label_probabilities(self, **kwargs):\n",
    "#         return F.softmax(self.get_logits(**kwargs), dim=-1).detach().cpu().numpy()\n",
    "    #预测0-1的概率\n",
    "    def get_logits(self, input_a, input_b):\n",
    "        input_sentence_a, input_sentence_b = input_a, input_b\n",
    "        return self.sbert_classifier.forward(input_sentence_a, input_sentence_b)\n",
    "\n",
    "    def get_loss(self, input_a, input_b, label):\n",
    "        logits = self.get_logits(input_a, input_b)\n",
    "        loss = self.loss(logits, label)\n",
    "        return  loss\n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "8c9011f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyDataset(Dataset):\n",
    "    \"\"\"\n",
    "        下载数据、初始化数据，都可以在这里完成\n",
    "    \"\"\"\n",
    "    def __init__(self,input_a, input_b, label):\n",
    "    #         xy = np.loadtxt('../dataSet/diabetes.csv.gz', delimiter=',', dtype=np.float32) # 使用numpy读取数据\n",
    "        self.input_a = input_a\n",
    "        self.input_b = input_b\n",
    "#         self.label = torch.IntTensor(label)\n",
    "        self.label = torch.FloatTensor(label)\n",
    "        \n",
    "        self.len =  len(self.input_a)\n",
    "    \n",
    "    def __getitem__(self, index):\n",
    "        return  self.input_a[index], self.input_b[index],  self.label[index]\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.len\n",
    "    \n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "836962ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['石家庄售后点在哪里', '电脑冲不了电', '扬天M7200c00如何切换F1F12', '笔记本更换壳子', '怎么进安全系统']\n",
      "['要申请售后吗这个问题', '不行，冲不上电', '恢复F1F12标准功能', '更改管理员', '安全模式怎么进？']\n",
      "[0.0, 1.0, 1.0, 0.0, 1.0]\n"
     ]
    }
   ],
   "source": [
    "# my_data = MyDataset([\"我想车市一下\",\"ok,就好了\"],[\"我想车市一下\",\"ok,就好了\"], [0.0,1.0])\n",
    "# train_loader2 = DataLoader(dataset=my_data, batch_size=2, shuffle=True)\n",
    "\n",
    "#加载数据集\n",
    "def loadFile(filep):\n",
    "    input_sentence_a, input_sentence_b, labels = [], [], [] \n",
    "    with open(filep,'r', encoding=\"utf-8\") as fr:\n",
    "        for line in fr:\n",
    "            con = line.strip().split('\\t')\n",
    "            if len(con)==3:\n",
    "                input_sentence_a.append(con[0])\n",
    "                input_sentence_b.append(con[1])\n",
    "                labels.append(float(con[2]))\n",
    "#                 labels.append(int(con[2]))\n",
    "    return input_sentence_a, input_sentence_b, labels\n",
     "# Load the train/dev splits and show a small sample as a sanity check.\n",
     "train_sentence_a, train_sentence_b, train_label = loadFile(\"./0208/test_50/train.txt\")\n",
     "print(train_sentence_a[0:5])\n",
     "print(train_sentence_b[0:5])\n",
     "print(train_label[0:5])\n",
     "train_data = MyDataset(train_sentence_a, train_sentence_b, train_label)\n",
     "dev_sentence_a, dev_sentence_b, dev_label  = loadFile(\"./0208/test_50/dev.txt\")\n",
     "dev_data = MyDataset(dev_sentence_a, dev_sentence_b, dev_label)\n",
     "# Batched loaders; shuffling the dev set does not change the accuracy metric.\n",
     "train_loader = DataLoader(dataset=train_data, batch_size=32, shuffle=True)\n",
     "dev_loader = DataLoader(dataset=dev_data, batch_size=32, shuffle=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bdf3b22f",
   "metadata": {},
   "outputs": [],
   "source": [
    "ST_model =  SbertClassifier(\"./distiluse-base-multilingual-cased-v2/\", 10, 1)\n",
    "stTrainModel = SbertClassificationModel(ST_model)\n",
    "stTrainModel.to(device)\n",
    "_opt = torch.optim.AdamW(params=stTrainModel.trainable_params, lr=0.001, weight_decay=0.01)\n",
    "# loss_function =  CrossEntropyLoss()\n",
    "loss_function =  BCELoss()\n",
    "BCELoss\n",
    "best_acc = 0\n",
    "\n",
    "for epoch in range(20):\n",
    "    stTrainModel.train()\n",
    "    for i, data in enumerate(train_loader):    \n",
    "        _opt.zero_grad()\n",
    "        input_a, input_b ,label = data\n",
    "#         label =label.to(torch.int64).to(device)  #用于softmax分类\n",
    "        loss = stTrainModel.get_loss(input_a, input_b, label.to(device))\n",
    "        loss.backward()\n",
    "        _opt.step()\n",
    "    dev_acc = 0\n",
    "    dev_loss = 0\n",
    "    epoch_num = 0\n",
    "    for i, data in enumerate(dev_loader):\n",
    "        stTrainModel.eval()\n",
    "        input_a, input_b ,label = data\n",
    "#         label =label.to(torch.int64).to(device) #用于softmax分类\n",
    "        _var_logits = stTrainModel.get_logits(input_a, input_b)\n",
    "        _loss = loss_function(_var_logits, label.to(device))\n",
    "        dev_loss += _loss\n",
    "#         print(label.shape)\n",
    "#         print(_var_logits.shape)\n",
    "        epoch_num += 1\n",
    "#         dev_acc +=(_var_logits.argmax(1)==label).sum().item() #多分类使用\n",
    "        label = label.to(device)\n",
    "        mask = _var_logits.ge(0.5).float().squeeze()  # 以0.5为阈值进行分类\n",
    "        correct = (mask == label).sum()\n",
    "        dev_acc+=correct\n",
    "    if dev_acc>best_acc:\n",
    "        best_acc = dev_acc\n",
    "        torch.save(stTrainModel,\"0210_model/checkpoint.pkl\")\n",
    "    print(\"dev data acc:{}    loss:{}\".format(dev_acc/len(dev_sentence_a), dev_loss/epoch_num))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a606e2dc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "8f156de0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.6187, device='cuda:0', grad_fn=<SqueezeBackward0>)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#测试阶段\n",
    "model = torch.load('0210_model/checkpoint.pkl')\n",
    "test_sentence_a, test_sentence_b = \"测试句子1\",\"测试句子2\"\n",
    "model.get_logits(test_sentence_a, test_sentence_b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6cad386",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:py_dongxz] *",
   "language": "python",
   "name": "conda-env-py_dongxz-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
