{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "signed-lotus",
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python3\n",
    "# -*- coding: utf-8 -*-\n",
    " \n",
    "import torch\n",
    "from torch.autograd import Variable\n",
    "import os\n",
    "import random\n",
    "import linecache\n",
    "import numpy as np\n",
    "import torchvision\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torchvision import transforms\n",
    "from PIL import Image\n",
    "import matplotlib.pyplot as plt\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    " \n",
    " \n",
    "root = r'./data/faces/training/'\n",
    " \n",
    "class Config():\n",
    "    # Hyperparameter / path container used by the training script below.\n",
    "    # NOTE(review): this absolute Windows path is never read by this script\n",
    "    # (convert() uses the module-level `root` instead) -- confirm before relying on it.\n",
    "    root = r'F:\\dataset\\att_faces'\n",
    "    # Index file of '<image_path> <class_index>' lines, written by convert().\n",
    "    txt_root = 'train.txt'\n",
    "    train_batch_size = 32  # samples per mini-batch\n",
    "    train_number_epochs = 32  # full passes over the training pairs\n",
    " \n",
    "def show_plot(iteration, loss):\n",
    "    \"\"\"Plot the recorded loss values against their iteration counts and display.\"\"\"\n",
    "    x_vals, y_vals = iteration, loss\n",
    "    plt.plot(x_vals, y_vals)\n",
    "    plt.show()\n",
    " \n",
    "# Write '<image_path> <class_index>' lines for the AT&T faces dataset\n",
    "# (40 subjects x 10 images each) into the index file Config.txt_root.\n",
    "def convert(train=True):\n",
    "    \"\"\"Build the index file consumed by MyDataset.\n",
    "\n",
    "    Only the training index is supported: calling with train=False is a\n",
    "    no-op (the original code raised NameError on the unopened handle).\n",
    "    \"\"\"\n",
    "    if not train:\n",
    "        return\n",
    "    data_path = root + '/'\n",
    "    if not os.path.exists(data_path):\n",
    "        os.makedirs(data_path)\n",
    "    # 'with' closes the handle even if a write fails; an unwritable\n",
    "    # Config.txt_root now raises instead of being silently swallowed.\n",
    "    with open(Config.txt_root, 'w') as f:\n",
    "        for i in range(40):      # subject folders s1 .. s40\n",
    "            for j in range(10):  # images 1.pgm .. 10.pgm per subject\n",
    "                img_path = data_path + 's' + str(i + 1) + '/' + str(j + 1) + '.pgm'\n",
    "                f.write(img_path + ' ' + str(i) + '\\n')\n",
    " \n",
    "class MyDataset(Dataset):\n",
    "    def __init__(self, txt, transform=None, target_transform=None, should_invert=False):\n",
    " \n",
    "        self.transform = transform\n",
    "        self.target_transform = target_transform\n",
    "        self.should_invert = should_invert\n",
    "        self.txt = txt\n",
    " \n",
    "    def __getitem__(self, index):\n",
    "        line = linecache.getline(self.txt, random.randint(1, self.__len__()))\n",
    "        line.strip('\\n')\n",
    "        img0_list = line.split()\n",
    "        should_get_same_class = random.randint(0, 1) #若0则取两张不同人的照片，否则则取是否同一个人的都可以\n",
    "        if should_get_same_class:\n",
    "            while True:\n",
    "                img1_list = linecache.getline(self.txt, random.randint(1, self.__len__())).strip('\\n').split()\n",
    "                if img0_list[1] == img1_list[1]:\n",
    "                    break\n",
    "        else:\n",
    "            img1_list = linecache.getline(self.txt, random.randint(1, self.__len__())).strip('\\n').split()\n",
    " \n",
    "        img0 = Image.open(img0_list[0])\n",
    "        img1 = Image.open(img1_list[0])\n",
    " \n",
    " \n",
    "        if self.transform is not None:\n",
    "            img0 = self.transform(img0)\n",
    "            img1 = self.transform(img1)\n",
    " \n",
    "        return img0, img1, torch.from_numpy(np.array([int(img1_list[1] != img0_list[1])], dtype=np.float32))\n",
    " \n",
    "    def __len__(self):\n",
    "        fh = open(self.txt, 'r')\n",
    "        num = len(fh.readlines())\n",
    "        fh.close()\n",
    "        return num\n",
    " \n",
    "class SiameseNetwork(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(SiameseNetwork, self).__init__()\n",
    "        self.cnn1 = nn.Sequential(\n",
    "            nn.Conv2d(1, 4, kernel_size=5), #pgm是灰度图的格式，所以第一层卷积输入层是1\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.BatchNorm2d(4),\n",
    "            nn.Dropout2d(p=.2),\n",
    " \n",
    "            nn.Conv2d(4, 8, kernel_size=5),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.BatchNorm2d(8),\n",
    "            nn.Dropout2d(p=.2),\n",
    " \n",
    "            nn.Conv2d(8, 8, kernel_size=5),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.BatchNorm2d(8),\n",
    "            nn.Dropout2d(p=.2),\n",
    "        )\n",
    " \n",
    "        self.fc1 = nn.Sequential(\n",
    "            nn.Linear(8 * 88 * 88, 500),\n",
    "            nn.ReLU(inplace=True),\n",
    " \n",
    "            nn.Linear(500, 500),\n",
    "            nn.ReLU(inplace=True),\n",
    " \n",
    "            nn.Linear(500, 3)\n",
    "        )\n",
    " \n",
    "    def forward_once(self, x):\n",
    "        output = self.cnn1(x)\n",
    "        output = output.view(output.size()[0], -1)\n",
    "        output = self.fc1(output)\n",
    "        return output\n",
    " \n",
    "    def forward(self, input1, input2):\n",
    "        output1 = self.forward_once(input1)\n",
    "        output2 = self.forward_once(input2)\n",
    "        return output1, output2\n",
    " \n",
    " \n",
    "# Custom Contrastive Loss\n",
    "class ContrastiveLoss(torch.nn.Module):\n",
    "    def __init__(self, margin=2.0):\n",
    "        super(ContrastiveLoss, self).__init__()\n",
    "        self.margin = margin\n",
    " \n",
    "    def forward(self, output1, output2, label):\n",
    "        euclidean_distance = F.pairwise_distance(output1, output2)\n",
    "        loss_contrastive = torch.mean((label) * torch.pow(euclidean_distance, 2) +\n",
    "                                      (1-label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))\n",
    "        return loss_contrastive*0.5\n",
    " \n",
    "if __name__ == '__main__':\n",
    "    # Build the '<path> <label>' index file, then train the siamese net.\n",
    "    convert(True)\n",
    "    train_data = MyDataset(txt=Config.txt_root, transform=transforms.Compose(\n",
    "        [transforms.Resize((100, 100)), transforms.ToTensor()]), should_invert=False)\n",
    "    train_dataloader = DataLoader(dataset=train_data, shuffle=True, num_workers=2,\n",
    "                                  batch_size=Config.train_batch_size)\n",
    "\n",
    "    # Fall back to CPU so the script also runs on machines without CUDA\n",
    "    # (the original unconditionally called .cuda()).\n",
    "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "    net = SiameseNetwork().to(device)\n",
    "    criterion = ContrastiveLoss()\n",
    "    optimizer = optim.Adam(net.parameters(), lr=0.0005)\n",
    "\n",
    "    counter = []\n",
    "    loss_history = []\n",
    "    iteration_number = 0\n",
    "\n",
    "    for epoch in range(0, Config.train_number_epochs):\n",
    "        for i, data in enumerate(train_dataloader, 0):\n",
    "            # torch.autograd.Variable is deprecated: plain tensors track\n",
    "            # gradients on their own since PyTorch 0.4.\n",
    "            img0, img1, label = (t.to(device) for t in data)\n",
    "            output1, output2 = net(img0, img1)\n",
    "            optimizer.zero_grad()\n",
    "            loss_contrastive = criterion(output1, output2, label)\n",
    "            loss_contrastive.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            if i % 10 == 0:\n",
    "                print(\"Epoch:{},  Current loss {}\\n\".format(epoch, loss_contrastive.item()))\n",
    "                iteration_number += 10\n",
    "                counter.append(iteration_number)\n",
    "                loss_history.append(loss_contrastive.item())\n",
    "    # Saves the whole pickled module; loading requires this class definition.\n",
    "    # Prefer torch.save(net.state_dict(), ...) when only weights are needed.\n",
    "    torch.save(net, 'model.pth')\n",
    "    show_plot(counter, loss_history)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
