# -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 22:59:27 2024

@author: Lenovo
"""
import torch
from torch.autograd import Variable
import os
import random
import linecache
import numpy as np
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
 
 
# Root directory of the AT&T face database; convert() expects it to contain
# subdirectories s1..s40, each holding images 1.pgm..10.pgm.
root = r'D:\luanshengNT\att_faces'
 
class Config():
    """Training configuration, grouped as plain class attributes."""
    root = r'D:\luanshengNT\att_faces'  # dataset root (duplicates the module-level `root`)
    txt_root = 'train.txt'              # index file produced by convert()
    train_batch_size = 32               # samples per mini-batch
    train_number_epochs = 32            # full passes over the index file
 
def show_plot(iteration, loss):
    """Plot recorded loss values against iteration counts and display the figure.

    iteration: sequence of x-axis values (iteration counters).
    loss: sequence of y-axis values (loss at each counter).
    """
    plt.plot(iteration, loss)
    plt.show()
 
#把数据集所有图片的名字+图片的类型列出来写到一个txt文件中
# List every image path plus its class label in a text index file.
def convert(train=True, txt_path=None, data_root=None):
    """Write an index file with one "<image_path> <label>" line per image.

    Assumes the AT&T face layout: <data_root>/s<1..40>/<1..10>.pgm,
    i.e. 40 subjects with 10 grayscale images each.  The label is the
    0-based subject index (0..39).

    Parameters
    ----------
    train : bool
        Kept for backward compatibility.  The index is only written when
        True; previously train=False crashed with a NameError on the
        undefined file handle.
    txt_path : str, optional
        Destination index file; defaults to Config.txt_root.
    data_root : str, optional
        Dataset root directory; defaults to the module-level `root`.
    """
    if not train:
        return
    txt_path = Config.txt_root if txt_path is None else txt_path
    data_root = root if data_root is None else data_root
    data_path = data_root + '/'
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    # A context manager guarantees the file is closed even if writing fails
    # (the original used a bare try/except around open() and leaked on error).
    with open(txt_path, 'w') as f:
        for i in range(40):        # subject directories s1..s40
            for j in range(10):    # images 1.pgm..10.pgm per subject
                img_path = data_path + 's' + str(i + 1) + '/' + str(j + 1) + '.pgm'
                f.write(img_path + ' ' + str(i) + '\n')
 
class MyDataset(Dataset):
    """Siamese pair dataset backed by a text index file.

    Each line of `txt` is "<image_path> <label>".  __getitem__ ignores the
    requested index and samples a random pair: with probability 0.5 the
    second image is forced to share the first image's class; otherwise it
    is drawn freely (and may still match by chance).  The returned target
    is 1.0 when the two classes differ and 0.0 when they match.
    """

    def __init__(self, txt, transform=None, target_transform=None, should_invert=False):
        self.transform = transform
        self.target_transform = target_transform
        self.should_invert = should_invert
        self.txt = txt
        # Cached line count; the index file is read once, on first use.
        # (The original re-read the whole file on every __len__ call and
        # opened it without a context manager.)
        self._length = None

    def _random_line(self):
        # linecache line numbers are 1-based.  Fix of an original no-op:
        # the result of strip('\n') was discarded instead of rebound.
        return linecache.getline(self.txt, random.randint(1, len(self))).strip('\n').split()

    def __getitem__(self, index):
        img0_list = self._random_line()
        # 1 -> force a same-class partner, 0 -> partner drawn freely.
        should_get_same_class = random.randint(0, 1)
        if should_get_same_class:
            while True:
                img1_list = self._random_line()
                if img0_list[1] == img1_list[1]:
                    break
        else:
            img1_list = self._random_line()

        img0 = Image.open(img0_list[0])
        img1 = Image.open(img1_list[0])

        if self.transform is not None:
            img0 = self.transform(img0)
            img1 = self.transform(img1)

        # Target: 1.0 for a dissimilar pair, 0.0 for a similar one.
        return img0, img1, torch.from_numpy(
            np.array([int(img1_list[1] != img0_list[1])], dtype=np.float32))

    def __len__(self):
        if self._length is None:
            with open(self.txt, 'r') as fh:
                self._length = sum(1 for _ in fh)
        return self._length
 
class SiameseNetwork(nn.Module):
    """Twin-branch embedding network for contrastive face verification.

    Both inputs pass through the SAME convolutional stack and fully
    connected head (shared weights), producing one 3-D embedding vector
    per input image.
    """

    def __init__(self):
        super(SiameseNetwork, self).__init__()
        # Input has a single channel: .pgm images are grayscale.
        self.cnn1 = nn.Sequential(
            nn.Conv2d(1, 4, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(4),
            nn.Dropout2d(p=.2),

            nn.Conv2d(4, 8, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
            nn.Dropout2d(p=.2),

            nn.Conv2d(8, 8, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
            nn.Dropout2d(p=.2),
        )

        # Three 5x5 valid convolutions shrink a 100x100 input to 88x88,
        # which fixes the flattened size of the first Linear layer.
        self.fc1 = nn.Sequential(
            nn.Linear(8 * 88 * 88, 500),
            nn.ReLU(inplace=True),

            nn.Linear(500, 500),
            nn.ReLU(inplace=True),

            nn.Linear(500, 3)
        )

    def forward_once(self, x):
        """Embed one batch of images: conv features -> flatten -> FC head."""
        features = self.cnn1(x)
        flat = features.view(features.size(0), -1)
        return self.fc1(flat)

    def forward(self, input1, input2):
        """Run both inputs through the shared branch; return both embeddings."""
        return self.forward_once(input1), self.forward_once(input2)
 
 
# Custom Contrastive Loss
# Custom Contrastive Loss
class ContrastiveLoss(torch.nn.Module):
    """Contrastive loss (Hadsell et al., 2006) for Siamese embeddings.

    Expects label == 0 for a similar pair and label == 1 for a dissimilar
    pair — the convention MyDataset produces.  Similar pairs are pulled
    together (d^2); dissimilar pairs are pushed at least `margin` apart
    (max(0, margin - d)^2).
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2)
        # BUG FIX: the two terms were swapped relative to the dataset's
        # labels (label == 1 means *dissimilar*), so training would have
        # pulled different identities together and pushed same identities
        # apart.  Correct form: (1-label)*d^2 + label*clamp(margin-d)^2.
        loss_contrastive = torch.mean((1 - label) * torch.pow(euclidean_distance, 2) +
                                      label * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
        # Keep the original 0.5 scale factor.
        return loss_contrastive * 0.5

if __name__ == '__main__':
    # Rebuild the index file, then train the Siamese network.
    convert(True)
    # Resize every face to 100x100 so the flattened feature size in
    # SiameseNetwork (8 * 88 * 88) is correct.
    train_data = MyDataset(txt=Config.txt_root, transform=transforms.Compose(
        [transforms.Resize((100, 100)), transforms.ToTensor()]), should_invert=False)
    train_dataloader = DataLoader(dataset=train_data, shuffle=True, num_workers=2,
                                  batch_size=Config.train_batch_size)

    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)

    counter = []        # x-axis: iteration checkpoints (every 10 batches)
    loss_history = []   # y-axis: loss sampled at those checkpoints
    iteration_number = 0

    net.train()
    for epoch in range(Config.train_number_epochs):
        for i, (img0, img1, label) in enumerate(train_dataloader):
            # torch.autograd.Variable is deprecated since PyTorch 0.4;
            # tensors are used directly.  Gradients are cleared BEFORE the
            # forward/backward pass.
            optimizer.zero_grad()
            output1, output2 = net(img0, img1)
            loss_contrastive = criterion(output1, output2, label)
            loss_contrastive.backward()
            optimizer.step()

            if i % 10 == 0:
                print("Epoch:{},  Current loss {}\n".format(epoch, loss_contrastive.item()))
                iteration_number += 10
                counter.append(iteration_number)
                loss_history.append(loss_contrastive.item())
    # NOTE(review): this pickles the whole module; torch.save(net.state_dict(), ...)
    # is the recommended format, but the on-disk artifact is kept unchanged here.
    torch.save(net, 'model.pth')
    show_plot(counter, loss_history)

