# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 18:08:47 2024

@author: Lenovo
"""

import torch
from torch.autograd import Variable
import os
import random
import linecache
import numpy as np
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
 
 
class Config:
    """Static training configuration for the father-daughter KinFaceW-I subset."""
    root = r'D:\luanshengNT\KinFaceW-I\images\father-dau'  # image directory
    txt_root = 'train.txt'        # generated list of 'image_path label' lines
    train_batch_size = 32
    train_number_epochs = 32

# Module-level alias kept because convert() reads `root` directly;
# previously this path literal was duplicated here and in Config.
root = Config.root
 
def show_plot(iteration, loss):
    """Plot the sampled loss values against their iteration counts and display."""
    plt.plot(iteration, loss)
    plt.show()
 
# Write every dataset image's path together with its class label into a txt file
def convert(train=True):
    """Generate the train-list file (Config.txt_root) for the father-dau images.

    Each of the 134 pairs contributes two images named fd_XXX_1.jpg and
    fd_XXX_2.jpg under `root`; the 0-based pair index is written as the
    class label, one 'image_path label' line per image.

    Fixes over the original: when train is False (or open() failed) the old
    code still fell through to f.write() on an undefined `f` (NameError),
    and the file handle was not context-managed.
    """
    if not train:
        return  # only a training list is ever generated

    data_path = root + '/'
    if not os.path.exists(data_path):
        os.makedirs(data_path)

    with open(Config.txt_root, 'w') as f:
        for i in range(134):
            # zero-padded 1-based pair id, e.g. 'fd_001_'
            stem = data_path + 'fd_' + f"{i + 1:03d}" + '_'
            for j in range(2):
                f.write(stem + str(j + 1) + '.jpg ' + str(i) + '\n')
 
class MyDataset(Dataset):
    """Siamese pair dataset backed by a text file of 'image_path label' lines.

    __getitem__ ignores its index and samples a random pair: with probability
    0.5 it keeps drawing a second line until both share a label (positive
    pair), otherwise any second line is accepted. The returned label tensor
    is 1.0 when the two classes differ, 0.0 when they match.

    Fixes over the original: the result of ``line.strip('\\n')`` was discarded
    (a no-op statement), and ``__len__`` opened the file without a context
    manager, leaking the handle on error.
    """

    def __init__(self, txt, transform=None, target_transform=None, should_invert=False):
        self.transform = transform                # applied to both images of a pair
        self.target_transform = target_transform  # kept for interface compat; unused here
        self.should_invert = should_invert        # kept for interface compat; unused here
        self.txt = txt                            # path to the list file

    def _random_line(self):
        # linecache line numbers are 1-based; returns [image_path, label_str]
        return linecache.getline(self.txt, random.randint(1, len(self))).strip('\n').split()

    def __getitem__(self, index):
        img0_list = self._random_line()

        if random.randint(0, 1):
            # positive pair: resample until the labels match
            while True:
                img1_list = self._random_line()
                if img0_list[1] == img1_list[1]:
                    break
        else:
            # any pair: second image may or may not share the class
            img1_list = self._random_line()

        img0 = Image.open(img0_list[0]).convert("L")  # grayscale
        img1 = Image.open(img1_list[0]).convert("L")

        if self.transform is not None:
            img0 = self.transform(img0)
            img1 = self.transform(img1)

        # 1.0 when the classes differ, 0.0 when they match
        label = np.array([int(img1_list[1] != img0_list[1])], dtype=np.float32)
        return img0, img1, torch.from_numpy(label)

    def __len__(self):
        # number of samples == number of lines in the list file
        with open(self.txt, 'r') as fh:
            return sum(1 for _ in fh)
 
# Siamese network: two inputs pass through one shared branch
class SiameseNetwork(nn.Module):
    """Twin-branch CNN: both inputs share one conv stack and MLP head.

    Input is a 1-channel 100x100 image; output is a 3-dimensional embedding
    per branch.
    """

    def __init__(self):
        super(SiameseNetwork, self).__init__()
        # Three conv stages, each: 5x5 conv -> ReLU -> BatchNorm -> Dropout2d(0.2).
        # With a 100x100 input the spatial size shrinks 100 -> 96 -> 92 -> 88.
        stages = [(1, 4), (4, 8), (8, 8)]
        conv_layers = []
        for c_in, c_out in stages:
            conv_layers += [
                nn.Conv2d(c_in, c_out, kernel_size=5),
                nn.ReLU(inplace=True),
                nn.BatchNorm2d(c_out),
                nn.Dropout2d(p=.2),
            ]
        self.cnn1 = nn.Sequential(*conv_layers)

        # MLP head: flattened 8*88*88 feature map -> 500 -> 500 -> 3
        self.fc1 = nn.Sequential(
            nn.Linear(8 * 88 * 88, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 3),
        )

    def forward_once(self, x):
        """Embed one image batch through the shared branch."""
        features = self.cnn1(x)
        flat = features.view(features.size()[0], -1)
        return self.fc1(flat)

    def forward(self, input1, input2):
        """Run both inputs through the shared weights; return both embeddings."""
        return self.forward_once(input1), self.forward_once(input2)
 
 
# Custom contrastive loss (Hadsell et al., 2006)
class ContrastiveLoss(torch.nn.Module):
    """Contrastive loss after Hadsell, Chopra & LeCun (2006).

    Expects label == 0 for same-class pairs and label == 1 for
    different-class pairs (matching MyDataset, which emits 1 when the two
    images' classes differ). Similar pairs are pulled together via the d^2
    term; dissimilar pairs are pushed apart up to `margin`.

    Fix over the original: the two terms were swapped relative to this
    label convention, so dissimilar pairs were pulled together and similar
    pairs pushed apart.
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin  # minimum separation enforced for dissimilar pairs

    def forward(self, output1, output2, label):
        distance = F.pairwise_distance(output1, output2)
        # label == 0 (same class): penalize distance
        similar_term = (1 - label) * torch.pow(distance, 2)
        # label == 1 (different class): penalize closeness within the margin
        dissimilar_term = label * torch.pow(torch.clamp(self.margin - distance, min=0.0), 2)
        # 0.5 factor matches the canonical formulation
        return 0.5 * torch.mean(similar_term + dissimilar_term)
 
if __name__ == '__main__':  # script entry point
    convert(True)  # regenerate the train-list file before training

    # Dataset yields (img0, img1, label) with 100x100 grayscale tensors;
    # label is 1.0 when the two images' classes differ.
    train_data = MyDataset(
        txt=Config.txt_root,
        transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]),
        should_invert=False,
    )
    train_dataloader = DataLoader(
        dataset=train_data,
        shuffle=True,
        num_workers=2,
        batch_size=Config.train_batch_size,
    )

    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)

    counter = []          # x-axis for the loss plot (iteration count)
    loss_history = []     # sampled loss values
    iteration_number = 0

    net.train()  # ensure Dropout/BatchNorm run in training mode
    for epoch in range(0, Config.train_number_epochs):
        for i, data in enumerate(train_dataloader, 0):
            # NOTE: deprecated Variable() wrappers removed — no-ops since torch 0.4
            img0, img1, label = data
            optimizer.zero_grad()
            output1, output2 = net(img0, img1)
            loss_contrastive = criterion(output1, output2, label)
            loss_contrastive.backward()
            optimizer.step()

            if i % 10 == 0:  # sample the loss every 10 steps
                print("Epoch:{},  Current loss {}\n".format(epoch, loss_contrastive.item()))
                iteration_number += 10
                counter.append(iteration_number)
                loss_history.append(loss_contrastive.item())

    # Saves the full module via pickle; a state_dict is more portable across versions
    torch.save(net, 'model.pth')
    show_plot(counter, loss_history)  # visualize the sampled loss curve

