# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 15:40:41 2024

@author: Lenovo
"""
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torchvision.utils import save_image

# Siamese network model definition
class SiameseNetwork(nn.Module):
    """Siamese CNN that embeds each 100x100 grayscale image into R^3.

    Both inputs of a pair are passed through the same (shared-weight)
    convolutional trunk and fully connected head.
    """

    def __init__(self):
        super(SiameseNetwork, self).__init__()
        # Three conv stages; input has a single channel because the .pgm
        # images are grayscale.
        self.cnn1 = nn.Sequential(
            nn.Conv2d(1, 4, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(4),
            nn.Dropout2d(p=0.2),
            nn.Conv2d(4, 8, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
            nn.Dropout2d(p=0.2),
            nn.Conv2d(8, 8, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
            nn.Dropout2d(p=0.2),
        )
        # Spatial size shrinks 100 -> 96 -> 92 -> 88 across the three 5x5
        # convolutions, hence 8 * 88 * 88 flattened features.
        self.fc1 = nn.Sequential(
            nn.Linear(8 * 88 * 88, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 3),
        )

    def forward_once(self, x):
        """Embed a single image batch of shape (B, 1, 100, 100)."""
        features = self.cnn1(x)
        flat = features.view(features.size(0), -1)
        return self.fc1(flat)

    def forward(self, input1, input2):
        """Run both inputs through the shared weights; return both embeddings."""
        return self.forward_once(input1), self.forward_once(input2)
 
class ContrastiveLoss(nn.Module):
    """Contrastive loss for siamese embeddings (Hadsell et al., 2006).

    Pulls label-1 (similar) pairs together and pushes label-0 (dissimilar)
    pairs at least ``margin`` apart in embedding space.

    BUG FIX: this class was previously (mis)named ``CustomDataset`` while its
    ``__init__`` called ``super(ContrastiveLoss, ...)`` on a name that did not
    exist, and ``F`` was never imported — instantiating it raised immediately.
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin  # minimum distance enforced for dissimilar pairs

    def forward(self, output1, output2, label):
        """Return the mean contrastive loss over the batch.

        Args:
            output1, output2: (batch, embed_dim) embedding tensors.
            label: (batch,) float tensor — 1 for similar pairs, 0 otherwise.
        """
        euclidean_distance = F.pairwise_distance(output1, output2)
        loss_contrastive = torch.mean(label * torch.pow(euclidean_distance, 2) +
                                      (1 - label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
        # The 0.5 factor follows the original formulation of the loss.
        return loss_contrastive * 0.5


# Custom dataset class (restored from the commented-out version: the script
# below instantiates CustomDataset(data_pairs, root_dir), which requires
# exactly this Dataset, not a loss module).
class CustomDataset(Dataset):
    """Pair dataset: yields (img1, img2, label) samples for siamese training."""

    def __init__(self, data_pairs, root_dir, transform=None):
        self.data_pairs = data_pairs  # list of (path1, path2, label) tuples
        self.root_dir = root_dir      # kept for API compatibility; paths are absolute
        self.transform = transform    # optional post-resize transform (e.g. ToTensor)

    def __len__(self):
        return len(self.data_pairs)

    def __getitem__(self, index):
        img1_path, img2_path, label = self.data_pairs[index]
        # 'L' = single-channel grayscale, matching Conv2d(1, ...) in the net.
        img1 = Image.open(img1_path).convert('L')
        img2 = Image.open(img2_path).convert('L')
        # Resize to the 100x100 input size the network's fc layer expects.
        resize_transform = transforms.Resize((100, 100))
        img1 = resize_transform(img1)
        img2 = resize_transform(img2)

        if self.transform:
            img1 = self.transform(img1)
            img2 = self.transform(img2)

        return img1, img2, label
#%%
# Set up the root directory and the pair list
# Root directory holding the KinFaceW-I father-daughter images.
root_dir = r'D:\luanshengNT\KinFaceW-I\images\father-dau'
# (path1, path2, label) tuples consumed by CustomDataset; filled in below.
# NOTE: the previous `image_files = sorted(os.listdir(root_dir))` was never
# read anywhere, so the redundant directory scan has been removed.
data_pairs = []

#%%  Filter out hidden/unreadable files so they cannot break image loading
# Keep only entries PIL can actually open. Reuses root_dir instead of
# re-hardcoding the same path, and sorts the listing so the consecutive-file
# pairing below is deterministic (os.listdir order is arbitrary).
valid_image_files = []

for filename in sorted(os.listdir(root_dir)):
    file_path = os.path.join(root_dir, filename)
    try:
        # Context manager guarantees the file handle is released even when
        # PIL raises while parsing the header.
        with Image.open(file_path):
            pass
        valid_image_files.append(file_path)  # store the absolute path
    except (IOError, SyntaxError):  # unreadable / non-image file
        print(f"Ignoring non-image file: {file_path}")
#%%
# Consecutive files form one (parent, child) pair; label 1 marks a kin pair.
# BUG FIX: valid_image_files already holds absolute paths (joined above), so
# the previous os.path.join(root_dir, ...) double-join was redundant and only
# worked by accident of ntpath's absolute-second-argument rule.
for i in range(0, len(valid_image_files) - 1, 2):
    img1_path = valid_image_files[i]
    img2_path = valid_image_files[i + 1]
    # NOTE(review): every pair is labelled 1 (similar); contrastive training
    # normally also needs label-0 (non-kin) pairs — confirm this is intended.
    data_pairs.append((img1_path, img2_path, 1))
#print("Valid image files:")
#print(valid_image_files)
#%%test data
# --- quick sanity checks on the constructed pair list ---
print(data_pairs[:5])  # show the first 5 entries
print("Total pairs:", len(data_pairs))

# Inspect the first tuple and its length.
print("First pair:", data_pairs[0])
print("Length of the first pair:", len(data_pairs[0]))

# Dump every pair's paths and label.
for first_path, second_path, pair_label in data_pairs:
    print("Image 1 path:", first_path)
    print("Image 2 path:", second_path)
    print("Label:", pair_label)
# Total count once more, after the full dump.
print("Total pairs:", len(data_pairs))

# Length of a single sample tuple.
sample_pair = data_pairs[0]
print("Length of the sample pair:", len(sample_pair))
#%%
# Define the data transforms
# Per-image transform: resize to the network's 100x100 input, then convert
# the PIL image to a [0, 1] float tensor.
transform = transforms.Compose([
    transforms.Resize((100, 100)),
    transforms.ToTensor()
])

# BUG FIX: `transform` was defined but never handed to the dataset, so the
# loader yielded raw PIL images that DataLoader's default collate cannot
# batch. Pass it explicitly so samples come out as tensors.
custom_dataset = CustomDataset(data_pairs, root_dir, transform=transform)

# Batches of 32 pairs, reshuffled every epoch.
data_loader = DataLoader(custom_dataset, batch_size=32, shuffle=True)

# Instantiate the siamese network model
siamese_net = SiameseNetwork()


def contrastive_loss(out1, out2, label, margin=2.0):
    """Contrastive pair loss (Hadsell et al., 2006): pull label-1 pairs
    together, push label-0 pairs at least `margin` apart."""
    dist = nn.functional.pairwise_distance(out1, out2)
    positive_term = label * dist.pow(2)
    negative_term = (1.0 - label) * torch.clamp(margin - dist, min=0.0).pow(2)
    return 0.5 * torch.mean(positive_term + negative_term)


# BUG FIX: nn.TripletMarginLoss expects three EMBEDDINGS (anchor, positive,
# negative); the old code passed the 0/1 label tensor as the "negative",
# which is meaningless for pair data. Use a contrastive pair loss instead.
criterion = contrastive_loss
optimizer = optim.Adam(siamese_net.parameters(), lr=0.001)

# Number of training epochs
num_epochs = 50

# Train the model
siamese_net.train()  # enable dropout and batch-norm statistics updates
for epoch in range(num_epochs):
    total_loss = 0.0
    for batch_idx, (input1, input2, label) in enumerate(data_loader):
        optimizer.zero_grad()
        output1, output2 = siamese_net(input1, input2)
        loss = criterion(output1, output2, label.float())
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

        # Periodic progress report every 10 batches.
        if batch_idx % 10 == 0:
            print(f'Epoch {epoch+1}, Batch {batch_idx}, Loss: {loss.item()}')

    print(f'Epoch {epoch+1}, Avg. Loss: {total_loss / len(data_loader)}')

