import PIL.Image
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
import os
import cv2
import PIL
import ot  # POT library for optimal-transport computation (classic exact EMD solver)
import random
import matplotlib.pyplot as plt
# Per-epoch average training loss, appended to by use() and plotted at the end.
loss_history = []
# Dataset download link: https://openi.nlm.nih.gov/faq

# Load the IU X-ray dataset
def load_iu_xray_data(data_dir, max_samples=1000):
    """Load grayscale IU X-ray images from a directory tree.

    Walks ``data_dir`` recursively, reads every ``.png`` file as grayscale,
    resizes it to 32x32 and scales pixel values to [0, 1].  The name of the
    folder containing each image is used as its label.

    Args:
        data_dir: Root directory of the IU X-ray dataset.
        max_samples: Stop after collecting this many images.

    Returns:
        Tuple ``(images, labels)``: ``images`` is a float32 array of shape
        (n, 32, 32); ``labels`` is a 1-D array of folder names.
    """
    images = []
    labels = []
    for root, _dirs, files in os.walk(data_dir):
        for file in files:
            if not file.endswith(".png"):
                continue
            img = cv2.imread(os.path.join(root, file), cv2.IMREAD_GRAYSCALE)
            if img is None:
                # cv2.imread returns None for unreadable/corrupt files;
                # skip them instead of crashing on the resize below.
                continue
            img = cv2.resize(img, (32, 32))  # normalize image size
            images.append(img.astype('float32') / 255)  # scale to [0, 1]
            labels.append(os.path.basename(root))  # folder name == label
            if len(images) >= max_samples:
                break
        if len(images) >= max_samples:
            break
    return np.array(images), np.array(labels)

data_dir = 'data/iu_xray/iu_xray'  # replace with the path to your IU X-ray dataset
X_train, y_train = load_iu_xray_data(data_dir, max_samples=1000)

class TripletDataset(Dataset):
    """Dataset yielding (anchor, positive, negative) image triplets.

    Anchor and positive share a label; the negative comes from a different
    label.  All triplet index combinations are sampled once at construction.
    """

    def __init__(self, X, y, num_triplets):
        self.X = X
        self.y = y
        self.triplets = self.create_triplets(num_triplets)

    def create_triplets(self, num_triplets):
        """Sample ``num_triplets`` random (anchor, positive, negative) index triples.

        Raises:
            ValueError: if there are fewer than two distinct labels, or no
                label has at least two samples.  (The original sampling code
                would spin forever on a singleton anchor label, or crash when
                only one label exists.)
        """
        labels_set = set(self.y)
        label_to_indices = {label: np.where(self.y == label)[0] for label in labels_set}

        # Anchors must come from labels with >= 2 samples so a distinct
        # positive always exists.
        anchor_labels = [label for label in labels_set
                         if len(label_to_indices[label]) >= 2]
        if len(labels_set) < 2 or not anchor_labels:
            raise ValueError(
                "need at least two labels and one label with >= 2 samples to form triplets"
            )

        triplets = []
        for _ in range(num_triplets):
            anchor_label = np.random.choice(anchor_labels)
            negative_label = np.random.choice(list(labels_set - {anchor_label}))

            # Draw two distinct indices of the anchor class in one call,
            # replacing the original retry loop.
            anchor_index, positive_index = np.random.choice(
                label_to_indices[anchor_label], size=2, replace=False
            )
            negative_index = np.random.choice(label_to_indices[negative_label])

            triplets.append([anchor_index, positive_index, negative_index])

        return np.array(triplets)

    def __len__(self):
        return len(self.triplets)

    def __getitem__(self, idx):
        anchor_index, positive_index, negative_index = self.triplets[idx]
        return (torch.tensor(self.X[anchor_index], dtype=torch.float32),
                torch.tensor(self.X[positive_index], dtype=torch.float32),
                torch.tensor(self.X[negative_index], dtype=torch.float32))

# Attention mechanism model
# TODO: consider switching to a ResNet backbone later
# class AttentionBlock(nn.Module):
#     def __init__(self, in_channels):
#         super(AttentionBlock, self).__init__()
#         self.conv = nn.Conv2d(in_channels, 1, kernel_size=1)
    
#     def forward(self, x):
#         attention_map = torch.sigmoid(self.conv(x))
#         return x * attention_map

class AttentionBlock(nn.Module):
    """Attention gate: bottleneck 1x1 convs produce (0, 1) weights that
    multiplicatively gate the input feature map, preserving its shape."""

    def __init__(self, in_channels):
        super(AttentionBlock, self).__init__()
        # in_channels // 8 is 0 for in_channels < 8, which makes nn.Conv2d
        # fail -- clamp the bottleneck width to at least one channel.
        hidden = max(1, in_channels // 8)
        self.conv1 = nn.Conv2d(in_channels, hidden, kernel_size=1)
        self.conv2 = nn.Conv2d(hidden, in_channels, kernel_size=1)

    def forward(self, x):
        # Compute attention weights ...
        attention = F.relu(self.conv1(x))
        attention = torch.sigmoid(self.conv2(attention))
        # ... and apply them to the input.
        x = x * attention
        return x

# Naming nit: this is really a DML (deep metric learning) model; the core is the triplet loss.
class TripletModel(nn.Module):
    """CNN embedding network: 1x32x32 grayscale image -> 64-d embedding.

    Three conv+pool stages (spatial 32 -> 16 -> 8 -> 4), an attention gate
    on the final 128-channel feature map, then two fully-connected layers.
    """

    def __init__(self):
        super(TripletModel, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(128 * 4 * 4, 128)
        self.attention = AttentionBlock(128)
        self.fc2 = nn.Linear(128, 64)

    def forward(self, x):
        # Convolutional trunk: each stage halves the spatial resolution.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.pool(torch.relu(conv(x)))
        # Gate the 128x4x4 feature map, flatten, and project down to 64 dims.
        gated = self.attention(x)
        flat = gated.view(-1, 128 * 4 * 4)
        return self.fc2(torch.relu(self.fc1(flat)))

def triplet_loss(anchor, positive, negative, alpha=0.2):
    """Batch-mean triplet margin loss on squared Euclidean distances.

    Penalizes triplets where the anchor-positive distance is not at least
    ``alpha`` smaller than the anchor-negative distance (hinge at zero).
    """
    d_pos = ((anchor - positive) ** 2).sum(dim=1)
    d_neg = ((anchor - negative) ** 2).sum(dim=1)
    hinge = torch.clamp(d_pos - d_neg + alpha, min=0.0)
    return hinge.mean()

class CombinedLoss(nn.Module):
    """Weighted sum of a triplet margin loss and an optimal-transport loss.

    ``alpha`` weights the triplet term and ``beta`` the OT term; the OT term
    is additionally scaled by 1e7 in ``forward`` (empirical rebalancing --
    the raw EMD value is tiny compared to the triplet loss).
    """

    def __init__(self, alpha=0.5, beta=0.5):
        super(CombinedLoss, self).__init__()
        self.alpha = alpha
        self.beta = beta

    def triplet_loss(self, anchor, positive, negative, margin=0.2):
        """Batch-mean hinge triplet loss on squared Euclidean distances."""
        pos_dist = torch.sum(torch.square(anchor - positive), dim=1)
        neg_dist = torch.sum(torch.square(anchor - negative), dim=1)
        basic_loss = pos_dist - neg_dist + margin
        loss = torch.mean(torch.clamp(basic_loss, min=0.0))
        return loss

    def optimal_transport_loss(self, anchor, positive, negative):
        """Exact-EMD transport cost over the batch embeddings.

        The cost matrix is cdist(anchor, positive) + cdist(anchor, negative).
        ``ot.emd([], [], C)`` uses uniform marginals; the plan T is computed
        on a detached copy, so gradients flow only through C.
        """
        C = torch.cdist(anchor, positive) + torch.cdist(anchor, negative)
        # POT operates on NumPy arrays: detach AND move to CPU first (the
        # original `.numpy()` call fails for CUDA tensors).
        T = ot.emd([], [], C.detach().cpu().numpy())
        # Match C's dtype/device before multiplying: emd returns float64 on
        # CPU, while C is typically float32 (and possibly on GPU).
        T = torch.as_tensor(T, dtype=C.dtype, device=C.device)
        return torch.sum(T * C)

    def forward(self, anchor, positive, negative):
        triplet_loss_value = self.triplet_loss(anchor, positive, negative)
        ot_loss_value = self.optimal_transport_loss(anchor, positive, negative)
        # 1e7 rebalances the very small OT term against the triplet term.
        combined_loss = self.alpha * triplet_loss_value + self.beta * ot_loss_value * 1e7
        return combined_loss



def use():
    """Train the triplet model on the preloaded IU X-ray data and save it.

    Builds random triplets from the module-level (X_train, y_train), trains
    with the combined (triplet + optimal transport) loss, records per-epoch
    average loss in the module-level ``loss_history``, plots the loss curve,
    and saves the model weights to disk.
    """
    num_triplets = 1500
    train_dataset = TripletDataset(X_train, y_train, num_triplets)
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

    model = TripletModel()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    combined_loss = CombinedLoss(alpha=0.9, beta=0.1)  # tune alpha/beta as needed

    num_epochs = 20
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        for anchor, positive, negative in train_loader:
            # Add the channel dimension: (B, 32, 32) -> (B, 1, 32, 32).
            anchor = anchor.unsqueeze(1)
            positive = positive.unsqueeze(1)
            negative = negative.unsqueeze(1)
            optimizer.zero_grad()
            anchor_embedding = model(anchor)
            positive_embedding = model(positive)
            negative_embedding = model(negative)
            loss = combined_loss(anchor_embedding, positive_embedding, negative_embedding)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        avg_loss = running_loss / len(train_loader)
        loss_history.append(avg_loss)
        # Reuse avg_loss (the original recomputed the division in the print).
        print(f'Epoch {epoch+1}/{num_epochs}, Loss: {avg_loss}')

    # Plot the training loss curve after training completes.
    plt.plot(loss_history, label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss Curve')
    plt.legend()
    plt.show()

    # Create the output directory before saving: torch.save does not create
    # intermediate directories and would otherwise fail here.
    save_path = 'src/model_pth/triplet_model_iu_xray_attention.pth'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    torch.save(model.state_dict(), save_path)

if __name__ == "__main__":
    # Script entry point: run the full training pipeline.
    use()