import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from model import *
from dataset import *
from tqdm import tqdm  # 添加进度条显示

# Run on CUDA when a GPU is present; otherwise fall back to the CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')


def train_model(model, train_dataloader, val_dataloader, optimizer, num_epochs, margin=1.0,
                s_p=1024, s_n=1024, s_p_hard=128, s_n_hard=128):
    """Train a Siamese network with hinge-embedding loss and hard-sample mining.

    Args:
        model: Siamese network; ``model(x1, x2)`` must return a pair of embeddings.
        train_dataloader: yields ``(x1_pos, x2_pos, x1_neg, x2_neg)`` batches.
        val_dataloader: same batch layout, consumed by ``validate`` each epoch.
        optimizer: optimizer constructed over ``model.parameters()``.
        num_epochs: number of full passes over ``train_dataloader``.
        margin: hinge margin applied to negative pairs.
        s_p, s_n: nominal positive/negative counts per batch. Kept for
            backward compatibility; the actual counts are now read from each
            batch (see fix below).
        s_p_hard, s_n_hard: number of hardest positive/negative pairs whose
            losses are backpropagated per batch.

    Side effects: prints per-epoch metrics and saves the weights with the
    lowest validation loss to "best_model.pth".
    """
    model.to(device)  # move the model to the selected device
    best_val_loss = float('inf')

    for epoch in range(num_epochs):
        # --- Training phase ---
        model.train()
        train_loss = 0.0
        for batch in tqdm(train_dataloader, desc=f"Epoch {epoch} Training"):
            x1_pos, x2_pos, x1_neg, x2_neg = batch

            # Move data to the device.
            x1_pos, x2_pos = x1_pos.to(device), x2_pos.to(device)
            x1_neg, x2_neg = x1_neg.to(device), x2_neg.to(device)

            # Fix: derive the actual pair counts from the batch instead of
            # trusting the s_p/s_n parameters — a batch whose size differs
            # from the arguments (short final batch, mismatched batch_size)
            # would otherwise crash or mislabel pairs. This matches what
            # validate() already does.
            n_pos = x1_pos.size(0)
            n_neg = x1_neg.size(0)

            # Concatenate positives and negatives into one forward pass.
            x1 = torch.cat([x1_pos, x1_neg], dim=0)
            x2 = torch.cat([x2_pos, x2_neg], dim=0)
            targets = torch.cat([torch.ones(n_pos, device=device),  # create directly on the device
                                 torch.zeros(n_neg, device=device)], dim=0)

            # Forward pass; hinge-embedding loss: the distance itself for
            # positive pairs, clamp(margin - distance, 0) for negatives.
            out1, out2 = model(x1, x2)
            distances = F.pairwise_distance(out1, out2, p=2)
            losses = torch.where(targets == 1, distances,
                                 torch.clamp(margin - distances, min=0))

            # Hard-sample mining: keep only the largest losses of each kind.
            # Fix: clamp k so topk never requests more elements than exist.
            pos_losses = losses[:n_pos]
            neg_losses = losses[n_pos:]
            _, pos_indices = torch.topk(pos_losses, min(s_p_hard, n_pos))
            _, neg_indices = torch.topk(neg_losses, min(s_n_hard, n_neg))
            selected_loss = torch.cat([pos_losses[pos_indices],
                                       neg_losses[neg_indices]])

            # Backward pass on the mined subset only.
            total_loss = selected_loss.mean()
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            # Weight by the combined batch size so the epoch average below
            # is independent of the final (possibly short) batch.
            train_loss += total_loss.item() * x1.size(0)

        # Mean training loss over the epoch.
        train_loss = train_loss / len(train_dataloader.dataset)

        # --- Validation phase ---
        val_loss, val_acc = validate(model, val_dataloader, margin, device)
        print(f"Epoch {epoch} | Train Loss: {train_loss:.4f} | "
              f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.2%}")

        # Checkpoint only when validation improves.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), "best_model.pth")


def validate(model, val_loader, margin, device):
    """Evaluate the Siamese network; return ``(mean_loss, accuracy)``.

    Args:
        model: Siamese network; ``model(x1, x2)`` returns a pair of embeddings.
        val_loader: yields ``(x1_pos, x2_pos, x1_neg, x2_neg)`` batches.
        margin: hinge margin; also the distance threshold for "matching".
        device: torch device the batches are moved to.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        for x1_pos, x2_pos, x1_neg, x2_neg in tqdm(val_loader, desc="Validating"):
            # Move the batch to the evaluation device.
            x1_pos = x1_pos.to(device)
            x2_pos = x2_pos.to(device)
            x1_neg = x1_neg.to(device)
            x2_neg = x2_neg.to(device)

            # Read the positive/negative counts from the batch itself.
            n_pos = x1_pos.size(0)
            n_neg = x1_neg.size(0)

            left = torch.cat([x1_pos, x1_neg], dim=0)
            right = torch.cat([x2_pos, x2_neg], dim=0)
            labels = torch.cat([torch.ones(n_pos, device=device),
                                torch.zeros(n_neg, device=device)], dim=0)

            emb1, emb2 = model(left, right)
            dist = F.pairwise_distance(emb1, emb2, p=2)

            # Hinge-embedding loss: raw distance for positives,
            # clamp(margin - distance, 0) for negatives.
            per_pair = torch.where(labels == 1, dist,
                                   torch.clamp(margin - dist, min=0))
            batch_total = left.size(0)
            loss_sum += per_pair.mean().item() * batch_total

            # A pair is predicted "matching" when its distance is under the margin.
            predicted = (dist < margin).float()
            n_correct += (predicted == labels).sum().item()
            n_seen += batch_total

    return loss_sum / n_seen, n_correct / n_seen


if __name__ == '__main__':
    # Training hyper-parameters.
    num_epochs = 100
    margin = 1.0
    s_p, s_n = 128, 128          # positive / negative pairs per batch
    s_p_hard, s_n_hard = 64, 64  # hard samples mined per batch

    # Build the network and move it onto the selected device.
    base_cnn = DeepDescriptor().to(device)
    siamese_net = SiameseNetwork(base_cnn).to(device)
    optimizer = torch.optim.SGD(siamese_net.parameters(), lr=0.01, momentum=0.9)

    # Train/val splits share one root and a fixed seed for a deterministic split.
    dataset_root = r"D:\project\FeatureExtration\datasets\output"
    train_dataset = MVSDataset(root_dir=dataset_root,
                               mode='train',
                               pairs_per_epoch=10000,
                               val_ratio=0.2,
                               seed=88)
    val_dataset = MVSDataset(root_dir=dataset_root,
                             mode='val',
                             pairs_per_epoch=2000,
                             val_ratio=0.2,
                             seed=88)

    # pin_memory speeds up host-to-GPU transfers; drop_last keeps batches full.
    train_loader = DataLoader(train_dataset, batch_size=s_p,
                              num_workers=5, pin_memory=True, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=s_p,
                            num_workers=5, pin_memory=True, drop_last=True)

    # Kick off training.
    train_model(siamese_net, train_loader, val_loader, optimizer, num_epochs,
                margin, s_p, s_n, s_p_hard, s_n_hard)



# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# from model import *
# from dataset import *
#
# def hinge_embedding_loss(output1, output2, target, margin=1.0):
#     distance = F.pairwise_distance(output1, output2, p=2)
#     loss = torch.where(target == 1,
#                       distance,
#                       torch.clamp(margin - distance, min=0))
#     return loss.mean()
#
#
# def train_model(model, train_dataloader, val_datalodar, optimizer, margin=1.0,
#                 s_p=1024, s_n=1024, s_p_hard=128, s_n_hard=128):
#     model.train()
#     for epoch in range(num_epochs):
#         for batch in train_dataloader:
#             # 假设batch包含正负样本对
#             x1_pos, x2_pos, x1_neg, x2_neg = batch
#
#             # 合并正负样本
#             x1 = torch.cat([x1_pos, x1_neg], dim=0)
#             x2 = torch.cat([x2_pos, x2_neg], dim=0)
#             targets = torch.cat([torch.ones(s_p), torch.zeros(s_n)], dim=0)
#
#             # 前向传播
#             out1, out2 = model(x1, x2)
#             distances = F.pairwise_distance(out1, out2, p=2)
#             losses = torch.where(targets == 1, distances, torch.clamp(margin - distances, min=0))
#
#             # 分离正负损失
#             pos_losses = losses[:s_p]
#             neg_losses = losses[s_p:]
#
#             # 选择困难样本
#             _, pos_indices = torch.topk(pos_losses, s_p_hard)
#             _, neg_indices = torch.topk(neg_losses, s_n_hard)
#             selected_loss = torch.cat([pos_losses[pos_indices], neg_losses[neg_indices]])
#
#             # 计算平均损失并反向传播
#             total_loss = selected_loss.mean()
#             print(f'epoch {epoch} : loss = {total_loss}')
#             optimizer.zero_grad()
#             total_loss.backward()
#             optimizer.step()
#
# if __name__ == '__main__':
#
#     # 训练参数
#     num_epochs = 100
#     margin = 1.0
#     s_p, s_n = 1024, 1024  # 每批次正负样本数
#     s_p_hard, s_n_hard = 128, 128  # 困难样本数
#
#     # 初始化模型与优化器
#     base_cnn = DeepDescriptor()
#     siamese_net = SiameseNetwork(base_cnn)
#     optimizer = torch.optim.SGD(siamese_net.parameters(), lr=0.01, momentum=0.9)
#
#     # 学习率调整
#     scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.1)
#
#     # 数据加载
#     train_dataset = MVSDataset(
#         root_dir=r"D:\project\FeatureExtration\datasets\output",
#         mode='train',
#         pairs_per_epoch=10000,
#         val_ratio=0.2,
#         seed=88
#     )
#
#     val_dataset = MVSDataset(
#         root_dir=r"D:\project\FeatureExtration\datasets\output",
#         mode='val',
#         pairs_per_epoch=2000,
#         val_ratio=0.2,
#         seed=88
#     )
#
#     # 创建数据加载器
#     train_loader = DataLoader(
#         train_dataset,
#         batch_size=s_p,
#         shuffle=False,  # 保持True以增强随机性
#         num_workers=5,
#         pin_memory=True
#     )
#
#     val_loader = DataLoader(
#         val_dataset,
#         batch_size=s_p,
#         shuffle=False,
#         num_workers=5,
#         pin_memory=True
#     )
#
#     # 开始训练
#     train_model(siamese_net, train_loader, val_loader, optimizer, margin, s_p, s_n, s_p_hard, s_n_hard)