import os
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from data_loader import KneeDataset
from menet_net import MENetVNet
import matplotlib.pyplot as plt
from scipy.spatial.distance import directed_hausdorff
import numpy as np

# Data locations.
image_dir = "jpg"
label_dir = "png"
output_dir = "segmentation_results"
os.makedirs(output_dir, exist_ok=True)

# Select the compute device, preferring CUDA when available.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device: {device}")
if device.type == "cuda":
    print(f"GPU: {torch.cuda.get_device_name(0)}")

# Dataset and batched loader.
dataset = KneeDataset(image_dir, label_dir)
dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

# Model moved onto the selected device, plus loss and optimizer.
model = MENetVNet().to(device)
criterion = torch.nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.0005)

# Evaluation helper
def calculate_metrics(outputs, labels):
    """Compute segmentation metrics for one batch.

    Args:
        outputs: model probabilities, shape (B, 1, H, W); binarized at 0.5.
        labels: ground-truth masks of the same shape; cast to float and
            treated as binary.

    Returns:
        Tuple ``(dsc, iou, asd, hd, rmse, f1)``. DSC/IoU/RMSE/F1 are computed
        over the whole batch; ASD and HD are computed on the FIRST sample only
        (surface extraction is expensive). ASD/HD are 0.0 when both masks are
        empty and ``inf`` when exactly one of them is empty.
    """
    outputs = (outputs > 0.5).float()
    labels = labels.float()

    # Dice and IoU over the whole batch (epsilon guards against division by zero).
    intersection = (outputs * labels).sum()
    union = outputs.sum() + labels.sum()
    dsc = (2 * intersection) / (union + 1e-5)
    iou = intersection / (union - intersection + 1e-5)

    outputs_np = outputs.detach().cpu().numpy()
    labels_np = labels.detach().cpu().numpy()

    # Foreground pixel coordinates of the first sample. Both ASD and the
    # Hausdorff distance are defined on point SETS; passing the raw 2-D mask
    # to directed_hausdorff (as before) would wrongly treat each mask row as
    # a single point in H-dimensional space.
    pred_points = np.argwhere(outputs_np[0, 0] > 0)
    true_points = np.argwhere(labels_np[0, 0] > 0)

    if pred_points.size == 0 and true_points.size == 0:
        # Both masks empty: the surfaces coincide trivially.
        asd, hd = 0.0, 0.0
    elif pred_points.size == 0 or true_points.size == 0:
        # Exactly one mask empty: surface distances are unbounded.
        asd, hd = float('inf'), float('inf')
    else:
        # Average symmetric surface distance via one vectorized pairwise
        # distance matrix (replaces the per-point Python loops).
        dists = np.linalg.norm(
            pred_points[:, None, :] - true_points[None, :, :], axis=2)
        asd = (dists.min(axis=1).mean() + dists.min(axis=0).mean()) / 2

        # Symmetric Hausdorff distance on the coordinate sets.
        hd = max(directed_hausdorff(pred_points, true_points)[0],
                 directed_hausdorff(true_points, pred_points)[0])

    # RMSE between the binarized prediction and the label.
    rmse = torch.sqrt(torch.mean((outputs - labels) ** 2)).item()

    # F1 score from true positives, false positives, and false negatives.
    tp = (outputs * labels).sum()
    fp = (outputs * (1 - labels)).sum()
    fn = ((1 - outputs) * labels).sum()
    f1 = (2 * tp) / (2 * tp + fp + fn + 1e-5)

    return dsc.item(), iou.item(), asd, hd, rmse, f1.item()

# ---- Training configuration and bookkeeping ----
num_epochs = 150

# Start a fresh metrics log with a header row for every tracked metric.
log_file = os.path.join(output_dir, "metrics_log.txt")
with open(log_file, "w") as f:
    f.write("Epoch, Avg_Loss, Avg_DSC, Avg_IoU, Avg_ASD, Avg_HD, Avg_RMSE, Avg_F1\n")

# Rolling buffers holding per-epoch averages, flushed every 10 epochs.
metric_names = ('loss', 'dsc', 'iou', 'asd', 'hd', 'rmse', 'f1')
metrics_accumulator = {name: [] for name in metric_names}

# Best-model tracking (selected by the highest epoch-average DSC).
best_dsc = 0
best_epoch = 0
best_model_state_dict = None
# ---- Training loop ----
for epoch in range(1, num_epochs + 1):
    model.train()
    epoch_loss = 0
    metrics = {'dsc': [], 'iou': [], 'asd': [], 'hd': [], 'rmse': [], 'f1': []}

    for images, labels in dataloader:
        images, labels = images.to(device), labels.to(device)

        outputs = model(images)
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()

        # Per-batch metrics (ASD/HD are evaluated on the batch's first sample).
        dsc, iou, asd, hd, rmse, f1 = calculate_metrics(outputs, labels)
        metrics['dsc'].append(dsc)
        metrics['iou'].append(iou)
        metrics['asd'].append(asd)
        metrics['hd'].append(hd)
        metrics['rmse'].append(rmse)
        metrics['f1'].append(f1)

    # Epoch-level averages.
    avg_dsc = sum(metrics['dsc']) / len(metrics['dsc'])
    avg_iou = sum(metrics['iou']) / len(metrics['iou'])
    avg_asd = sum(metrics['asd']) / len(metrics['asd'])
    avg_hd = sum(metrics['hd']) / len(metrics['hd'])
    avg_rmse = sum(metrics['rmse']) / len(metrics['rmse'])
    avg_f1 = sum(metrics['f1']) / len(metrics['f1'])
    avg_loss = epoch_loss / len(dataloader)

    # Per-epoch progress report.
    print(f"Epoch [{epoch}/{num_epochs}], Loss: {avg_loss:.4f}, DSC: {avg_dsc:.4f}, IoU: {avg_iou:.4f}, "
          f"ASD: {avg_asd:.4f}, HD: {avg_hd:.4f}, RMSE: {avg_rmse:.4f}, F1: {avg_f1:.4f}")

    # Accumulate epoch averages for the 10-epoch log window.
    metrics_accumulator['loss'].append(avg_loss)
    metrics_accumulator['dsc'].append(avg_dsc)
    metrics_accumulator['iou'].append(avg_iou)
    metrics_accumulator['asd'].append(avg_asd)
    metrics_accumulator['hd'].append(avg_hd)
    metrics_accumulator['rmse'].append(avg_rmse)
    metrics_accumulator['f1'].append(avg_f1)

    # Snapshot the best model by DSC. model.state_dict() returns LIVE
    # references to the parameter tensors, so they must be cloned here —
    # otherwise later epochs overwrite the "best" snapshot in place and the
    # final save would silently contain the last epoch's weights. Tensors are
    # moved to CPU so the checkpoint loads regardless of available devices.
    if avg_dsc > best_dsc:
        best_dsc = avg_dsc
        best_epoch = epoch
        best_model_state_dict = {k: v.detach().cpu().clone()
                                 for k, v in model.state_dict().items()}

    # Every 10 epochs, write the windowed averages to the log and reset.
    if epoch % 10 == 0:
        avg_metrics = {key: sum(values) / len(values) for key, values in metrics_accumulator.items()}
        with open(log_file, "a") as f:
            f.write(f"{epoch}, {avg_metrics['loss']:.4f}, {avg_metrics['dsc']:.4f}, {avg_metrics['iou']:.4f}, "
                    f"{avg_metrics['asd']:.4f}, {avg_metrics['hd']:.4f}, {avg_metrics['rmse']:.4f}, {avg_metrics['f1']:.4f}\n")
        # Clear the accumulator for the next window.
        metrics_accumulator = {key: [] for key in metrics_accumulator}

# Persist the best snapshot taken during training.
torch.save(best_model_state_dict, os.path.join(output_dir, "best_model.pth"))
print(f"Best model saved at epoch {best_epoch} with DSC: {best_dsc:.4f}")

# ---- Export predicted probability maps as grayscale images ----
# NOTE(review): `dataloader` was created with shuffle=True, so the result_*.png
# indices do not follow dataset order and differ between runs — confirm
# whether a dedicated unshuffled loader is intended here.
model.eval()
with torch.no_grad():
    for i, (images, labels) in enumerate(dataloader):
        images, labels = images.to(device), labels.to(device)
        outputs = model(images).cpu().numpy()

        for j in range(len(images)):
            # (The unused per-sample copy of the input image — an extra
            # GPU-to-CPU transfer per sample — was removed here.)
            predicted_label = outputs[j].squeeze()
            plt.imsave(os.path.join(output_dir, f"result_{i * len(images) + j}.png"),
                       predicted_label, cmap="gray")

# ---- Environment diagnostics ----
# (The redundant mid-file `import torch` was removed: torch is already
# imported at the top of the file, and PEP 8 places imports there.)
print(f"PyTorch version: {torch.__version__}")
print(f"CUDA Available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
else:
    print("CUDA is not available.")