# Standard library
import copy

# Third-party
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

# Local
from model import UnetPlusPlus
from yuanshi_dataset import MyDataset

def calculate_accuracy(output, target):
    """Return the pixel-wise accuracy of a segmentation prediction.

    Args:
        output: raw logits of shape [B, C, H, W]; the predicted class is
            the argmax over the channel dimension.
        target: ground-truth labels of shape [B, H, W], or [B, 1, H, W]
            (the singleton channel dim is removed before comparison).

    Returns:
        float: fraction of pixels where argmax(output) equals target.
    """
    with torch.no_grad():
        pred = output.argmax(dim=1)  # [B, H, W]
        # Only drop a singleton *channel* dim. The previous bare
        # .squeeze() removed every size-1 dim, which also drops the batch
        # dim when batch_size == 1 (or a spatial dim of size 1) and can
        # silently misalign shapes via broadcasting.
        if target.dim() == pred.dim() + 1 and target.size(1) == 1:
            target = target.squeeze(1)
        accuracy = (pred == target).float().mean().item()
    return accuracy

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_gpus = torch.cuda.device_count()
print(f"Using {num_gpus} GPUs.")

# Preprocessing pipeline. The augmentations are currently disabled; only
# deterministic resize + tensor conversion are applied, so the same
# transform is safe for both train and test splits.
data_transforms = transforms.Compose([
    # transforms.RandomHorizontalFlip(),
    # transforms.RandomVerticalFlip(),
    # transforms.RandomRotation(30),
    # transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
])

# Load the datasets (image dir, mask dir).
train_dataset = MyDataset(r'D:\PyCharm\data\Thyroid\Thyroid_Dataset_all\tg3k\thyroid-train-image',
                          r'D:\PyCharm\data\Thyroid\Thyroid_Dataset_all\tg3k\thyroid-train-mask',
                          transform=data_transforms)
test_dataset = MyDataset(r'D:\PyCharm\data\Thyroid\Thyroid_Dataset_all\tg3k\thyroid-test-image',
                         r'D:\PyCharm\data\Thyroid\Thyroid_Dataset_all\tg3k\thyroid-test-mask',
                         transform=data_transforms)
# drop_last=True keeps batch shapes constant, which also protects the
# .squeeze() calls in the training loop from a trailing batch of size 1.
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, drop_last=True)
test_loader = DataLoader(test_dataset, batch_size=4, shuffle=False, drop_last=True)

model = UnetPlusPlus(2)  # 2 output classes (background / foreground)
if num_gpus > 1:
    model = nn.DataParallel(model)

model.to(device)

loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-4)

epoch = 3  # total number of training epochs

patience = 5  # early-stopping patience (epochs without test-loss improvement)
best_val_loss = np.inf
# BUG FIX: model.state_dict() returns *references* to the live parameter
# tensors, so keeping it directly means "best weights" silently track the
# current model as training mutates the parameters in place. Deep-copy to
# take a real snapshot.
best_model_wts = copy.deepcopy(model.state_dict())
no_improve_epochs = 0

# Per-epoch metric history, consumed by the plotting and CSV export below.
train_losses = []
test_losses = []
train_accuracies = []
test_accuracies = []
test_oas = []
test_ious = []
test_f1s = []

for i in range(epoch):
    print("——————第{}轮训练——————".format(i + 1))

    # ---- Train ----
    model.train()
    epoch_train_loss = 0
    epoch_train_accuracy = 0
    for data in tqdm(train_loader, desc="Training"):
        imgs, labels = data
        imgs, labels = imgs.to(device), labels.to(device)

        output = model(imgs)  # logits, presumably [B, 2, H, W]
        # Masks arrive with a channel dim; CrossEntropyLoss needs integer
        # class indices of shape [B, H, W].
        labels = labels.squeeze().long()

        loss = loss_func(output, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_train_loss += loss.item()
        epoch_train_accuracy += calculate_accuracy(output, labels)

    train_loss = epoch_train_loss / len(train_loader)
    train_accuracy = epoch_train_accuracy / len(train_loader)
    train_losses.append(train_loss)
    train_accuracies.append(train_accuracy)
    print(f"Epoch {i + 1} Train Loss: {train_loss:.4f}, Train Accuracy: {train_accuracy:.4f}")

    # ---- Evaluate ----
    model.eval()
    epoch_test_loss = 0
    epoch_test_accuracy = 0
    total_correct = 0
    total_pixels = 0
    total_tp = 0
    total_fp = 0
    total_fn = 0
    with torch.no_grad():
        for data in tqdm(test_loader, desc="Testing"):
            imgs, labels = data
            imgs, labels = imgs.to(device), labels.to(device)

            output = model(imgs)
            pred = output.argmax(dim=1)       # predicted class map [B, H, W]
            labels = labels.squeeze().long()  # match shape [B, H, W]

            # Loss and per-batch mean accuracy
            loss = loss_func(output, labels)
            epoch_test_loss += loss.item()
            epoch_test_accuracy += calculate_accuracy(output, labels)

            # Pixel counts for overall accuracy
            total_correct += (pred == labels).sum().item()
            total_pixels += labels.numel()

            # TP / FP / FN for the foreground class (label 1)
            total_tp += ((pred == 1) & (labels == 1)).sum().item()
            total_fp += ((pred == 1) & (labels == 0)).sum().item()
            total_fn += ((pred == 0) & (labels == 1)).sum().item()

    # ---- Epoch-level metrics (epsilon guards against division by zero) ----
    test_loss = epoch_test_loss / len(test_loader)
    test_accuracy = epoch_test_accuracy / len(test_loader)
    oa = total_correct / total_pixels                          # overall accuracy
    iou = total_tp / (total_tp + total_fp + total_fn + 1e-10)  # intersection over union
    recall = total_tp / (total_tp + total_fn + 1e-10)
    precision = total_tp / (total_tp + total_fp + 1e-10)
    f1 = 2 * (precision * recall) / (precision + recall + 1e-10)

    test_losses.append(test_loss)
    test_oas.append(oa)
    test_ious.append(iou)
    test_f1s.append(f1)
    test_accuracies.append(test_accuracy)
    print(f"Epoch {i + 1} Test Metrics:")
    print(f"Loss: {test_loss:.4f} | OA: {oa:.4f} | IoU: {iou:.4f}")
    print(f"Precision: {precision:.4f} | Recall: {recall:.4f} | F1: {f1:.4f}")

    # ---- Early stopping on test loss ----
    if test_loss < best_val_loss:
        best_val_loss = test_loss
        # BUG FIX: state_dict() returns references to the live tensors;
        # without a deep copy, best_model_wts always aliases the current
        # weights and the "best" checkpoint saved later is just the final
        # weights.
        best_model_wts = copy.deepcopy(model.state_dict())
        no_improve_epochs = 0
        print(f"Epoch {i + 1} - New best model saved with test loss: {best_val_loss:.4f}")
    else:
        no_improve_epochs += 1
        print(f"Epoch {i + 1} - No improvement in test loss for {no_improve_epochs} epochs.")

    if no_improve_epochs >= patience:
        print("Early stopping due to no improvement in validation loss.")
        break

# Restore the best (lowest-test-loss) weights and persist them to disk.
# NOTE(review): if training ran under nn.DataParallel, these state_dict
# keys carry a "module." prefix — confirm downstream loaders expect that,
# or they will need to strip it / wrap the model the same way.
model.load_state_dict(best_model_wts)
torch.save(model.state_dict(), 'best_unet_model.pth')
print("Best model saved.")

# ---- Plot training and testing curves ----
# BUG FIX: x-axis must cover the epochs that actually ran. Early stopping
# can break the loop before `epoch` iterations, in which case
# range(1, epoch + 1) is longer than the metric lists and plt.plot raises
# a shape-mismatch error.
epochs_run = range(1, len(train_losses) + 1)

plt.figure(figsize=(16, 12))

# Top-left: loss curves
plt.subplot(2, 2, 1)
plt.plot(epochs_run, train_losses, label='Train Loss')
plt.plot(epochs_run, test_losses, label='Test Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training and Testing Loss Curve')
plt.legend()
plt.grid(True)

# Top-right: accuracy curves
plt.subplot(2, 2, 2)
plt.plot(epochs_run, train_accuracies, label='Train Accuracy')
plt.plot(epochs_run, test_accuracies, label='Test Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and Testing Accuracy Curve')
plt.legend()
plt.grid(True)

# Bottom-left: overall (pixel) accuracy
plt.subplot(2, 2, 3)
plt.plot(epochs_run, test_oas, label='Test OA', marker='o')
plt.xlabel('Epoch')
plt.ylabel('Overall Accuracy')
plt.title('Test Overall Accuracy Curve')
plt.legend()
plt.grid(True)

# Bottom-right: IoU and F1 for the foreground class
plt.subplot(2, 2, 4)
plt.plot(epochs_run, test_ious, label='Test IoU', marker='s')
plt.plot(epochs_run, test_f1s, label='Test F1-Score', marker='^')
plt.xlabel('Epoch')
plt.ylabel('Score')
plt.title('Test IoU and F1-Score Curve')
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.savefig('training_and_testing_curves.png')  # write figure to disk

# ---- Save per-epoch metrics to a CSV file ----
# BUG FIX: key the Epoch column off the number of epochs actually run —
# early stopping can end the loop before `epoch` iterations, and pandas
# raises ValueError on columns of unequal length. Also export
# Test Accuracy, which was collected each epoch but never persisted.
metrics_dict = {
    'Epoch': list(range(1, len(train_losses) + 1)),
    'Train Loss': train_losses,
    'Test Loss': test_losses,
    'Train Accuracy': train_accuracies,
    'Test Accuracy': test_accuracies,
    'Test OA': test_oas,
    'Test IoU': test_ious,
    'Test F1-Score': test_f1s
}

metrics_df = pd.DataFrame(metrics_dict)
metrics_df.to_csv('training_and_testing_metrics.csv', index=False)

print("Metrics have been saved to 'training_and_testing_metrics.csv'")