import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torchvision.utils import make_grid
from tqdm import tqdm
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt


# UNet model definition
class UNet(nn.Module):
    """U-Net for semantic segmentation.

    A four-stage contracting encoder feeds a bottleneck, then a four-stage
    expanding decoder; each decoder stage concatenates the matching encoder
    output (skip connection) before refining. The final 1x1 conv emits raw
    logits (no activation), suitable for ``BCEWithLogitsLoss``.

    Args:
        in_channels: channels of the input image (3 for RGB).
        out_channels: channels of the output map (1 for binary masks).
        init_features: width of the first encoder stage; doubled per stage.
    """

    def __init__(self, in_channels=3, out_channels=1, init_features=32):
        super(UNet, self).__init__()
        f = init_features

        # Contracting path: conv stage then 2x2 max-pool, four times.
        self.encoder1 = self._block(in_channels, f)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder2 = self._block(f, f * 2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder3 = self._block(f * 2, f * 4)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder4 = self._block(f * 4, f * 8)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.bottleneck = self._block(f * 8, f * 16)

        # Expanding path: transposed conv doubles the spatial size; the
        # decoder stage then sees upsampled + skip channels concatenated.
        self.upconv4 = nn.ConvTranspose2d(f * 16, f * 8, kernel_size=2, stride=2)
        self.decoder4 = self._block(f * 16, f * 8)
        self.upconv3 = nn.ConvTranspose2d(f * 8, f * 4, kernel_size=2, stride=2)
        self.decoder3 = self._block(f * 8, f * 4)
        self.upconv2 = nn.ConvTranspose2d(f * 4, f * 2, kernel_size=2, stride=2)
        self.decoder2 = self._block(f * 4, f * 2)
        self.upconv1 = nn.ConvTranspose2d(f * 2, f, kernel_size=2, stride=2)
        self.decoder1 = self._block(f * 2, f)

        # 1x1 projection to the requested number of output channels.
        self.conv = nn.Conv2d(in_channels=f, out_channels=out_channels, kernel_size=1)

    def _block(self, in_channels, out_channels):
        """Two (Conv3x3 -> BatchNorm -> ReLU) units.

        Conv bias is omitted because the following BatchNorm supplies the
        affine shift.
        """
        layers = []
        for ch in (in_channels, out_channels):
            layers.extend([
                nn.Conv2d(ch, out_channels, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ])
        return nn.Sequential(*layers)

    def forward(self, x):
        # Contracting path; keep each stage's output for the skips.
        e1 = self.encoder1(x)
        e2 = self.encoder2(self.pool1(e1))
        e3 = self.encoder3(self.pool2(e2))
        e4 = self.encoder4(self.pool3(e3))

        b = self.bottleneck(self.pool4(e4))

        # Expanding path: upsample, concatenate the matching skip, refine.
        d = self.decoder4(torch.cat((self.upconv4(b), e4), dim=1))
        d = self.decoder3(torch.cat((self.upconv3(d), e3), dim=1))
        d = self.decoder2(torch.cat((self.upconv2(d), e2), dim=1))
        d = self.decoder1(torch.cat((self.upconv1(d), e1), dim=1))

        return self.conv(d)


# Custom dataset of paired images and segmentation masks
class SegmentationDataset(Dataset):
    """Dataset of (image, mask) pairs for binary segmentation.

    Images are read from ``image_dir``; for each image ``name.ext`` the
    corresponding mask is expected at ``mask_dir/name.png``.

    Args:
        image_dir: directory containing the input images.
        mask_dir: directory containing the PNG masks.
        transform: optional callable applied to BOTH the image and the mask.
            NOTE(review): a bilinear ``Resize`` in the transform blurs mask
            edges; nearest-neighbor resizing is usually preferable for masks.
    """

    def __init__(self, image_dir, mask_dir, transform=None):
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.transform = transform
        # Sort so the sample order is deterministic across filesystems/OSes
        # (os.listdir order is arbitrary).
        self.images = sorted(os.listdir(image_dir))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        name = self.images[idx]
        img_path = os.path.join(self.image_dir, name)
        # splitext (instead of split('.')[0]) keeps stems with extra dots
        # intact, e.g. 'img.v2.jpg' -> mask 'img.v2.png', not 'img.png'.
        mask_path = os.path.join(self.mask_dir, os.path.splitext(name)[0] + '.png')

        image = Image.open(img_path).convert("RGB")  # force 3-channel RGB
        mask = Image.open(mask_path).convert("L")    # single-channel grayscale

        if self.transform:
            image = self.transform(image)
            mask = self.transform(mask)

        return image, mask


# Compute IoU (Intersection over Union)
def iou_score(output, target):
    """Binary IoU between logits ``output`` and ground truth ``target``.

    Predictions are sigmoid-activated and thresholded at 0.5; the target is
    thresholded at 0.5 as well. A small smoothing term keeps the ratio
    defined when the union is empty.
    """
    eps = 1e-5

    pred = torch.sigmoid(output).data.cpu().numpy() > 0.5
    truth = target.data.cpu().numpy() > 0.5

    inter = np.logical_and(pred, truth).sum()
    union = np.logical_or(pred, truth).sum()

    return (inter + eps) / (union + eps)


# Compute pixel accuracy
def pixel_accuracy(output, target):
    """Fraction of pixels whose thresholded prediction matches the target.

    Logits are sigmoid-activated then both sides are binarized at 0.5.
    """
    pred = (torch.sigmoid(output).data.cpu().numpy() > 0.5).astype(np.uint8)
    truth = (target.data.cpu().numpy() > 0.5).astype(np.uint8)

    # Mean of the element-wise match mask == correct / total.
    return np.mean(pred == truth)


# Main training function
def train_unet(dataset_root: str, epochs: int = 10, batch_size: int = 4, lr: float = 0.001) -> None:
    """Train a UNet for binary segmentation and plot the learning curves.

    Expects this layout under ``dataset_root``:
        train2017/        training images
        train_masks_png/  training masks (``<image stem>.png``)
        val2017/          validation images
        val_masks_png/    validation masks

    Args:
        dataset_root: root directory of the dataset (layout above).
        epochs: number of training epochs.
        batch_size: mini-batch size for both loaders.
        lr: Adam learning rate.

    Side effects: saves weights to ``unet_model.pth`` in the current
    directory and shows a matplotlib figure of loss/IoU/accuracy curves.
    """
    # Preprocessing: resize to fixed 256x256 and convert to tensors.
    # NOTE(review): SegmentationDataset applies this same transform to the
    # masks, so the bilinear Resize softens mask edges; the >0.5 threshold
    # in the metrics compensates, but nearest-neighbor mask resizing would
    # be cleaner — confirm against the mask data.
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
    ])

    # Load the train/val datasets (COCO-style directory names).
    train_dataset = SegmentationDataset(
        image_dir=os.path.join(dataset_root, 'train2017'),
        mask_dir=os.path.join(dataset_root, 'train_masks_png'),
        transform=transform
    )

    val_dataset = SegmentationDataset(
        image_dir=os.path.join(dataset_root, 'val2017'),
        mask_dir=os.path.join(dataset_root, 'val_masks_png'),
        transform=transform
    )

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    # Model, loss, and optimizer. BCEWithLogitsLoss expects raw logits,
    # which matches UNet's un-activated final conv output.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = UNet(in_channels=3, out_channels=1).to(device)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # Per-epoch history used for the plots at the end.
    train_losses = []
    val_losses = []
    train_iou = []
    val_iou = []
    train_acc = []
    val_acc = []

    for epoch in range(epochs):
        # ---- Training phase ----
        model.train()
        train_loss = 0.0
        train_iou_epoch = 0.0
        train_acc_epoch = 0.0

        loop = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{epochs} Train', leave=False)
        for images, masks in loop:
            images = images.to(device)
            masks = masks.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, masks)
            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch figure is a per-sample mean
            # (the last batch may be smaller than batch_size).
            train_loss += loss.item() * images.size(0)

            # Batch-level IoU and pixel accuracy.
            iou = iou_score(outputs, masks)
            acc = pixel_accuracy(outputs, masks)

            train_iou_epoch += iou * images.size(0)
            train_acc_epoch += acc * images.size(0)

            loop.set_postfix(loss=loss.item(), iou=iou, acc=acc)

        # Per-sample epoch averages.
        train_loss = train_loss / len(train_loader.dataset)
        train_iou_epoch = train_iou_epoch / len(train_loader.dataset)
        train_acc_epoch = train_acc_epoch / len(train_loader.dataset)

        train_losses.append(train_loss)
        train_iou.append(train_iou_epoch)
        train_acc.append(train_acc_epoch)

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        val_iou_epoch = 0.0
        val_acc_epoch = 0.0

        loop = tqdm(val_loader, desc=f'Epoch {epoch + 1}/{epochs} Val', leave=False)
        with torch.no_grad():
            for images, masks in loop:
                images = images.to(device)
                masks = masks.to(device)

                outputs = model(images)
                loss = criterion(outputs, masks)

                val_loss += loss.item() * images.size(0)

                # Batch-level IoU and pixel accuracy.
                iou = iou_score(outputs, masks)
                acc = pixel_accuracy(outputs, masks)

                val_iou_epoch += iou * images.size(0)
                val_acc_epoch += acc * images.size(0)

                loop.set_postfix(loss=loss.item(), iou=iou, acc=acc)

        # Per-sample epoch averages.
        val_loss = val_loss / len(val_loader.dataset)
        val_iou_epoch = val_iou_epoch / len(val_loader.dataset)
        val_acc_epoch = val_acc_epoch / len(val_loader.dataset)

        val_losses.append(val_loss)
        val_iou.append(val_iou_epoch)
        val_acc.append(val_acc_epoch)

        # Print this epoch's summary.
        print(f'Epoch {epoch + 1}/{epochs}')
        print(f'Train Loss: {train_loss:.4f}, Train IoU: {train_iou_epoch:.4f}, Train Acc: {train_acc_epoch:.4f}')
        print(f'Val Loss: {val_loss:.4f}, Val IoU: {val_iou_epoch:.4f}, Val Acc: {val_acc_epoch:.4f}')
        print('--------------------------------')

    # Save the final model weights (no best-checkpoint tracking).
    torch.save(model.state_dict(), 'unet_model.pth')

    # Plot the training curves: loss, IoU, accuracy.
    plt.figure(figsize=(12, 4))

    plt.subplot(1, 3, 1)
    plt.plot(train_losses, label='Train Loss')
    plt.plot(val_losses, label='Val Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()

    plt.subplot(1, 3, 2)
    plt.plot(train_iou, label='Train IoU')
    plt.plot(val_iou, label='Val IoU')
    plt.xlabel('Epoch')
    plt.ylabel('IoU')
    plt.legend()

    plt.subplot(1, 3, 3)
    plt.plot(train_acc, label='Train Acc')
    plt.plot(val_acc, label='Val Acc')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.tight_layout()
    plt.show()


# Script entry point: run training
if __name__ == "__main__":
    # Replace with the root directory of your own dataset.
    coco_root = "./pngmask_COCO"
    train_unet(coco_root, epochs=10, batch_size=4, lr=0.001)