import torch
import torch.nn as nn
import torchvision
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from pycocotools.coco import COCO
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from tqdm import tqdm
import random


# --------------------
# 1. Configuration parameters
# --------------------
class Config:
    """Central hyper-parameters and filesystem paths for training."""

    # Training split
    TRAIN_DATA_DIR = "./mini_coco2017/train2017"
    TRAIN_ANN_FILE = "./mini_coco2017/annotations/instances_train2017.json"

    # Validation split
    VAL_DATA_DIR = "./mini_coco2017/val2017"
    VAL_ANN_FILE = "./mini_coco2017/annotations/instances_val2017.json"

    BATCH_SIZE = 4
    IMG_SIZE = 256
    NUM_CLASSES = 91  # COCO category-id range (0..90), 0 = background
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    EPOCHS = 10
    LR = 0.001
    SAVE_DIR = "./saved_models"
    SEED = 42


# --------------------
# 2. Data loading
# --------------------
class COCOSegmentationDataset(Dataset):
    """COCO semantic-segmentation dataset.

    Builds a per-pixel class mask for each image by rasterizing all of
    its instance annotations: annotated pixels get their ``category_id``
    (later annotations overwrite earlier ones on overlap), background
    stays 0.

    Args:
        coco: an initialized ``pycocotools.coco.COCO`` handle.
        img_dir: directory containing the image files.
        transform: optional callable applied to the RGB image only
            (e.g. a ``torchvision.transforms.Compose``).
    """

    def __init__(self, coco, img_dir, transform=None):
        self.coco = coco
        self.img_dir = img_dir
        self.transform = transform
        # Sort ids so sample order is deterministic across runs.
        self.ids = list(sorted(self.coco.imgs.keys()))

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        img_id = self.ids[idx]
        img_info = self.coco.loadImgs(img_id)[0]
        img_path = os.path.join(self.img_dir, img_info["file_name"])

        # cv2.imread silently returns None on a missing/corrupt file;
        # fail loudly with the offending path instead of a cryptic
        # cv2.cvtColor error.
        img = cv2.imread(img_path)
        if img is None:
            raise FileNotFoundError(f"Could not read image: {img_path}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Rasterize every annotation into one label mask. annToMask
        # already handles both polygon and crowd-RLE segmentations, so
        # no separate iscrowd branch is needed (the original branch
        # computed an unused RLE and then called annToMask anyway).
        mask = np.zeros((img_info["height"], img_info["width"]), dtype=np.uint8)
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        for ann in self.coco.loadAnns(ann_ids):
            m = self.coco.annToMask(ann)
            mask[m > 0] = ann["category_id"]

        # Resize; nearest-neighbour for the mask so labels stay discrete.
        img = cv2.resize(img, (Config.IMG_SIZE, Config.IMG_SIZE))
        mask = cv2.resize(mask, (Config.IMG_SIZE, Config.IMG_SIZE),
                          interpolation=cv2.INTER_NEAREST)

        if self.transform:
            img = self.transform(img)

        mask = torch.from_numpy(mask).long()

        return img, mask


# Data preprocessing transforms
def get_transform(train=True):
    """Return the image preprocessing pipeline.

    Bug fix: the original train pipeline applied ``RandomHorizontalFlip``
    to the image tensor only — the mask is converted separately in
    ``__getitem__`` and was never flipped, so augmented images were
    horizontally misaligned with their ground-truth masks.  Geometric
    augmentations must be applied jointly to image AND mask (inside the
    dataset's ``__getitem__``), so the flip is removed here.

    Args:
        train: kept for interface compatibility; both splits currently
            use the same deterministic pipeline.

    Returns:
        A ``transforms.Compose`` converting an HxWxC uint8 array to a
        float tensor normalized with ImageNet mean/std.
    """
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])


# 3. U-Net model definition
# --------------------
class DoubleConv(nn.Module):
    """Two stacked 3x3 Conv -> BatchNorm -> ReLU stages (the classic
    U-Net building block); padding=1 preserves spatial size."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        layers = []
        channels = in_channels
        # Build the two identical conv stages in a loop; only the first
        # stage changes the channel count.
        for _ in range(2):
            layers.extend([
                nn.Conv2d(channels, out_channels, 3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ])
            channels = out_channels
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)


class UNet(nn.Module):
    """Compact 3-level U-Net for semantic segmentation.

    Encoder 3 -> 64 -> 128 -> 256 with 2x max-pooling between levels, a
    512-channel bottleneck, then a mirrored decoder using transposed-conv
    upsampling with skip connections.  Output spatial size equals the
    input size; input H and W must be divisible by 8 so the three
    pool/upsample rounds line up for concatenation.

    Args:
        num_classes: number of output channels (classes).  Defaults to
            ``Config.NUM_CLASSES``, keeping the original zero-argument
            constructor working unchanged.
    """

    def __init__(self, num_classes=None):
        super().__init__()
        if num_classes is None:
            num_classes = Config.NUM_CLASSES

        # Encoder (downsampling path)
        self.down1 = DoubleConv(3, 64)
        self.down2 = DoubleConv(64, 128)
        self.down3 = DoubleConv(128, 256)
        self.pool = nn.MaxPool2d(2)

        # Bottleneck
        self.bottleneck = DoubleConv(256, 512)

        # Decoder (upsampling path); each conv block takes doubled
        # channels because of the concatenated skip connection.
        self.up3 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.conv3 = DoubleConv(512, 256)
        self.up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.conv2 = DoubleConv(256, 128)
        self.up1 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.conv1 = DoubleConv(128, 64)

        # 1x1 projection to per-pixel class logits
        self.final = nn.Conv2d(64, num_classes, 1)

    def forward(self, x):
        # Encode
        x1 = self.down1(x)        # [B, 64, H, W]
        x2 = self.pool(x1)
        x2 = self.down2(x2)       # [B, 128, H/2, W/2]
        x3 = self.pool(x2)
        x3 = self.down3(x3)       # [B, 256, H/4, W/4]

        # Bottleneck
        x = self.pool(x3)
        x = self.bottleneck(x)    # [B, 512, H/8, W/8]

        # Decode with skip connections
        x = self.up3(x)                 # [B, 256, H/4, W/4]
        x = torch.cat([x, x3], dim=1)   # concat skip along channels
        x = self.conv3(x)

        x = self.up2(x)                 # [B, 128, H/2, W/2]
        x = torch.cat([x, x2], dim=1)
        x = self.conv2(x)

        x = self.up1(x)                 # [B, 64, H, W]
        x = torch.cat([x, x1], dim=1)
        x = self.conv1(x)

        # Per-pixel class logits
        return self.final(x)            # [B, num_classes, H, W]



if __name__ == '__main__':
    # Seed all RNGs for reproducibility (note: full determinism on CUDA
    # would additionally need cudnn deterministic settings).
    torch.manual_seed(Config.SEED)
    np.random.seed(Config.SEED)
    random.seed(Config.SEED)

    # Build datasets from the COCO annotation files.
    train_coco = COCO(Config.TRAIN_ANN_FILE)
    train_dataset = COCOSegmentationDataset(train_coco, Config.TRAIN_DATA_DIR,
                                            transform=get_transform(train=True))

    val_coco = COCO(Config.VAL_ANN_FILE)
    val_dataset = COCOSegmentationDataset(val_coco, Config.VAL_DATA_DIR,
                                          transform=get_transform(train=False))

    # Data loaders: shuffle only the training split.
    train_loader = DataLoader(train_dataset, batch_size=Config.BATCH_SIZE,
                              shuffle=True, num_workers=2)
    val_loader = DataLoader(val_dataset, batch_size=Config.BATCH_SIZE,
                            shuffle=False, num_workers=2)

    # --------------------
    # 4. Training setup
    # --------------------
    model = UNet().to(Config.DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), lr=Config.LR)
    # CrossEntropyLoss takes raw logits [B, C, H, W] and integer class
    # masks [B, H, W] — matching the model and dataset outputs.
    criterion = nn.CrossEntropyLoss()

    # Make sure the checkpoint directory exists.
    os.makedirs(Config.SAVE_DIR, exist_ok=True)

    # --------------------
    # 5. Training and validation loop
    # --------------------
    best_val_loss = float('inf')

    for epoch in range(Config.EPOCHS):
        # ---- Training phase ----
        model.train()
        train_loss = 0.0

        with tqdm(train_loader, desc=f"Epoch {epoch + 1}/{Config.EPOCHS} [Train]",
                  unit="batch") as pbar:
            for images, masks in pbar:
                images = images.to(Config.DEVICE)
                masks = masks.to(Config.DEVICE)

                # Forward pass
                outputs = model(images)
                loss = criterion(outputs, masks)

                # Backward pass and parameter update
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                pbar.set_postfix({'loss': f"{loss.item():.4f}"})

        # ---- Validation phase (no gradients) ----
        model.eval()
        val_loss = 0.0

        with torch.no_grad():
            with tqdm(val_loader, desc=f"Epoch {epoch + 1}/{Config.EPOCHS} [Val]",
                      unit="batch") as pbar:
                for images, masks in pbar:
                    images = images.to(Config.DEVICE)
                    masks = masks.to(Config.DEVICE)

                    outputs = model(images)
                    loss = criterion(outputs, masks)

                    val_loss += loss.item()
                    pbar.set_postfix({'val_loss': f"{loss.item():.4f}"})

        # Average losses over batches (note: a smaller final batch is
        # weighted the same as full batches).
        avg_train_loss = train_loss / len(train_loader)
        avg_val_loss = val_loss / len(val_loader)
        print(f"\nEpoch {epoch + 1} Summary:")
        print(f"Train Loss: {avg_train_loss:.4f} | Val Loss: {avg_val_loss:.4f}")

        # Keep only the checkpoint with the lowest validation loss so far.
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            torch.save(model.state_dict(),
                       os.path.join(Config.SAVE_DIR, "best_model.pth"))
            print(f"Saved new best model with val loss: {best_val_loss:.4f}")

        # --------------------
        # 6. Validation-set visualization
        # --------------------
        if (epoch + 1) % 5 == 0:  # visualize once every 5 epochs
            model.eval()
            fig, axes = plt.subplots(5, 3, figsize=(15, 25))

            # Pick 5 random validation samples (requires len(val_dataset) >= 5).
            random_indices = random.sample(range(len(val_dataset)), 5)

            for i, idx in enumerate(random_indices):
                image, mask = val_dataset[idx]

                with torch.no_grad():
                    input_tensor = image.unsqueeze(0).to(Config.DEVICE)
                    output = model(input_tensor)
                    pred_mask = torch.argmax(output, dim=1).squeeze().cpu().numpy()

                # Undo the ImageNet normalization for display
                # (assumes get_transform's mean/std — keep in sync).
                image = image.cpu().numpy().transpose(1, 2, 0)
                image = image * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
                image = np.clip(image, 0, 1)

                # Plot input / ground truth / prediction side by side.
                axes[i, 0].imshow(image)
                axes[i, 0].set_title("Input Image")
                axes[i, 0].axis('off')

                axes[i, 1].imshow(mask.numpy())
                axes[i, 1].set_title("Ground Truth")
                axes[i, 1].axis('off')

                axes[i, 2].imshow(pred_mask)
                axes[i, 2].set_title("Prediction")
                axes[i, 2].axis('off')

            plt.tight_layout()
            plt.show()