import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# Import required project modules
from metrics import CombinedLoss


class ConvBlock(nn.Module):
    """Two stacked 3x3 conv -> batch-norm -> ReLU stages.

    Spatial size is preserved (padding=1); only the channel count changes,
    from ``in_channels`` to ``out_channels`` after the first stage.
    """

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()

        # Build both stages with a loop; layer order matches the classic
        # conv/BN/ReLU x2 pattern, so state_dict keys stay conv.0 .. conv.5.
        layers = []
        channels = in_channels
        for _ in range(2):
            layers.append(nn.Conv2d(channels, out_channels, kernel_size=3, padding=1))
            layers.append(nn.BatchNorm2d(out_channels))
            layers.append(nn.ReLU(inplace=True))
            channels = out_channels
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply both conv stages to ``x`` and return the feature map."""
        return self.conv(x)


class NestedUNet(nn.Module):
    """Nested U-Net (UNet++) architecture.

    Improves on the classic U-Net with densely connected skip pathways:
    each decoder node X^{i,j} concatenates every earlier node at the same
    resolution with an upsampled deeper node.

    Args:
        in_channels: Number of channels in the input image.
        out_channels: Number of channels in the output segmentation map
            (raw logits; no final activation is applied here).
        deep_supervision: If True, ``forward`` returns a list with the four
            intermediate segmentation heads instead of a single tensor.
        base_filters: Optional 5-element sequence of channel widths for the
            encoder levels. Defaults to ``[64, 128, 256, 512, 1024]`` (the
            previously hard-coded configuration), so existing callers are
            unaffected; pass smaller widths to build a lighter model.

    Raises:
        ValueError: If ``base_filters`` is given and does not have 5 entries.
    """

    def __init__(self, in_channels=1, out_channels=1, deep_supervision=False,
                 base_filters=None):
        super(NestedUNet, self).__init__()

        self.deep_supervision = deep_supervision

        # Channel widths per resolution level (generalized from a hard-coded
        # constant; the default preserves the original behavior exactly).
        if base_filters is None:
            filters = [64, 128, 256, 512, 1024]
        else:
            filters = list(base_filters)
            if len(filters) != 5:
                raise ValueError("base_filters must contain exactly 5 values")

        # Encoder (backbone) path: nodes X^{i,0}
        self.conv0_0 = ConvBlock(in_channels, filters[0])
        self.conv1_0 = ConvBlock(filters[0], filters[1])
        self.conv2_0 = ConvBlock(filters[1], filters[2])
        self.conv3_0 = ConvBlock(filters[2], filters[3])
        self.conv4_0 = ConvBlock(filters[3], filters[4])

        # Nested decoder path: input width of X^{i,j} is j copies of the
        # same-level width plus one upsampled deeper-level width.
        self.conv0_1 = ConvBlock(filters[0] + filters[1], filters[0])
        self.conv1_1 = ConvBlock(filters[1] + filters[2], filters[1])
        self.conv2_1 = ConvBlock(filters[2] + filters[3], filters[2])
        self.conv3_1 = ConvBlock(filters[3] + filters[4], filters[3])

        self.conv0_2 = ConvBlock(filters[0] * 2 + filters[1], filters[0])
        self.conv1_2 = ConvBlock(filters[1] * 2 + filters[2], filters[1])
        self.conv2_2 = ConvBlock(filters[2] * 2 + filters[3], filters[2])

        self.conv0_3 = ConvBlock(filters[0] * 3 + filters[1], filters[0])
        self.conv1_3 = ConvBlock(filters[1] * 3 + filters[2], filters[1])

        self.conv0_4 = ConvBlock(filters[0] * 4 + filters[1], filters[0])

        # Downsampling between encoder levels (halves H and W).
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Upsampling for decoder nodes (doubles H and W).
        self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)

        # Final 1x1 projection(s) to the output channel count.
        if self.deep_supervision:
            self.final1 = nn.Conv2d(filters[0], out_channels, kernel_size=1)
            self.final2 = nn.Conv2d(filters[0], out_channels, kernel_size=1)
            self.final3 = nn.Conv2d(filters[0], out_channels, kernel_size=1)
            self.final4 = nn.Conv2d(filters[0], out_channels, kernel_size=1)
        else:
            self.final = nn.Conv2d(filters[0], out_channels, kernel_size=1)

    def forward(self, x):
        """Run the full UNet++ forward pass.

        Args:
            x: Input batch of shape (N, in_channels, H, W); H and W must be
               divisible by 16 so the four pool/upsample rounds align.

        Returns:
            A (N, out_channels, H, W) logits tensor, or — when
            ``deep_supervision`` is enabled — a list of four such tensors,
            shallowest head first.
        """
        # Encoder path
        x0_0 = self.conv0_0(x)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x4_0 = self.conv4_0(self.pool(x3_0))

        # First nested decoder column
        x0_1 = self.conv0_1(torch.cat([x0_0, self.up(x1_0)], 1))
        x1_1 = self.conv1_1(torch.cat([x1_0, self.up(x2_0)], 1))
        x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], 1))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))

        # Second nested decoder column
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up(x1_1)], 1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], 1))

        # Third nested decoder column
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], 1))

        # Fourth nested decoder column
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], 1))

        # Output head(s)
        if self.deep_supervision:
            output1 = self.final1(x0_1)
            output2 = self.final2(x0_2)
            output3 = self.final3(x0_3)
            output4 = self.final4(x0_4)
            return [output1, output2, output3, output4]
        else:
            output = self.final(x0_4)
            return output


class NestedUNetTrainer:
    """Training/validation helper for :class:`NestedUNet`.

    Owns the optimizer, LR scheduler, and loss function, and provides
    one-epoch training, validation, and checkpointing.

    NOTE(review): nothing in this class calls ``self.scheduler.step(...)``;
    presumably the caller steps it with the validation loss — confirm.

    Args:
        model: The NestedUNet instance to train (moved onto ``device``).
        device: Device for the model and all batches.
        learning_rate: Initial Adam learning rate.
    """

    def __init__(self, model, device, learning_rate=1e-4):
        self.model = model.to(device)
        self.device = device
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
        # Halve the LR after 5 epochs without improvement of the monitored loss.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode="min", patience=5, factor=0.5
        )
        self.criterion = CombinedLoss()

    def train_epoch(self, train_loader):
        """Run one training epoch.

        Args:
            train_loader: Iterable of dicts with "image" and "mask" tensors.

        Returns:
            Average training loss over all batches.
        """
        self.model.train()
        total_loss = 0.0

        for batch_idx, batch in enumerate(train_loader):
            images = batch["image"].to(self.device)
            masks = batch["mask"].to(self.device)

            # Forward pass
            self.optimizer.zero_grad()
            outputs = self.model(images)

            # With deep supervision the model returns one prediction per
            # nested decoder column; average their losses.
            if self.model.deep_supervision:
                loss = sum(self.criterion(output, masks) for output in outputs)
                loss = loss / len(outputs)
            else:
                loss = self.criterion(outputs, masks)

            # Backward pass
            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()

            if batch_idx % 10 == 0:
                print(
                    f"Batch [{batch_idx}/{len(train_loader)}] Loss: {loss.item():.4f}"
                )

        avg_loss = total_loss / len(train_loader)
        return avg_loss

    def validate(self, val_loader, metrics_calculator):
        """Evaluate the model on ``val_loader``.

        Args:
            val_loader: Iterable of dicts with "image" and "mask" tensors.
            metrics_calculator: Object exposing ``compute_metrics_batch``.

        Returns:
            Tuple of (average loss, dict of averaged metrics). Returns
            ``(0.0, {})`` for an empty loader.
        """
        self.model.eval()
        total_loss = 0.0
        all_metrics = []

        with torch.no_grad():
            for batch in val_loader:
                images = batch["image"].to(self.device)
                masks = batch["mask"].to(self.device)

                # Forward pass
                outputs = self.model(images)

                # Deep supervision: score only the final (deepest) head.
                if self.model.deep_supervision:
                    outputs = outputs[-1]

                # Loss
                loss = self.criterion(outputs, masks)
                total_loss += loss.item()

                # Model emits logits; metrics are computed on probabilities.
                pred = torch.sigmoid(outputs)
                _, batch_metrics = metrics_calculator.compute_metrics_batch(pred, masks)
                all_metrics.append(batch_metrics)

        # Guard: the original crashed with IndexError/ZeroDivisionError on an
        # empty validation loader.
        if not all_metrics:
            return 0.0, {}

        avg_loss = total_loss / len(val_loader)
        avg_metrics = {
            key: float(np.mean([m[key] for m in all_metrics]))
            for key in all_metrics[0]
        }
        return avg_loss, avg_metrics

    def save_checkpoint(self, epoch, loss, metrics, filepath):
        """Save model/optimizer/scheduler state plus bookkeeping to ``filepath``."""
        checkpoint = {
            "epoch": epoch,
            "model_state_dict": self.model.state_dict(),
            "optimizer_state_dict": self.optimizer.state_dict(),
            # Persist the scheduler too — the original omitted it, silently
            # resetting the LR schedule on every resume.
            "scheduler_state_dict": self.scheduler.state_dict(),
            "loss": loss,
            "metrics": metrics,
        }
        torch.save(checkpoint, filepath)

    def load_checkpoint(self, filepath):
        """Restore state from ``filepath``.

        Backward compatible: checkpoints written before the scheduler state
        was saved load fine (the scheduler is simply left as-is).

        Returns:
            Tuple of (epoch, loss, metrics) stored in the checkpoint.
        """
        checkpoint = torch.load(filepath, map_location=self.device)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        scheduler_state = checkpoint.get("scheduler_state_dict")
        if scheduler_state is not None:
            self.scheduler.load_state_dict(scheduler_state)
        return checkpoint["epoch"], checkpoint["loss"], checkpoint["metrics"]