import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# Import required project modules
from metrics import CombinedLoss


class ResidualBlock(nn.Module):
    """Residual block: two 3x3 conv+BN stages plus an identity/projection shortcut.

    When ``stride`` != 1 or the channel count changes, the shortcut is a
    1x1 strided convolution + BN so it matches the main path's output shape;
    otherwise it is the identity.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()

        self.conv1 = nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=stride, padding=1
        )
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Project the input only when its shape differs from the main path's
        # output; an empty Sequential acts as the identity mapping.
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        # Shortcut branch (identity or 1x1 projection).
        identity = self.shortcut(x)

        # Main branch: conv-BN-ReLU, conv-BN.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        # Add the shortcut, then apply the final activation.
        return self.relu(y + identity)


class ResUNet(nn.Module):
    """
    ResUNet架构：结合残差连接的U-Net
    用于医学图像分割
    """

    def __init__(self, in_channels=1, out_channels=1, base_filters=64):
        super(ResUNet, self).__init__()

        filters = [
            base_filters,
            base_filters * 2,
            base_filters * 4,
            base_filters * 8,
            base_filters * 16,
        ]

        # 编码器路径 (下采样)
        # 输入块
        self.input_block = nn.Sequential(
            nn.Conv2d(in_channels, filters[0], kernel_size=3, padding=1),
            nn.BatchNorm2d(filters[0]),
            nn.ReLU(inplace=True),
            nn.Conv2d(filters[0], filters[0], kernel_size=3, padding=1),
            nn.BatchNorm2d(filters[0]),
            nn.ReLU(inplace=True),
        )

        # 编码器残差块
        self.encoder1 = ResidualBlock(filters[0], filters[1], stride=2)
        self.encoder2 = ResidualBlock(filters[1], filters[2], stride=2)
        self.encoder3 = ResidualBlock(filters[2], filters[3], stride=2)
        self.encoder4 = ResidualBlock(filters[3], filters[4], stride=2)

        # 桥接层
        self.bridge = ResidualBlock(filters[4], filters[4])

        # 解码器路径 (上采样)
        self.up4 = nn.ConvTranspose2d(filters[4], filters[3], kernel_size=2, stride=2)
        self.decoder4 = ResidualBlock(filters[3] + filters[3], filters[3])

        self.up3 = nn.ConvTranspose2d(filters[3], filters[2], kernel_size=2, stride=2)
        self.decoder3 = ResidualBlock(filters[2] + filters[2], filters[2])

        self.up2 = nn.ConvTranspose2d(filters[2], filters[1], kernel_size=2, stride=2)
        self.decoder2 = ResidualBlock(filters[1] + filters[1], filters[1])

        self.up1 = nn.ConvTranspose2d(filters[1], filters[0], kernel_size=2, stride=2)
        self.decoder1 = ResidualBlock(filters[0] + filters[0], filters[0])

        # 输出层
        self.output = nn.Conv2d(filters[0], out_channels, kernel_size=1)

    def forward(self, x):
        # 编码器路径
        e0 = self.input_block(x)
        e1 = self.encoder1(e0)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)

        # 桥接层
        bridge = self.bridge(e4)

        # 解码器路径
        d4 = self.up4(bridge)
        d4 = torch.cat([d4, e3], dim=1)
        d4 = self.decoder4(d4)

        d3 = self.up3(d4)
        d3 = torch.cat([d3, e2], dim=1)
        d3 = self.decoder3(d3)

        d2 = self.up2(d3)
        d2 = torch.cat([d2, e1], dim=1)
        d2 = self.decoder2(d2)

        d1 = self.up1(d2)
        d1 = torch.cat([d1, e0], dim=1)
        d1 = self.decoder1(d1)

        # 输出
        output = self.output(d1)

        return output


class AttentionBlock(nn.Module):
    """Additive attention gate for skip connections (Attention U-Net style).

    Projects the gating signal ``g`` and the skip features ``x`` into a
    shared ``F_int``-channel space, combines them, and produces a per-pixel
    [0, 1] mask that rescales ``x``.
    """

    def __init__(self, F_g, F_l, F_int):
        super(AttentionBlock, self).__init__()

        # 1x1 projection of the gating signal into the intermediate space.
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )

        # 1x1 projection of the skip features into the same space.
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )

        # Collapse to a single-channel attention coefficient in [0, 1].
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        # Additive attention: relu(W_g(g) + W_x(x)), then squash via psi.
        combined = self.relu(self.W_g(g) + self.W_x(x))
        attention = self.psi(combined)

        # Gate the skip features with the attention coefficients.
        return x * attention


class AttentionResUNet(ResUNet):
    """
    带注意力机制的ResUNet
    在跳跃连接中加入注意力门控
    """

    def __init__(self, in_channels=1, out_channels=1, base_filters=64):
        super(AttentionResUNet, self).__init__(in_channels, out_channels, base_filters)

        filters = [
            base_filters,
            base_filters * 2,
            base_filters * 4,
            base_filters * 8,
            base_filters * 16,
        ]

        # 注意力块
        self.att4 = AttentionBlock(F_g=filters[3], F_l=filters[3], F_int=filters[2])
        self.att3 = AttentionBlock(F_g=filters[2], F_l=filters[2], F_int=filters[1])
        self.att2 = AttentionBlock(F_g=filters[1], F_l=filters[1], F_int=filters[0])
        self.att1 = AttentionBlock(
            F_g=filters[0], F_l=filters[0], F_int=filters[0] // 2
        )

    def forward(self, x):
        # 编码器路径
        e0 = self.input_block(x)
        e1 = self.encoder1(e0)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)

        # 桥接层
        bridge = self.bridge(e4)

        # 解码器路径（带注意力）
        d4 = self.up4(bridge)
        e3_att = self.att4(g=d4, x=e3)
        d4 = torch.cat([d4, e3_att], dim=1)
        d4 = self.decoder4(d4)

        d3 = self.up3(d4)
        e2_att = self.att3(g=d3, x=e2)
        d3 = torch.cat([d3, e2_att], dim=1)
        d3 = self.decoder3(d3)

        d2 = self.up2(d3)
        e1_att = self.att2(g=d2, x=e1)
        d2 = torch.cat([d2, e1_att], dim=1)
        d2 = self.decoder2(d2)

        d1 = self.up1(d2)
        e0_att = self.att1(g=d1, x=e0)
        d1 = torch.cat([d1, e0_att], dim=1)
        d1 = self.decoder1(d1)

        # 输出
        output = self.output(d1)

        return output


class ResUNetTrainer:
    """Training/validation helper for ResUNet-style segmentation models.

    Owns the Adam optimizer, a cosine-annealing LR schedule, and the
    combined segmentation loss; also handles checkpoint save/load.
    """

    def __init__(self, model, device, learning_rate=1e-4):
        """
        Args:
            model: segmentation network that outputs raw logits.
            device: torch device the model and every batch are moved to.
            learning_rate: initial Adam learning rate.
        """
        self.model = model.to(device)
        self.device = device
        # ``.to(device)`` moves parameters in place, so the optimizer sees
        # the on-device tensors.
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=50, eta_min=1e-6
        )
        self.criterion = CombinedLoss()

    @staticmethod
    def _mean_metrics(all_metrics):
        """Average a list of per-batch metric dicts key by key.

        Returns an empty dict for an empty list instead of raising
        IndexError (the previous behavior when the loader was empty).
        """
        if not all_metrics:
            return {}
        return {
            key: float(np.mean([m[key] for m in all_metrics]))
            for key in all_metrics[0]
        }

    def train_epoch(self, train_loader):
        """Run one training epoch; returns the mean batch loss."""
        self.model.train()
        total_loss = 0.0

        for batch_idx, batch in enumerate(train_loader):
            images = batch["image"].to(self.device)
            masks = batch["mask"].to(self.device)

            # Forward pass.
            self.optimizer.zero_grad()
            outputs = self.model(images)

            # Loss on raw logits (CombinedLoss is expected to handle them).
            loss = self.criterion(outputs, masks)

            # Backward pass and parameter update.
            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()

            if batch_idx % 10 == 0:
                print(
                    f"Batch [{batch_idx}/{len(train_loader)}] Loss: {loss.item():.4f}"
                )

        # Guard against an empty loader: report 0.0 instead of dividing by zero.
        avg_loss = total_loss / max(len(train_loader), 1)
        # LR schedule steps once per epoch.
        self.scheduler.step()

        return avg_loss

    def validate(self, val_loader, metrics_calculator):
        """Evaluate the model; returns (avg_loss, dict of averaged metrics)."""
        self.model.eval()
        total_loss = 0.0
        all_metrics = []

        with torch.no_grad():
            for batch in val_loader:
                images = batch["image"].to(self.device)
                masks = batch["mask"].to(self.device)

                # Forward pass.
                outputs = self.model(images)

                # Loss.
                loss = self.criterion(outputs, masks)
                total_loss += loss.item()

                # The model emits logits; apply sigmoid before metrics.
                pred = torch.sigmoid(outputs)
                _, batch_metrics = metrics_calculator.compute_metrics_batch(pred, masks)
                all_metrics.append(batch_metrics)

        # Empty-loader safe: 0.0 loss and {} metrics instead of
        # ZeroDivisionError / IndexError.
        avg_loss = total_loss / max(len(val_loader), 1)
        avg_metrics = self._mean_metrics(all_metrics)

        return avg_loss, avg_metrics

    def save_checkpoint(self, epoch, loss, metrics, filepath):
        """Save model/optimizer/scheduler state plus bookkeeping to ``filepath``."""
        checkpoint = {
            "epoch": epoch,
            "model_state_dict": self.model.state_dict(),
            "optimizer_state_dict": self.optimizer.state_dict(),
            "scheduler_state_dict": self.scheduler.state_dict(),
            "loss": loss,
            "metrics": metrics,
        }
        torch.save(checkpoint, filepath)

    def load_checkpoint(self, filepath):
        """Restore state from a checkpoint; returns (epoch, loss, metrics).

        NOTE(review): ``torch.load`` unpickles arbitrary objects — only load
        checkpoints from trusted sources.
        """
        checkpoint = torch.load(filepath, map_location=self.device)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
        return checkpoint["epoch"], checkpoint["loss"], checkpoint["metrics"]