import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
from torchmetrics.functional import structural_similarity_index_measure as ssim


# ------------------------------
# ViT Block
# ------------------------------
class ViTBlock(nn.Module):
    """Pre-norm Transformer encoder block (self-attention + MLP, residual).

    Operates on token sequences of shape (B, N, dim); the output shape
    equals the input shape.
    """

    def __init__(self, dim, num_heads=4, mlp_ratio=4.0):
        super().__init__()
        hidden_dim = int(dim * mlp_ratio)
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, dim),
        )

    def forward(self, x):
        # Pre-norm self-attention with residual connection.
        normed = self.norm1(x)
        x = x + self.attn(normed, normed, normed)[0]
        # Pre-norm MLP with residual connection.
        return x + self.mlp(self.norm2(x))


# ------------------------------
# Basic U-Net blocks
# ------------------------------
class DoubleConv(nn.Module):
    """Two successive (Conv3x3 -> BatchNorm -> ReLU) stages.

    Preserves spatial size (padding=1); maps in_ch channels to out_ch.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        layers = []
        for src, dst in ((in_ch, out_ch), (out_ch, out_ch)):
            layers.extend([
                nn.Conv2d(src, dst, 3, padding=1),
                nn.BatchNorm2d(dst),
                nn.ReLU(inplace=True),
            ])
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


class Down(nn.Module):
    """Encoder stage: halve H and W with max-pooling, then DoubleConv."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.net = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_ch, out_ch),
        )

    def forward(self, x):
        # (B, in_ch, H, W) -> (B, out_ch, H//2, W//2)
        return self.net(x)


class Up(nn.Module):
    """Decoder stage: upsample x1, pad it to match the skip tensor x2,
    concatenate along channels, then refine with DoubleConv.

    Expects x1 and x2 to each carry in_ch // 2 channels so the
    concatenation feeds DoubleConv(in_ch, out_ch).
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
        self.conv = DoubleConv(in_ch, out_ch)

    def forward(self, x1, x2):
        upsampled = self.up(x1)
        # Pad the upsampled map so its spatial size matches the skip tensor
        # (handles odd encoder sizes where pooling floored the dimension).
        pad_h = x2.size(2) - upsampled.size(2)
        pad_w = x2.size(3) - upsampled.size(3)
        if pad_h or pad_w:
            upsampled = F.pad(
                upsampled,
                [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
            )
        return self.conv(torch.cat([x2, upsampled], dim=1))


# ------------------------------
# UNet + ViT Generator
# ------------------------------
class DiffusionUNet(nn.Module):
    """UNet generator with an optional ViT bottleneck.

    Encoder: DoubleConv followed by three 2x downsampling stages.
    Bottleneck: feature map flattened to tokens and run through a
    ViT block (when ``use_vit``), then reshaped back.
    Decoder: three transposed-conv upsampling stages with skip
    concatenations, followed by a 1x1 conv and tanh (output in [-1, 1]).

    Args:
        in_channels: channels of the input image.
        out_channels: channels of the generated image.
        base_ch: channel width of the first stage; deeper stages use
            multiples (2x, 4x, 8x).
        use_vit: whether to apply the ViT block at the bottleneck.
    """

    def __init__(self, in_channels=3, out_channels=3, base_ch=64, use_vit=True):
        super().__init__()
        self.use_vit = use_vit
        self.inc = DoubleConv(in_channels, base_ch)
        self.down1 = Down(base_ch, base_ch * 2)
        self.down2 = Down(base_ch * 2, base_ch * 4)
        self.down3 = Down(base_ch * 4, base_ch * 8)

        if use_vit:
            self.vit = ViTBlock(dim=base_ch * 8, num_heads=8)
        else:
            self.vit = nn.Identity()

        self.up1 = nn.ConvTranspose2d(base_ch * 8, base_ch * 4, 2, stride=2)
        self.conv1 = DoubleConv(base_ch * 8, base_ch * 4)
        self.up2 = nn.ConvTranspose2d(base_ch * 4, base_ch * 2, 2, stride=2)
        self.conv2 = DoubleConv(base_ch * 4, base_ch * 2)
        self.up3 = nn.ConvTranspose2d(base_ch * 2, base_ch, 2, stride=2)
        self.conv3 = DoubleConv(base_ch * 2, base_ch)

        self.outc = nn.Conv2d(base_ch, out_channels, kernel_size=1)

    @staticmethod
    def _cat_with_skip(u, skip):
        """Pad ``u`` to ``skip``'s spatial size, then concat on channels.

        Fixes a crash on inputs whose H/W are not multiples of 8: max-pooling
        floors odd sizes, so the transposed-conv output can be one pixel
        smaller than the skip tensor. For divisible sizes the pad is a no-op.
        """
        dy = skip.size(2) - u.size(2)
        dx = skip.size(3) - u.size(3)
        if dy != 0 or dx != 0:
            u = F.pad(u, [dx // 2, dx - dx // 2, dy // 2, dy - dy // 2])
        return torch.cat([u, skip], dim=1)

    def forward(self, x):
        # Encoder path (each Down halves the spatial size).
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)

        # ViT bottleneck: (B, C, H, W) -> (B, H*W, C) tokens and back.
        if self.use_vit:
            B, C, H, W = x4.shape
            tokens = x4.flatten(2).transpose(1, 2)
            tokens = self.vit(tokens)
            x4 = tokens.transpose(1, 2).view(B, C, H, W)

        # Decoder path with size-matched skip concatenations.
        u = self.conv1(self._cat_with_skip(self.up1(x4), x3))
        u = self.conv2(self._cat_with_skip(self.up2(u), x2))
        u = self.conv3(self._cat_with_skip(self.up3(u), x1))

        # 1x1 projection, squashed to [-1, 1].
        return torch.tanh(self.outc(u))


# ------------------------------
# BaseModel subclass
# ------------------------------
class DiffusionwithvitModel(BaseModel):
    """Paired image-to-image translation model using the DiffusionUNet
    generator, trained with an L1 + (1 - SSIM) objective (no discriminator).
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add model-specific CLI options and set aligned-dataset defaults."""
        parser.set_defaults(norm='batch', dataset_mode='aligned')
        if is_train:
            parser.add_argument('--lambda_L1', type=float, default=100.0, help='L1 loss weight')
            parser.add_argument('--lambda_SSIM', type=float, default=1.0, help='SSIM loss weight')
        return parser

    def __init__(self, opt):
        super().__init__(opt)
        # Names consumed by BaseModel for logging/visualization/checkpointing.
        self.loss_names = ['G_L1', 'G_SSIM', 'G_total']
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        self.model_names = ['G']

        self.netG = DiffusionUNet(in_channels=opt.input_nc,
                                  out_channels=opt.output_nc,
                                  base_ch=getattr(opt, 'ngf', 64),
                                  use_vit=True).to(self.device)

        # Wrap for multi-GPU training; DataParallel handles replica placement.
        if hasattr(self, 'gpu_ids') and len(self.gpu_ids) > 1:
            self.netG = torch.nn.DataParallel(self.netG, self.gpu_ids)

        if self.isTrain:
            self.criterionL1 = nn.L1Loss().to(self.device)
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.lambda_L1 = getattr(opt, 'lambda_L1', 100.0)
            self.lambda_SSIM = getattr(opt, 'lambda_SSIM', 1.0)

    def set_input(self, input):
        """Unpack a dataloader batch and move tensors to the model device.

        ``direction`` decides which side of the aligned pair is the source.
        """
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input.get('A_paths' if AtoB else 'B_paths', None)

    def forward(self):
        """Run the generator: fake_B = G(real_A).

        The network is already placed on ``self.device`` in ``__init__``
        (and DataParallel manages its own replicas), so no per-call device
        transfer is needed here.
        """
        self.fake_B = self.netG(self.real_A)

    def backward_G(self):
        """Compute L1 + (1 - SSIM) losses and backpropagate."""
        loss_l1 = self.criterionL1(self.fake_B, self.real_B) * self.lambda_L1
        # SSIM expects inputs in [0, 1]; map from the generator's tanh range.
        fake_01 = (self.fake_B + 1.0) * 0.5
        real_01 = (self.real_B + 1.0) * 0.5
        ssim_val = ssim(fake_01, real_01, data_range=1.0)
        loss_ssim = (1.0 - ssim_val) * self.lambda_SSIM
        loss_G = loss_l1 + loss_ssim
        loss_G.backward()
        # Detach scalars for logging. NOTE(review): 'G_SSIM' logs the raw
        # SSIM similarity (higher is better), not the weighted loss term.
        self.loss_G_L1 = loss_l1.item()
        self.loss_G_SSIM = ssim_val.item()
        self.loss_G_total = loss_G.item()

    def optimize_parameters(self):
        """One training step: forward, zero grads, backward, optimizer step."""
        self.forward()
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
