import torch
import torch.nn as nn
from torch.autograd import Function
import torch.nn.functional as F
import numpy as np
from torch.nn.utils import spectral_norm

class LambdaSheduler(nn.Module):
    """Anneals an adaptation weight from 0 toward 1 over training.

    lambda(p) = 2 / (1 + exp(-gamma * p)) - 1, where p = curr_iter / max_iter.
    (Schedule from the DANN paper; the class-name spelling is kept for
    backward compatibility with existing callers.)
    """
    def __init__(self, gamma=1.0, max_iter=1000, **kwargs):
        super(LambdaSheduler, self).__init__()
        self.gamma = gamma          # steepness of the annealing curve
        self.max_iter = max_iter    # iteration at which progress saturates at 1
        self.curr_iter = 0          # training progress counter

    def lamb(self):
        """Return the current annealed weight in [0, 1)."""
        progress = self.curr_iter / self.max_iter
        return 2. / (1. + np.exp(-self.gamma * progress)) - 1

    def step(self):
        """Advance the progress counter, clamped at max_iter."""
        self.curr_iter = min(self.curr_iter + 1, self.max_iter)

class AdversarialLoss(nn.Module):
    '''
    Domain-adversarial loss: features pass through a gradient-reversal layer
    and a domain classifier; source samples are labeled 1, target samples 0.

    Acknowledgement: The adversarial loss implementation is inspired by http://transfer.thuml.ai/
    '''
    def __init__(self, gamma=1.0, max_iter=1000, g_dim=1024 ,use_lambda_scheduler=True, **kwargs):
        super(AdversarialLoss, self).__init__()
        self.domain_classifier = Discriminator(input_dim=g_dim)
        self.use_lambda_scheduler = use_lambda_scheduler
        if self.use_lambda_scheduler:
            self.lambda_scheduler = LambdaSheduler(gamma, max_iter)

    def forward(self, source, target):
        # Annealed reversal strength; fixed at 1.0 when the scheduler is off.
        if self.use_lambda_scheduler:
            lamb = self.lambda_scheduler.lamb()
            self.lambda_scheduler.step()
        else:
            lamb = 1.0
        source_loss = self.get_adversarial_result(source, True, lamb)
        target_loss = self.get_adversarial_result(target, False, lamb)
        return 0.5 * (source_loss + target_loss)

    def get_adversarial_result(self, x, source=True, lamb=1.0):
        """BCE-with-logits loss of the domain classifier on gradient-reversed x."""
        reversed_feats = ReverseLayerF.apply(x, lamb)
        domain_pred = self.domain_classifier(reversed_feats)
        # Source batches get all-ones labels, target batches all-zeros.
        label_builder = torch.ones if source else torch.zeros
        domain_label = label_builder(len(x), 1).long()
        # loss_fn = nn.BCELoss()
        criterion = nn.BCEWithLogitsLoss()
        return criterion(domain_pred, domain_label.float().to(domain_pred.device))
    

class ReverseLayerF(Function):
    """Gradient-reversal layer: identity on forward, gradients scaled by -alpha."""

    @staticmethod
    def forward(ctx, x, alpha):
        # Stash the reversal strength for the backward pass.
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Flip and scale the incoming gradient; alpha itself gets no gradient,
        # hence the trailing None.
        return grad_output.neg() * ctx.alpha, None

class Discriminator(nn.Module):
    """Two-hidden-layer MLP domain classifier emitting one logit per sample."""

    def __init__(self, input_dim=256, hidden_dim=256):
        super(Discriminator, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.layers = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
            # nn.Sigmoid()  # raw logits: downstream uses BCEWithLogitsLoss
        )

    def forward(self, x):
        return self.layers(x)
    

class LightweightSelfAttention(nn.Module):
    """Self-attention across the batch dimension with reduced Q/K width.

    The residual gate ``gamma`` is zero-initialized, so the module is an exact
    identity at initialization and mixes in attention output as gamma is learned.
    """

    def __init__(self, input_dim, reduction_ratio=4):
        super().__init__()
        reduced_dim = input_dim // reduction_ratio
        self.query = nn.Linear(input_dim, reduced_dim)
        self.key = nn.Linear(input_dim, reduced_dim)
        self.value = nn.Linear(input_dim, input_dim)
        # Learned residual gate, starts at zero (identity behavior).
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        # x: [B, C] or [B*num_patches, C]; attention mixes rows of the batch.
        q = self.query(x)   # [B, C/r]
        k = self.key(x)     # [B, C/r]
        v = self.value(x)   # [B, C]

        scale = torch.sqrt(torch.tensor(q.size(-1)))
        weights = torch.softmax(q @ k.T / scale, dim=-1)  # [B, B]
        return self.gamma * (weights @ v) + x  # residual connection

class AttnDiscriminator(nn.Module):
    """Discriminator preceded by a lightweight self-attention feature mixer."""

    def __init__(self, input_dim=384, hidden_dim=512, reduction_ratio=4):
        super().__init__()
        self.attention = LightweightSelfAttention(input_dim, reduction_ratio)
        self.discriminator = Discriminator(input_dim, hidden_dim)

        # Xavier-initialize every linear layer, including those in submodules.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.1)

    def forward(self, x):
        return self.discriminator(self.attention(x))



class PatchLoss(AdversarialLoss, LambdaSheduler):
    """Combined global + patch-level (local) adversarial loss.

    The global branch reuses AdversarialLoss's domain classifier on pooled
    features; the local branch applies one shared attention discriminator to
    every spatial position of the patch feature maps.  ``dynamic_factor``
    balances the two branches from running discriminator-performance proxies
    (see ``update_dynamic_factor``).
    """

    def __init__(self, gamma=5.0, max_iter=8000, g_dim=1024, l_dim=384, **kwargs):
        super(PatchLoss, self).__init__(gamma=gamma, max_iter=max_iter, g_dim=g_dim, **kwargs)
        # BUG FIX: via the MRO, AdversarialLoss.__init__ invokes
        # LambdaSheduler.__init__ with NO arguments, so the inherited
        # scheduler state used by self.lamb()/self.step() silently kept the
        # defaults (gamma=1.0, max_iter=1000).  Re-apply the requested
        # schedule explicitly.
        self.gamma = gamma
        self.max_iter = max_iter
        # Single discriminator shared across all patch positions.
        self.patch_discriminator = AttnDiscriminator(input_dim=l_dim)
        # self.patch_discriminator = Discriminator(input_dim=l_dim)

        # Running performance proxies for the global/local discriminators;
        # consumed and reset by update_dynamic_factor().
        self.d_g, self.d_l = 0, 0
        self.dynamic_factor = 0.5

    def forward(self, source, target, source_patch, target_patch):
        """Return the weighted sum of global and local adversarial losses.

        source/target: pooled features [B, g_dim];
        source_patch/target_patch: patch features [B, l_dim, H, W].
        """
        lamb = self.lamb()
        self.step()
        B, C, H, W = source_patch.shape
        source_loss_g = self.get_adversarial_result(source, True, lamb)
        target_loss_g = self.get_adversarial_result(target, False, lamb)
        source_loss_l = self.get_local_adversarial_result(source_patch, True, lamb)
        target_loss_l = self.get_local_adversarial_result(target_patch, False, lamb)
        # The 0.05 / 0.01 factors rescale the two branches — presumably
        # hand-tuned; confirm against training configs before changing.
        global_loss = 0.5 * (source_loss_g + target_loss_g) * 0.05
        local_loss = 0.5 * (source_loss_l + target_loss_l) * 0.01

        # Accumulate discriminator-performance proxies (higher == easier to
        # separate domains) for the dynamic weighting.
        self.d_g = self.d_g + 2 * (1 - 2 * global_loss.cpu().item())
        # BUG FIX: the original wrote `local_loss / H*W`, which parses as
        # (local_loss / H) * W; per-position normalization needs H*W in the
        # denominator.
        self.d_l = self.d_l + 2 * (1 - 2 * (local_loss / (H * W)).cpu().item())

        adv_loss = (1 - self.dynamic_factor) * global_loss + self.dynamic_factor * local_loss
        return adv_loss

    def get_local_adversarial_result(self, x, source=True, lamb=1.0):
        """Per-position domain loss over a [B, C, H, W] patch feature map."""
        loss_fn = nn.BCEWithLogitsLoss()
        x = ReverseLayerF.apply(x, lamb)

        B, C, H, W = x.shape
        # Flatten spatial positions so each one is classified independently.
        x = x.permute(0, 2, 3, 1).reshape(-1, C)  # [B*H*W, C]

        # Shared discriminator for all positions
        domain_pred = self.patch_discriminator(x)  # [B*H*W, 1]

        # Source positions are labeled 1, target positions 0.
        device = domain_pred.device
        domain_label = torch.ones(B*H*W, 1, device=device) if source else \
                    torch.zeros(B*H*W, 1, device=device)

        return loss_fn(domain_pred, domain_label.float())

    def update_dynamic_factor(self, epoch_length):
        """Recompute the global/local weighting from the accumulated proxies.

        Call once per epoch; epoch_length is the number of forward() calls
        accumulated since the last update.  Resets the accumulators.
        """
        eps = 1e-8
        if self.d_g == 0 and self.d_l == 0:
            self.dynamic_factor = 0.5
        else:
            self.d_g = self.d_g / epoch_length
            self.d_l = self.d_l / epoch_length
            # More weight to the local branch when the global discriminator
            # is doing relatively better, and vice versa.
            self.dynamic_factor = 1 - self.d_g / (self.d_g + self.d_l + eps)
        self.d_g, self.d_l = 0, 0
