import torch
import torch.nn as nn
from transformers import AutoModel
from torchvision.models import resnet50
import torch.nn.functional as F
import timm
from .mamba.models_insect import build_vssm_models_ as mamba_model
from .DWT import DWT_2D
from .modules.DyRoutFusion_CLS import DyRoutTrans,SentiCLS

#####################################
class APLLoss(nn.Module):
    """Asymmetric Polynomial Loss for multi-label classification.

    Combines asymmetric focusing (separate exponents for positive and
    negative targets, plus probability clipping on negatives) with a
    Taylor-polynomial expansion of the log-likelihood terms.

    Args:
        gamma_neg: focusing exponent applied to negative targets.
        gamma_pos: focusing exponent applied to positive targets.
        clip: margin added to negative probabilities (asymmetric clipping);
            disabled when None or <= 0.
        eps: numerical floor used inside log/clamp.
        disable_torch_grad_focal_loss: if True, the focusing weights are
            computed without gradient tracking.
    """

    def __init__(self, gamma_neg=4, gamma_pos=0, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=True):
        super(APLLoss, self).__init__()

        self.gamma_neg = gamma_neg
        self.gamma_pos = gamma_pos
        self.clip = clip
        self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
        self.eps = eps

        # Coefficients of the Taylor expansion polynomials.
        self.epsilon_pos = 1.0
        self.epsilon_neg = 0.0
        self.epsilon_pos_pow = -2.5

    def _focal_weight(self, xs_pos, xs_neg, y):
        """Per-element asymmetric focusing weight (1 - pt) ** gamma."""
        pt0 = xs_pos * y
        pt1 = xs_neg * (1 - y)  # pt = p if t > 0 else 1-p
        pt = pt0 + pt1
        one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
        return torch.pow(1 - pt, one_sided_gamma)

    def forward(self, x, y):
        """
        x: input logits with size (batch_size, number of labels).
        y: binarized multi-label targets with size (batch_size, number of labels).
        Returns the negated summed loss as a scalar tensor.
        """
        # Calculating probabilities
        x_sigmoid = torch.sigmoid(x)
        xs_pos = x_sigmoid
        xs_neg = 1 - x_sigmoid

        # Asymmetric clipping: shift negative probabilities up by `clip`.
        if self.clip is not None and self.clip > 0:
            xs_neg = (xs_neg + self.clip).clamp(max=1)

        # Basic Taylor expansion polynomials around the log terms.
        los_pos = y * (torch.log(xs_pos.clamp(min=self.eps))
                       + self.epsilon_pos * (1 - xs_pos.clamp(min=self.eps))
                       + self.epsilon_pos_pow * 0.5 * torch.pow(1 - xs_pos.clamp(min=self.eps), 2))
        los_neg = (1 - y) * (torch.log(xs_neg.clamp(min=self.eps))
                             + self.epsilon_neg * (xs_neg.clamp(min=self.eps)))
        loss = los_pos + los_neg

        # Asymmetric focusing: down-weight easy examples.
        if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.disable_torch_grad_focal_loss:
                # BUGFIX: the original toggled the *global* grad mode with
                # torch.set_grad_enabled(False) ... set_grad_enabled(True),
                # which unconditionally re-enabled gradients even when the
                # caller was inside a no_grad() context. A local context
                # manager restores the caller's state correctly.
                with torch.no_grad():
                    one_sided_w = self._focal_weight(xs_pos, xs_neg, y)
            else:
                one_sided_w = self._focal_weight(xs_pos, xs_neg, y)
            loss *= one_sided_w

        return -loss.sum()
#####################################

class wad_module(nn.Module):
    """Wavelet attention module: keeps the DWT low-frequency band and
    re-injects an attention map derived from the high-frequency bands."""

    def __init__(self, wavename='haar'):
        super(wad_module, self).__init__()
        self.dwt = DWT_2D(wavename=wavename)
        self.softmax = nn.Softmax2d()

    @staticmethod
    def get_module_name():
        return "wad"

    def forward(self, input):
        # Decompose into low-frequency (LL) and high-frequency bands;
        # the diagonal detail band (HH) is discarded.
        low, detail_h, detail_v, _ = self.dwt(input)
        # Spatial attention weights from the combined detail bands.
        attn = self.softmax(detail_h + detail_v)
        # Residual-style enhancement of the low-frequency component.
        return low + low * attn

def calculate_ratio(uni_modal, multi_modal, k=2.):
    """Compute normalized per-modality confidence ratios.

    Each modality score is passed through a Gaussian-like kernel of its
    distance to the multi-modal target, then normalized across the two
    modalities so the ratios sum to 1 element-wise.

    Args:
        uni_modal: dict with keys 'T' (text) and 'V' (visual) mapping to
            tensors broadcastable against `multi_modal`.
        multi_modal: target tensor the modality outputs are compared to.
        k: kernel sharpness; larger k penalizes distance more strongly.

    Returns:
        dict mapping 'T'/'V' to ratio tensors with a trailing singleton
        dimension appended (via unsqueeze(-1)).

    Note: unlike the previous implementation, the input dict is NOT
    mutated (the old code overwrote uni_modal[m] in place, a side effect
    on the caller's dict).
    """
    # Gaussian-style similarity: exp(-k * |u - m|^2)
    sim = {m: torch.exp(-k * torch.pow(torch.abs(uni_modal[m] - multi_modal), 2))
           for m in ('T', 'V')}
    total = sim['T'] + sim['V']
    # Normalize so the two modality ratios sum to one, then add a trailing dim.
    return {m: (sim[m] / total).unsqueeze(-1) for m in ('T', 'V')}

class DyRoutTransOptions:
    """Hyper-parameter container consumed by DyRoutTrans.

    Holds the hidden/FFN widths and the per-modality sequence lengths
    (one token each for text and visual after pooling).
    """

    def __init__(self):
        self.hidden_size, self.ffn_size = 32, 64
        self.seq_lens = [1, 1]

class TextModel(nn.Module):
    """Text encoder: TinyBERT pooled output projected to the shared hidden size."""

    def __init__(self, config):
        super(TextModel, self).__init__()
        # TinyBERT backbone loaded from a local checkpoint directory.
        self.bert = AutoModel.from_pretrained('./TinyBertWeight')
        self.trans = nn.Sequential(
            nn.Dropout(config.bert_dropout),
            nn.Linear(self.bert.config.hidden_size, config.middle_hidden_size),
            nn.ReLU(inplace=True)
        )

        # Fine-tune toggle: freeze the backbone when requested.
        freeze = config.fixed_text_model_params
        for param in self.bert.parameters():
            param.requires_grad = not freeze

    def forward(self, bert_inputs, masks, token_type_ids=None):
        assert bert_inputs.shape == masks.shape, 'error! bert_inputs and masks must have same shape!'
        outputs = self.bert(input_ids=bert_inputs, token_type_ids=token_type_ids, attention_mask=masks)
        # Use the pooled [CLS] representation, then project it.
        return self.trans(outputs['pooler_output'])
    
class ImageModel(nn.Module):
    """Visual encoder: WAD wavelet preprocessing followed by a VMamba
    backbone whose head is replaced by a projection to the shared size."""

    def __init__(self, config):
        super(ImageModel, self).__init__()
        # VMamba (tiny) backbone.
        self.mamba_model = mamba_model(cfg="vssm_tiny")
        # Swap the original classification head for dropout + linear + ReLU.
        head_in = self.mamba_model.classifier.head.in_features
        self.mamba_model.classifier.head = nn.Sequential(
            nn.Dropout(config.resnet_dropout),
            nn.Linear(head_in, config.middle_hidden_size),
            nn.ReLU(inplace=True)
        )

        # Wavelet attention module applied before the backbone.
        self.wad = wad_module()

    def forward(self, imgs):
        # WAD preprocessing, then backbone feature extraction.
        return self.mamba_model(self.wad(imgs))

class Model(nn.Module):
    """End-to-end multimodal model: text (TinyBERT) and image (VMamba+WAD)
    encoders, dynamic-routing fusion conditioned on label-derived ratios,
    and a sentiment classifier trained with APLLoss."""

    def __init__(self, config):
        super(Model, self).__init__()
        # Text branch.
        self.text_model = TextModel(config)
        # Image branch.
        self.img_model = ImageModel(config)
        # Multi-label loss.
        self.loss_func = APLLoss()

        # Per-modality projections into the 8-dim label space, used only
        # to compute label-conditioned fusion ratios during training.
        self.img_view = nn.Linear(config.middle_hidden_size, 8)
        self.text_view = nn.Linear(config.middle_hidden_size, 8)

        opt = DyRoutTransOptions()
        self.fusion = DyRoutTrans(opt)
        self.classifier = SentiCLS()

    def forward(self, texts, texts_mask, imgs, labels=None):
        """Run the full pipeline.

        Args:
            texts: token-id tensor for the text branch.
            texts_mask: attention mask, same shape as `texts`.
            imgs: image tensor for the visual branch.
            labels: optional multi-label targets; when given, fusion
                ratios are computed from them and the loss is returned.

        Returns:
            prob_vec, or (prob_vec, loss) when labels are provided.
        """
        text_features = self.text_model(texts, texts_mask)
        image_features = self.img_model(imgs)

        # FIX: use identity check `is None` instead of `== None` — with
        # tensor labels, `==` invokes element-wise comparison semantics;
        # identity is intended here (and matches the check further down).
        if labels is None:
            ratio = None
        else:
            # Project each modality into label space and derive
            # label-conditioned fusion ratios.
            img_modal = self.img_view(image_features)
            text_modal = self.text_view(text_features)
            uni_modal = {
                'T': text_modal,  # text features
                'V': img_modal,   # visual features
            }
            ratio = calculate_ratio(uni_modal, labels, k=0.1)

        # Fusion expects a sequence dimension (length 1 per modality).
        uni_fea = {
            'T': text_features.unsqueeze(1),   # text features
            'V': image_features.unsqueeze(1),  # visual features
        }
        fusion_feature = self.fusion(uni_fea, ratio)
        prob_vec = self.classifier(fusion_feature)

        if labels is not None:
            loss = self.loss_func(prob_vec, labels)
            return prob_vec, loss
        return prob_vec