import math
from typing import List

import torch
from sentence_transformers import SentenceTransformer
from torch import nn
from torch.autograd import Variable
from transformers import BertModel, AutoModelForMaskedLM, BertTokenizer

from config.config import Config


def init_weights(module):
    """Initialize one submodule in place; intended for use with ``nn.Module.apply``.

    Linear/Embedding weights get Xavier-normal init (biases and the padding
    row are zeroed); LayerNorm is reset to the identity transform (weight=1,
    bias=0). Other module types are left untouched.
    """
    if isinstance(module, nn.LayerNorm):
        # Identity-normalization start: scale 1, shift 0.
        module.weight.data.fill_(1.0)
        module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        nn.init.xavier_normal_(module.weight)
        if module.padding_idx is not None:
            # Padding tokens must contribute a zero vector.
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.Linear):
        nn.init.xavier_normal_(module.weight)
        if module.bias is not None:
            module.bias.data.zero_()


class AttentionBlock(nn.Module):
    """One post-norm transformer encoder block.

    Self-attention and a feed-forward MLP, each wrapped with a residual
    connection, dropout, and LayerNorm applied *after* the residual add
    (post-norm layout).

    NOTE(review): ``nn.MultiheadAttention`` here uses its default
    ``batch_first=False``, so inputs are presumably (seq, batch, embed_dim) —
    confirm against callers. No attention mask is applied.
    """

    def __init__(self, config: Config):
        super().__init__()
        self.layer_norm_1 = nn.LayerNorm(normalized_shape=config.embed_dim)
        self.attention = nn.MultiheadAttention(
            embed_dim=config.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.dropout,
        )
        self.layer_norm_2 = nn.LayerNorm(normalized_shape=config.embed_dim)
        self.feed_forward = MLP(
            config.embed_dim,
            config.embed_dim,
            config.feed_forward_hidden_size,
            config.num_feed_forward_layers,
            config.dropout,
        )
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        # Self-attention sub-layer: residual + dropout + post-norm.
        attn_out, _ = self.attention(x, x, x)
        x = self.layer_norm_1(x + self.dropout(attn_out))

        # Feed-forward sub-layer: residual + dropout + post-norm.
        x = self.layer_norm_2(x + self.dropout(self.feed_forward(x)))
        return x


class AttentionLayer(nn.Module):
    """A stack of ``config.num_attention_layers`` AttentionBlocks.

    Position and speaker embeddings are added to the input and normalized
    once before the stack. The final block is kept as a separate attribute
    (``top_attention``) so it can be addressed independently of the others.
    """

    def __init__(self, config: Config):
        super().__init__()

        self.layer_norm = nn.LayerNorm(config.embed_dim)

        # All blocks except the last one.
        self.attention_blocks = nn.ModuleList(
            AttentionBlock(config) for _ in range(config.num_attention_layers - 1)
        )
        self.top_attention = AttentionBlock(config)

    def forward(self, x, embedded_positions, embedded_speakers):
        # Fuse content, position, and speaker information, then normalize.
        hidden = self.layer_norm(x + embedded_positions + embedded_speakers)

        for block in self.attention_blocks:
            hidden = block(hidden)

        return self.top_attention(hidden)


class PositionalEmbeddings(nn.Module):
    """Sinusoidal positional embeddings (Vaswani et al., 2017).

    In the default mode, returns the precomputed sin/cos table for the first
    ``x.size(0)`` positions. In ``'complex-order'`` mode, returns the raw
    position-frequency product (the phase matrix) instead of its sin/cos.

    Fix: dropped the deprecated ``torch.autograd.Variable`` wrapper — since
    PyTorch 0.4 Variables and Tensors are merged, and both the registered
    buffer and freshly created tensors already have ``requires_grad=False``.

    :param config: provides ``embed_dim``; assumes embed_dim is even
        (odd dims would make the ``1::2`` slice shorter than ``div_term``).
    :param mode: positional scheme; anything except ``'complex-order'``
        selects the standard sinusoidal table.
    :param max_len: maximum sequence length covered by the table.
    """

    def __init__(self, config: Config, mode, max_len=70):
        super().__init__()

        self.mode = mode

        pe = torch.zeros(max_len, config.embed_dim)
        position = torch.arange(0, max_len).unsqueeze(1)
        # Standard transformer frequency schedule: 10000^(-2i/d).
        div_term = torch.exp(
            torch.arange(0, config.embed_dim, 2) * -(math.log(10000.0) / config.embed_dim)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        # Buffer: moves with the module across devices, is saved in the
        # state dict, and never receives gradients.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # NOTE(review): indexing by x.size(0) assumes the sequence dimension
        # comes first — confirm against callers.
        if self.mode != 'complex-order':
            return self.pe[:x.size(0)].to(x.device)

        # 'complex-order' mode rebuilds the phase matrix pos_i * freq_j on the
        # fly; assumes x is 2-D (positions, embed_dim).
        position_size, embed_dim = x.size()

        position_j = 1. / torch.pow(10000., 2 * torch.arange(0, embed_dim, dtype=torch.float32) / embed_dim)
        position_j = torch.unsqueeze(position_j, 0)

        position_i = torch.arange(0, position_size, dtype=torch.float32)
        position_i = torch.unsqueeze(position_i, 1)
        position_embedding = torch.matmul(position_i, position_j)

        return position_embedding.to(x.device)


class LinearBlock(nn.Module):
    """Linear -> GELU -> Dropout, the basic unit that MLP stacks."""

    def __init__(self, input_dim, output_dim, dropout):
        super().__init__()
        stages = [
            nn.Linear(input_dim, output_dim),
            nn.GELU(),
            nn.Dropout(dropout),
        ]
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        return self.model(x)


class MLP(nn.Module):
    """Feed-forward network: an input LinearBlock, ``num_middle_blocks``
    hidden LinearBlocks, and a plain Linear projection to ``out_dim``.

    The middle blocks are kept in a nested ``nn.Sequential`` so the
    state-dict layout (``model.1.<i>...``) matches existing checkpoints.
    """

    def __init__(self, input_dim, out_dim, hidden_dim, num_middle_blocks, dropout):
        super().__init__()

        entry = LinearBlock(input_dim, hidden_dim, dropout)
        middle = nn.Sequential(
            *[LinearBlock(hidden_dim, hidden_dim, dropout) for _ in range(num_middle_blocks)]
        )
        head = nn.Linear(hidden_dim, out_dim)
        self.model = nn.Sequential(entry, middle, head)

    def forward(self, x):
        return self.model(x)


class WarmupLambda:
    """LR-lambda for a three-phase schedule: linear warmup, plateau at 1.0,
    then linear decay. Intended for ``torch.optim.lr_scheduler.LambdaLR``.

    :param steps: total number of scheduler steps.
    :param milestones: fractions of ``steps`` where warmup ends
        (``milestones[0]``) and the plateau ends (``milestones[1]``);
        the decay phase always runs to the final step.
    :param init_lr_ratio: LR multiplier at step 0.
    :param last_lr_ratio: LR multiplier at the final step.
    """

    def __init__(self, steps: int, milestones: List[float], init_lr_ratio: float, last_lr_ratio: float):
        self.steps = steps

        # Convert phase-boundary fractions into absolute step counts;
        # the third boundary is pinned to the end of training.
        stage_fractions = (milestones[0], milestones[1], 1.0)
        self.milestones = [int(fraction * steps) for fraction in stage_fractions]

        self.init_lr_ratio = init_lr_ratio
        self.last_lr_ratio = last_lr_ratio

    def __call__(self, step):
        warm_end, flat_end, total = self.milestones

        if step < warm_end:
            # Linear ramp from init_lr_ratio up to 1.0.
            return self.init_lr_ratio + (1. - self.init_lr_ratio) * step / warm_end
        if step <= flat_end:
            return 1.0
        # Linear decay from 1.0 down to last_lr_ratio at the final step.
        return self.last_lr_ratio + (1.0 - self.last_lr_ratio) * (self.steps - step) / (total - flat_end)


class Classifier(nn.Module):
    """Classification head over pre-computed sentence embeddings.

    Adds positional and speaker embeddings to the sentence vectors, runs them
    through an AttentionLayer stack, and projects to ``config.num_labels``
    logits per position. All weights are (re)initialized via ``init_weights``.
    """

    def __init__(self, config: Config):
        super().__init__()

        self.config = config

        self.position_embeddings = PositionalEmbeddings(config, self.config.position_embedding_type)
        self.speaker_embeddings = nn.Embedding(num_embeddings=config.num_speakers, embedding_dim=config.embed_dim)
        self.attention_layer = AttentionLayer(config)
        self.cls = nn.Linear(config.embed_dim, config.num_labels)

        self.apply(init_weights)

    def forward(self, sent_embeddings, speaker_ids):
        positions = self.position_embeddings(sent_embeddings)
        speakers = self.speaker_embeddings(speaker_ids)

        hidden = self.attention_layer(sent_embeddings, positions, speakers)
        return self.cls(hidden)


class BertSentenceEmbeddings(nn.Module):
    """Sentence embeddings from a BERT encoder; only selected layers train.

    NOTE(review): the original docstring said "Roberta", but the code loads
    ``BertModel`` — presumably ``config.pretrained_model`` points at a
    BERT-family checkpoint; confirm against the config.
    """

    def __init__(self, config: Config):
        super().__init__()

        self.config = config
        self.word_embeddings = BertModel.from_pretrained(config.pretrained_model)
        self.freeze_bert_layers(self.word_embeddings)
        self.layer_norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, input):
        # One vector per sentence: BERT's pooler output, normalized and
        # regularized with dropout.
        encoded = self.word_embeddings(
            input_ids=input['input_ids'],
            attention_mask=input['attention_mask'],
        )
        pooled = self.layer_norm(encoded.pooler_output)
        return self.dropout(pooled)

    def freeze_bert_layers(self, model):
        """Freeze every encoder parameter except those whose name contains a
        substring listed in ``config.bert_unfreeze_layers``. ``None`` means
        leave everything trainable."""
        unfreeze_layers = self.config.bert_unfreeze_layers
        if unfreeze_layers is None:
            return

        for name, param in model.named_parameters():
            param.requires_grad = any(tag in name for tag in unfreeze_layers)
            print(name, param.size(), param.requires_grad)


class SimCSESentenceEmbeddings(nn.Module):
    """Sentence embeddings from a SimCSE-style masked-LM checkpoint; only
    selected layers train.

    Uses the [CLS] token of the encoder's last hidden state as the sentence
    vector (no pooler, no extra normalization).
    """

    def __init__(self, config: Config):
        super().__init__()

        self.config = config
        self.word_embeddings = AutoModelForMaskedLM.from_pretrained(config.pretrained_model)
        self.freeze_bert_layers(self.word_embeddings)

    def forward(self, input):
        outputs = self.word_embeddings(
            input_ids=input['input_ids'],
            attention_mask=input['attention_mask'],
            output_hidden_states=True,
        )
        # hidden_states[-1] is the last encoder layer; position 0 is [CLS].
        return outputs.hidden_states[-1][:, 0, :]

    def freeze_bert_layers(self, model):
        """Freeze every encoder parameter except those whose name contains a
        substring listed in ``config.bert_unfreeze_layers``. ``None`` means
        leave everything trainable."""
        unfreeze_layers = self.config.bert_unfreeze_layers
        if unfreeze_layers is None:
            return

        for name, param in model.named_parameters():
            param.requires_grad = any(tag in name for tag in unfreeze_layers)
            print(name, param.size(), param.requires_grad)


class SBertSentenceEmbeddings(nn.Module):
    """Pass-through "encoder": returns precomputed sentence embeddings.

    The original on-the-fly SentenceTransformer encoding (e.g.
    "uer/sbert-base-chinese-nli") was disabled; embeddings are now expected
    to be computed offline and supplied in the batch under ``'embeddings'``.
    """

    def __init__(self, config: Config):
        super().__init__()
        self.config = config

    def forward(self, input):
        # Embeddings arrive precomputed; just hand them back.
        return input['embeddings']

