from ast import Tuple
from email import header
from http.client import ImproperConnectionState
import imp
from select import select
from turtle import forward
from typing import List, Optional
from torch import nn
import torch.nn.functional as F
import torch
import random
from torch import Tensor
from typing import Union, Tuple, List, Iterable, Dict
import os
import json

class TextCNN(nn.Module):
    """Kim-style TextCNN sentence classifier over token embeddings.

    Runs parallel 2-D convolutions (one branch per kernel size) across the
    sequence dimension, pools each feature map over time, concatenates the
    pooled features, and classifies with a linear layer.
    """

    def __init__(self, embedding_dim: int, kernel_sizes: List[int], num_class: int, kernel_num: int, dropout_rate=0.5) -> None:
        """
        :param embedding_dim: dimensionality of the input token embeddings
        :param kernel_sizes: window size (in tokens) of each conv branch
        :param num_class: number of output classes
        :param kernel_num: number of filters per conv branch
        :param dropout_rate: dropout applied to each pooled feature vector
        """
        super(TextCNN, self).__init__()
        self.layer_num = len(kernel_sizes)
        # Each branch convolves over the full embedding width, so its output
        # has spatial shape (H_out, 1).
        self.conv = nn.ModuleList(
            [nn.Conv2d(1, kernel_num, (kernel_size, embedding_dim)) for kernel_size in kernel_sizes]
        )
        self.dropout = nn.Dropout(dropout_rate)
        # NOTE: attribute name 'classfier' (sic) is kept so previously saved
        # state dicts still load.
        self.classfier = nn.Linear(self.layer_num * kernel_num, num_class)

    def conv_and_pooling(self, input, conv_layer, mean=False):
        """Convolve, pool over the time axis, and apply dropout.

        :param input: (B, 1, L, embedding_dim)
        :param conv_layer: one Conv2d branch from self.conv
        :param mean: if True use average pooling, otherwise max pooling
        :return: (B, kernel_num)
        """
        x = F.relu(conv_layer(input))  # (B, kernel_num, H_out, 1)
        x = x.squeeze(-1)              # (B, kernel_num, H_out)
        # BUGFIX: the branches were previously swapped (mean=True ran
        # max_pool1d and mean=False ran avg_pool1d); the flag now matches
        # its name and the comments.
        if mean:
            x = F.avg_pool1d(x, x.size(-1))  # (B, kernel_num, 1)
        else:
            x = F.max_pool1d(x, x.size(-1))  # (B, kernel_num, 1)
        return self.dropout(x.squeeze(2))    # (B, kernel_num)

    def forward(self, input):
        """
        :param input: (B, L, embedding_dim) token embeddings
        :return: (B, num_class) classification logits
        """
        input = input.unsqueeze(1)  # (B, 1, L, embedding_dim)
        # Max pooling on every branch (mean=False default) — this preserves
        # the original runtime behaviour, where the inverted flag made
        # forward() effectively max-pool.
        result = [self.conv_and_pooling(input, conv) for conv in self.conv]
        x = torch.cat(result, dim=-1)  # (B, len(kernel_sizes) * kernel_num)
        output = self.classfier(x)     # (B, num_class)
        return output

class DilationCNN(nn.Module):
    """TextCNN variant whose convolution branches use dilation along the sequence axis."""

    def __init__(self, embedding_dim: int, kernel_sizes: List[int], dilation: List[int], num_class: int, kernel_num: int, dropout_rate=0.5):
        """
        :param embedding_dim: dimensionality of the input token embeddings
        :param kernel_sizes: window size (in tokens) of each conv branch
        :param dilation: per-branch dilation factor on the sequence axis;
                         zipped pairwise with kernel_sizes
        :param num_class: number of output classes
        :param kernel_num: number of filters per conv branch
        :param dropout_rate: dropout applied to each pooled feature vector
        """
        super(DilationCNN, self).__init__()
        self.layer_num = len(kernel_sizes)
        # Remembered so forward() can validate its input width.
        self.embedding_dim = embedding_dim
        self.conv = nn.ModuleList(
            [nn.Conv2d(1, kernel_num, (kernel_size, embedding_dim), dilation=(d, 1))
             for (kernel_size, d) in zip(kernel_sizes, dilation)]
        )
        self.dropout = nn.Dropout(dropout_rate)
        # NOTE: attribute name 'classfier' (sic) is kept so previously saved
        # state dicts still load.
        self.classfier = nn.Linear(self.layer_num * kernel_num, num_class)

    def conv_and_pooling(self, input, conv_layer, mean=False):
        """Convolve, pool over the time axis, and apply dropout.

        :param input: (B, 1, L, embedding_dim)
        :param conv_layer: one Conv2d branch from self.conv
        :param mean: if True use average pooling, otherwise max pooling
        :return: (B, kernel_num)
        """
        x = torch.relu(conv_layer(input))  # (B, kernel_num, H_out, 1)
        x = x.squeeze(-1)                  # (B, kernel_num, H_out)
        # BUGFIX: branches were swapped (mean=True ran max_pool1d); the flag
        # now matches its name and the comments.
        if mean:
            x = F.avg_pool1d(x, x.size(-1))  # (B, kernel_num, 1)
        else:
            x = F.max_pool1d(x, x.size(-1))  # (B, kernel_num, 1)
        return self.dropout(x.squeeze(2))    # (B, kernel_num)

    def forward(self, input):
        """
        :param input: (B, L, embedding_dim) token embeddings
        :return: (B, num_class) classification logits
        """
        # BUGFIX: was hard-coded `assert input.size(-1) == 768` even though
        # embedding_dim is configurable; check against the configured width.
        assert input.size(-1) == self.embedding_dim
        input = input.unsqueeze(1)  # (B, 1, L, embedding_dim)
        # Max pooling on every branch (mean=False default) — preserves the
        # original runtime behaviour of the inverted flag.
        result = [self.conv_and_pooling(input, conv) for conv in self.conv]
        x = torch.cat(result, dim=-1)  # (B, len(kernel_sizes) * kernel_num)
        output = self.classfier(x)     # (B, num_class)
        return output
        
class MLP(nn.Module):
    """Single feed-forward projection: dropout -> linear -> GELU."""

    def __init__(self, n_in, n_out, dropout=0):
        """
        :param n_in: input feature dimension
        :param n_out: output feature dimension
        :param dropout: dropout probability applied to the input (0 disables it)
        """
        super().__init__()
        self.linear = nn.Linear(n_in, n_out)
        self.activation = nn.GELU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Dropout first, then project and activate.
        return self.activation(self.linear(self.dropout(x)))

def create_masked_lm_probability(
    tokenizer,
    inputs_feature: torch.Tensor,
    special_tokens_mask: Optional[torch.Tensor] = None,
    mlm_probability: float = 0.15,
    mask_probability: float = 0.8,
    replace_probability: float = 0.5):
    """
    @param  :
        tokenizer: Hugging Face tokenizer
        inputs_feature: (BS, length), where the length includes a [CLS] token and two [SEP] tokens placed middle and last
        special_tokens_mask: optional (BS, length) mask of positions that must never be selected; computed from the tokenizer when None
        mlm_probability: fraction of (non-special) tokens selected for prediction
        mask_probability: of selected tokens, fraction replaced with [MASK]
        replace_probability: of the remaining selected tokens, fraction replaced by a random token
    -------
    @Returns  :
        The shape of inputs is (BS, length)
        The shape of labels is (BS, length)
    -------
    @description  :
    Prepare masked tokens inputs/labels for masked language modeling: 80% probability MASK, 10% probability random, 10% probability original.
    Reference: https://github.com/princeton-nlp/SimCSE/blob/121443ec2650b62145c618e30867281484b59b30/train.py#L497
    ---------
    """
    # BUGFIX: helper tensors are now created on the inputs' device; they
    # previously defaulted to CPU and broke on CUDA inputs.
    device = inputs_feature.device
    inputs = inputs_feature.clone()
    labels = inputs_feature.clone()
    # We sample a few tokens in each sequence for MLM training (with probability `mlm_probability`)
    probability_matrix = torch.full(labels.shape, mlm_probability, device=device)
    if special_tokens_mask is None:
        special_tokens_mask = [
            tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool, device=device)
    else:
        special_tokens_mask = special_tokens_mask.bool()

    # Special tokens ([CLS]/[SEP]/[PAD]...) are never selected.
    probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    # We only compute loss on selected tokens ('ignore_index' is -100 in nn.CrossEntropyLoss).
    labels[~masked_indices] = -100

    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced = torch.bernoulli(torch.full(labels.shape, mask_probability, device=device)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time (replace_probability of the remaining 20%), we replace masked input tokens with a random word
    indices_random = torch.bernoulli(torch.full(labels.shape, replace_probability, device=device)).bool() & ~indices_replaced & masked_indices
    # Ids below 100+3 are [unused]/special tokens in the Hugging Face BERT vocab, so random draws start above them.
    random_words = torch.randint(100 + 3, len(tokenizer), labels.shape, dtype=torch.long, device=device)
    inputs[indices_random] = random_words[indices_random]

    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels

def create_masked_lm_predictions(
    tokenizer,
    inputs_feature: torch.Tensor,
    special_tokens_mask: Optional[torch.Tensor] = None,
    mlm_probability: float = 0.15,
    mask_probability: float = 0.8,
    replace_probability: float = 0.5):
    """
    @param  :
        tokenizer: Hugging Face tokenizer
        inputs_feature: (BS, length), where the length includes a [CLS] token and two [SEP] tokens placed middle and last
        special_tokens_mask: unused here; special positions are detected by token id instead
        mlm_probability: fraction of eligible tokens selected for prediction per sentence
        mask_probability: of selected tokens, fraction replaced with [MASK]
        replace_probability: of the remaining selected tokens, fraction kept as the original token
    -------
    @Returns  :
        The shape of inputs is (BS, length)
        The shape of labels is (BS, length)
    -------
    @description  :
    Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
    Reference: https://github.com/google-research/bert/blob/eedf5716ce1268e56f0a50264a88cafad334ac61/create_pretraining_data.py#L342
    ---------
    """
    # BUGFIX: work on a copy; the original mutated the caller's tensor in
    # place, unlike the sibling create_masked_lm_probability which clones.
    inputs = inputs_feature.clone()
    # -100 is the 'ignore_index' of nn.CrossEntropyLoss, so unselected
    # positions contribute no loss.
    labels = torch.full(inputs.shape, -100)
    mask_token_id = tokenizer.mask_token_id

    for row, sentence in enumerate(inputs):
        # Collect positions eligible for masking (skip [CLS]/[SEP]/[PAD]).
        shuffle_idx = []
        for i, token in enumerate(sentence):
            if token == tokenizer.cls_token_id or token == tokenizer.sep_token_id or token == tokenizer.pad_token_id:
                continue
            shuffle_idx.append(i)
        assert len(shuffle_idx) > 0
        random.shuffle(shuffle_idx)

        num_to_predict = max(1, int(round(len(shuffle_idx) * mlm_probability)))
        for index in shuffle_idx[:num_to_predict]:
            # BUGFIX: every selected position gets a label, including the
            # ~10% kept unchanged — the BERT reference predicts all selected
            # positions, but the original left kept-original tokens at -100.
            labels[row][index] = inputs[row][index]
            if random.random() < mask_probability:
                # 80% of the time, replace with [MASK]
                inputs[row][index] = mask_token_id
            elif random.random() < replace_probability:
                # 10% of the time, keep the original token
                pass
            else:
                # 10% of the time, replace with a random non-special word
                # (ids below 100+3 are [unused]/special tokens).
                inputs[row][index] = random.randint(100 + 3, len(tokenizer) - 1)

    return inputs, labels

def freeze(_layer: nn.Module):
    """Freeze a module's parameters so they are excluded from backpropagation.

    :param _layer: module whose parameters should stop receiving gradients
    :return: the same module, for convenient chaining
    """
    for param in _layer.parameters():
        param.requires_grad_(False)
    return _layer


class LayerNorm(nn.Module):
    """sentence-transformers-style module that layer-normalizes the
    'sentence_embedding' entry of a feature dict, with save/load helpers."""

    def __init__(self, dimension: int):
        """
        :param dimension: size of the sentence-embedding vector to normalize
        """
        super(LayerNorm, self).__init__()
        self.dimension = dimension
        self.norm = nn.LayerNorm(dimension)

    def forward(self, features: Dict[str, Tensor]):
        # Normalize in place within the feature dict and hand it back,
        # matching the sentence-transformers module pipeline convention.
        normed = self.norm(features['sentence_embedding'])
        features['sentence_embedding'] = normed
        return features

    def get_sentence_embedding_dimension(self):
        """Return the embedding dimension this module operates on."""
        return self.dimension

    def save(self, output_path):
        """Persist the config and weights under output_path."""
        config_path = os.path.join(output_path, 'config.json')
        with open(config_path, 'w') as fOut:
            json.dump({'dimension': self.dimension}, fOut, indent=2)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    @staticmethod
    def load(input_path):
        """Rebuild a LayerNorm module from files written by save()."""
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)
        model = LayerNorm(**config)
        weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu'))
        model.load_state_dict(weights)
        return model