import torch.nn as nn
import torch
import math
import pdb
from nltk.stem import WordNetLemmatizer
import re
import sys
from Dataloader.dataloader_utils import mask_tokens
from torch.autograd import Function
from functools import reduce

class GradReverse(Function):
    """Identity in the forward pass; sign-flips gradients in the backward pass.

    Placing this op in a computation graph makes every layer upstream of it
    receive negated gradients, which is the standard building block for
    adversarial (domain-confusion style) objectives.
    """

    @staticmethod
    def forward(ctx, x):
        # Pass the input through unchanged; view_as keeps autograd happy.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse the gradient direction.
        return -grad_output

def grad_reverse(x):
    """Functional wrapper around :class:`GradReverse` (identity forward,
    negated gradient backward)."""
    reversed_grad_x = GradReverse.apply(x)
    return reversed_grad_x


class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (BERT-style).

    Projects inputs of size ``emb_hidden_size`` into query/key/value spaces of
    size ``sent_hidden_size``, splits them across ``num_attention_heads``
    heads, attends, and re-merges the heads.

    Args:
        emb_hidden_size: size of the incoming token representations.
        sent_hidden_size: size of the Q/K/V projections (and the output);
            must be divisible by ``num_attention_heads``.
        num_attention_heads: number of parallel attention heads.
        attention_probs_dropout_prob: dropout applied to attention weights.
        output_attentions: if True, forward also returns the attention maps.
    """

    def __init__(self, emb_hidden_size, sent_hidden_size,
                 num_attention_heads, attention_probs_dropout_prob=0.2,
                 output_attentions=False):
        super(SelfAttention, self).__init__()
        # The heads split the *projected* size (sent_hidden_size), so that is
        # the dimension that must divide evenly by the head count. The
        # original code raised on emb_hidden_size (which the math never
        # requires) and only `assert`-ed this real constraint.
        if sent_hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (sent_hidden_size, num_attention_heads))
        self.output_attentions = output_attentions

        self.num_attention_heads = num_attention_heads
        self.attention_head_size = sent_hidden_size // num_attention_heads
        self.sent_hidden_size = sent_hidden_size

        self.query = nn.Linear(emb_hidden_size, self.sent_hidden_size)
        self.key = nn.Linear(emb_hidden_size, self.sent_hidden_size)
        self.value = nn.Linear(emb_hidden_size, self.sent_hidden_size)
        self.dropout = nn.Dropout(attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape [batch, seq, sent_hidden] -> [batch, heads, seq, head_size]."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        """Run multi-head attention over ``hidden_states``.

        Args:
            hidden_states: [batch, seq, emb_hidden_size] inputs.
            attention_mask: additive mask broadcastable to
                [batch, heads, seq, seq]; it is summed onto the raw scores
                (conventionally 0 to keep, large negative to mask).
            head_mask: optional multiplicative per-head mask.

        Returns:
            ``(context,)`` of shape [batch, seq, sent_hidden_size], plus the
            attention probabilities when ``output_attentions`` is set.
        """
        mixed_query_layer = self.query(hidden_states)  # [batch, seq, emb] -> [batch, seq, sent]
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)  # -> [batch, heads, seq, head_size]
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Raw attention scores: dot product of "query" and "key", scaled by
        # sqrt(head_size) to keep the softmax in a well-conditioned range.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))  # -> [batch, heads, seq, seq]
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the (additive, precomputed) attention mask.
        attention_scores = attention_scores + attention_mask

        # Normalize the scores to probabilities over the key positions.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This drops out entire tokens to attend to, which might seem unusual,
        # but follows the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Optionally silence individual heads.
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)  # -> [batch, heads, seq, head_size]

        # Merge the heads back: [batch, seq, heads, head_size] -> [batch, seq, sent_hidden_size]
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.sent_hidden_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs

class FeedForward(nn.Module):
    """Linear projection + dropout, followed by a residual add and LayerNorm.

    This is the BERT-style "output" block: ``LayerNorm(dropout(dense(h)) + x)``.

    Args:
        hidden_size1: input size of the dense projection.
        hidden_size2: output size (must match ``input_tensor``'s last dim).
        hidden_dropout_prob: dropout applied after the projection.
        layer_norm_eps: epsilon for the LayerNorm denominator.
    """

    def __init__(self, hidden_size1, hidden_size2, hidden_dropout_prob=0.2, layer_norm_eps=1e-8):
        super(FeedForward, self).__init__()
        self.dense = nn.Linear(hidden_size1, hidden_size2)
        self.LayerNorm = nn.LayerNorm(hidden_size2, eps=layer_norm_eps)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Project ``hidden_states`` and layer-normalize the residual sum
        with ``input_tensor``.

        A shape mismatch now surfaces as a regular exception; the original
        bare ``except`` dropped into ``pdb.set_trace()``, which hangs any
        non-interactive run.
        """
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return self.LayerNorm(hidden_states + input_tensor)

class Transformer_Layer(nn.Module):
    """One encoder layer: multi-head self-attention followed by a residual
    projection + LayerNorm block."""

    def __init__(self, emb_hidden_size, sent_hidden_size,
                 num_attention_heads, attention_probs_dropout_prob=0.2,
                 layer_norm_eps=1e-8, output_attentions=False):
        super(Transformer_Layer, self).__init__()
        self.attention = SelfAttention(
            emb_hidden_size,
            sent_hidden_size,
            num_attention_heads,
            attention_probs_dropout_prob,
            output_attentions,
        )
        self.output = FeedForward(
            sent_hidden_size,
            sent_hidden_size,
            hidden_dropout_prob=0.2,
            layer_norm_eps=layer_norm_eps,
        )

    def forward(self, hidden_states, attention_mask, head_mask=None):
        """Attend over ``hidden_states`` and apply the residual output block.

        Returns ``(layer_output,)``, extended with the attention maps when
        the attention module was built with ``output_attentions=True``.
        """
        attn_results = self.attention(hidden_states, attention_mask, head_mask)
        # The residual path of the output block uses the layer *input*.
        layer_output = self.output(attn_results[0], hidden_states)
        # Forward any attention maps so callers can inspect them.
        return (layer_output,) + attn_results[1:]

class SentenceModel(nn.Module):
    """Abstract base class for sentence encoders.

    Subclasses are expected to override :meth:`tokens2vecs`,
    :meth:`tokens2vecs_aug`, :meth:`AugForward` and :meth:`forward`.

    The original stubs printed an error, dumped ``sys._current_frames()`` and
    called ``sys.exit(0)`` — killing the whole interpreter with a *success*
    exit code from library code. They now raise ``NotImplementedError``, the
    idiomatic way to mark an unimplemented abstract method.
    """

    def __init__(self):
        super(SentenceModel, self).__init__()

    def tokens2vecs(self, sents):
        """Encode pre-tokenized input; must be provided by the subclass."""
        raise NotImplementedError("tokens2vecs is not implemented in current model")

    def tokens2vecs_aug(self, sents):
        """Encode augmented tokenized input; must be provided by the subclass."""
        raise NotImplementedError("tokens2vecs_aug is not implemented in current model")

    def AugForward(self, sents):
        """Forward pass on augmented input; must be provided by the subclass."""
        raise NotImplementedError("AugForward is not implemented in current model")

    def forward(self, sents):
        """Forward pass; must be provided by the subclass."""
        raise NotImplementedError("forward is not implemented in current model")

class TokenizerBasedModel(SentenceModel):
    """Sentence model backed by a subword tokenizer.

    Subclasses are expected to set ``self.tokenizer`` (a HuggingFace-style
    tokenizer with ``encode`` — TODO confirm against subclasses outside this
    file).
    """

    def __init__(self):
        super(TokenizerBasedModel, self).__init__()
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    def Pad_Sequence(self, ipt_ids, mlm_labels=None):
        """Pad a batch of 1-D id tensors to the batch max length (capped at 512).

        Padding id is 102 (BERT's [SEP] — presumably intentional, TODO confirm
        vs. [PAD]=0); attention masks are 1 over real tokens and 0 over
        padding; label padding is -1.

        Returns ``(ids, masks)`` or ``(ids, masks, labels)``, all moved to
        ``self.device``. The bare ``except`` + ``pdb.set_trace()`` debug hook
        has been removed — errors now propagate normally.
        """
        max_sent_len = max(len(ids) for ids in ipt_ids)
        if max_sent_len > 512:
            # Hard cap at BERT's 512-position limit. Truncate the labels in
            # step with the ids: the original code forgot to, so the label
            # copy below crashed for over-long masked-LM batches.
            ipt_ids = [t[:512] for t in ipt_ids]
            if mlm_labels is not None:
                mlm_labels = [t[:512] for t in mlm_labels]
            max_sent_len = 512
        ipt_tensors = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64) * 102
        attn_masks = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64)
        if mlm_labels is not None:
            labels = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64) * -1
        for i in range(len(ipt_ids)):
            ipt_tensors[i, :len(ipt_ids[i])] = ipt_ids[i]
            if mlm_labels is not None:
                labels[i, :len(ipt_ids[i])] = mlm_labels[i]
            attn_masks[i, len(ipt_ids[i]):] = 0
        if mlm_labels is None:
            return ipt_tensors.to(self.device), attn_masks.to(self.device)
        return ipt_tensors.to(self.device), attn_masks.to(self.device), labels.to(self.device)

    def sents2mlm_ids(self, sents, mlm_probs):
        """Tokenize sentences, apply MLM masking, and pad into a batch.

        Returns ``(input_ids, att_masks, masked_lm_labels)``.
        """
        text_inputs = [
            mask_tokens(torch.tensor(self.tokenizer.encode(sent, add_special_tokens=True)),
                        self.tokenizer, mlm_probs)
            for sent in sents
        ]
        ids = [item[0] for item in text_inputs]
        labels = [item[1] for item in text_inputs]
        return self.Pad_Sequence(ids, labels)

    def sents2ids(self, sents):
        """Tokenize sentences and pad into a batch of ``(input_ids, att_masks)``."""
        text_inputs = [torch.tensor(self.tokenizer.encode(sent, add_special_tokens=True)) for sent in sents]
        return self.Pad_Sequence(text_inputs)

class CN_TokenizerBasedModel(TokenizerBasedModel):
    """Tokenizer-based model for mixed Chinese/English text.

    Chinese words are split into single characters while other words are
    WordNet-lemmatized before id lookup. Subclasses must also provide
    ``self.tokenizer`` and ``self.bert_config``.
    """

    def __init__(self):
        # Parent __init__ already sets self.device; the original duplicated
        # that assignment here.
        super(CN_TokenizerBasedModel, self).__init__()
        self.zh_pattern = re.compile(u'[\u4e00-\u9fa5]+')
        self.lemmatizer = WordNetLemmatizer()

    def contains_CN_Char(self, string):
        """Return a truthy match object if ``string`` contains any character
        in the CJK unified range, else ``None``."""
        return self.zh_pattern.search(string)

    def lemma(self, word):
        """Lemmatize ``word`` successively as noun, adjective, verb, adverb."""
        word = self.lemmatizer.lemmatize(word, 'n')
        word = self.lemmatizer.lemmatize(word, 'a')
        word = self.lemmatizer.lemmatize(word, 'v')
        return self.lemmatizer.lemmatize(word, 'r')

    def sent_to_tokens(self, sent):
        """Split a whitespace-separated sentence into tokens, bracketed by
        [CLS]/[SEP] and capped at ``bert_config.max_position_embeddings``."""
        max_len = self.bert_config.max_position_embeddings
        tokens = [self.tokenizer.cls_token]
        for word in sent.split(" "):
            if self.contains_CN_Char(word):
                tokens.extend(list(word))  # character-level for Chinese words
            else:
                tokens.append(self.lemma(word))
            if len(tokens) + 1 > max_len:
                break  # leave room for the trailing [SEP]
        return tokens[:max_len - 1] + [self.tokenizer.sep_token]

    def sents2mlm_ids(self, sents, mlm_probs):
        """Encode sentences (or pass through pre-encoded id lists), apply MLM
        masking, and pad into ``(input_ids, att_masks, masked_lm_labels)``."""
        # all(...) replaces reduce(lambda x, y: x and y, map(...)): same
        # result, but it no longer raises TypeError on an empty first item.
        if all(isinstance(x, int) for x in sents[0]):
            text_inputs = [mask_tokens(torch.tensor(tokens), self.tokenizer, mlm_probs) for tokens in sents]
        else:
            sent_tokens = [self.sent_to_tokens(sent) for sent in sents]
            text_inputs = [mask_tokens(torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens)), self.tokenizer, mlm_probs) for tokens in sent_tokens]
        ids = [item[0] for item in text_inputs]
        labels = [item[1] for item in text_inputs]
        return self.Pad_Sequence(ids, labels)

    def sents2ids(self, sents):
        """Encode sentences (or pass through pre-encoded id lists) and pad
        into ``(input_ids, att_masks)``."""
        if all(isinstance(x, int) for x in sents[0]):
            text_inputs = [torch.tensor(tokens) for tokens in sents]
        else:
            sent_tokens = [self.sent_to_tokens(sent) for sent in sents]
            text_inputs = [torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens)) for tokens in sent_tokens]
        return self.Pad_Sequence(text_inputs)

class W2VBasedModel(SentenceModel):
    """Sentence model backed by pretrained word2vec embeddings.

    Loads ``vocab.txt`` (one token per line; line number == embedding row)
    and ``embedding.pkl`` (a state dict with key ``'embedding'``) from
    ``w2v_dir``. ``seg`` is an optional segmenter exposing ``cut``; when
    absent, sentences are split on whitespace.
    """

    def __init__(self, w2v_dir, seg=None):
        super(W2VBasedModel, self).__init__()
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        # Explicit encoding: the vocab contains non-ASCII tokens (see the CN
        # subclass), so relying on the platform default encoding is unsafe.
        with open("%s/vocab.txt" % w2v_dir, "r", encoding="utf-8") as fr:
            self.vocab = [line.strip('\n') for line in fr]
        self.word2index = {w: idx for idx, w in enumerate(self.vocab)}
        self.embedding_size = 300
        self.emb = nn.Embedding(len(self.vocab), embedding_dim=self.embedding_size).to(self.device)
        # map_location lets a GPU-saved checkpoint load on a CPU-only host.
        state_dict = torch.load("%s/embedding.pkl" % w2v_dir, map_location=self.device)
        self.emb.load_state_dict(state_dict['embedding'])
        self.seg = seg

    def encode_sentence(self, sent):
        """Map a sentence to vocabulary ids.

        A sequence that already consists of ints is returned unchanged;
        unknown tokens map to the ``[UNK]`` id.
        """
        if all(isinstance(x, int) for x in sent):  # already encoded
            return sent
        tokens = sent.split() if self.seg is None else self.seg.cut(sent)
        unk = self.word2index['[UNK]']
        return [self.word2index.get(token, unk) for token in tokens]

    def Pad_Sequence(self, ipt_ids, mlm_labels=None):
        """Pad a batch of 1-D id tensors to the batch max length.

        Padding id is 1 (presumably this vocab's pad token — TODO confirm
        against vocab.txt); masks are 1 over real tokens, 0 over padding;
        label padding is -1. Tensors are created directly on ``self.device``.
        """
        max_sent_len = max(len(ids) for ids in ipt_ids)
        ipt_tensors = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64, device=self.device)
        attn_masks = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64, device=self.device)
        if mlm_labels is not None:
            labels = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64, device=self.device) * -1
        for i in range(len(ipt_ids)):
            ipt_tensors[i, :len(ipt_ids[i])] = ipt_ids[i]
            if mlm_labels is not None:
                labels[i, :len(ipt_ids[i])] = mlm_labels[i]
            attn_masks[i, len(ipt_ids[i]):] = 0
        if mlm_labels is None:
            return ipt_tensors, attn_masks
        return ipt_tensors, attn_masks, labels

    def sents2mlm_ids(self, sents, mlm_probs):
        """Masked-LM encoding is not supported for word2vec models; raises
        instead of terminating the interpreter with ``sys.exit(0)``."""
        raise NotImplementedError("sents2mlm_ids is not implemented for W2VBasedModel in this version")

    def sents2ids(self, sents):
        """Encode sentences and pad into ``(input_ids, att_masks)``."""
        text_inputs = [torch.tensor(self.encode_sentence(sent)) for sent in sents]
        return self.Pad_Sequence(text_inputs)

class CN_W2VBasedModel(W2VBasedModel):
    """Chinese variant of :class:`W2VBasedModel` that encodes text at the
    character level."""

    def __init__(self, w2v_dir, seg=None):
        super(CN_W2VBasedModel, self).__init__(w2v_dir=w2v_dir, seg=seg)

    def encode_sentence(self, sent):
        """Map a sentence to character-level vocabulary ids.

        Restores the parent's pass-through for pre-encoded int sequences,
        which this override previously dropped (an int list would have
        crashed on ``sent.split``).
        """
        if all(isinstance(x, int) for x in sent):  # already encoded
            return sent

        def _has_chinese(text):
            return any(u'\u4e00' <= ch <= u'\u9fff' for ch in text)

        tokens = sent.split() if self.seg is None else self.seg.cut(sent)
        # NOTE(review): tokens containing at least one Chinese character are
        # exploded into single characters, and tokens with no Chinese
        # character are dropped entirely — preserved as-is from the original;
        # confirm the dropping is intended.
        chars = [ch for token in tokens for ch in token if _has_chinese(token)]
        unk = self.word2index['[UNK]']
        return [self.word2index.get(ch, unk) for ch in chars]