import torch
from pytorch_transformers import BertForMaskedLM, BertTokenizer, BertModel
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from torch.optim import Adam
import torch.nn as nn
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class BertPretrain(nn.Module):
    """BERT masked-LM wrapper: exposes per-layer hidden states via ``forward``
    and masked-LM fine-tuning via ``Finetune``."""

    def __init__(self, bert_dir):
        super(BertPretrain, self).__init__()
        # Pretrained MLM model and matching tokenizer loaded from `bert_dir`.
        self.bert = BertForMaskedLM.from_pretrained(bert_dir).to(device)
        self.tokenizer = BertTokenizer.from_pretrained(bert_dir)

    def Pad_Sequence(self, ipt_ids):
        """Right-pad a list of 1-D id tensors into one (batch, max_len) int64 tensor.

        NOTE(review): the pad value 102 is BERT's [SEP] id rather than the
        conventional [PAD] (0); `forward` zeroes padded positions by length
        afterwards, but confirm this value is intentional.
        """
        max_sent_len = max([len(ids) for ids in ipt_ids])
        ipt_tensors = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64, device=device)*102
        for i in range(len(ipt_ids)):
            ipt_tensors[i, :len(ipt_ids[i])] = ipt_ids[i]
        return ipt_tensors

    def forward(self, sents):
        """Encode raw sentences and return the hidden states of every BERT layer.

        Args:
            sents: list of raw sentence strings.
        Returns:
            Tensor shaped (batch, n_layers, max_len, hidden) with the positions
            beyond each sentence's true length zeroed out.
        """
        # Ask the underlying encoder to emit all intermediate hidden states.
        self.bert.bert.encoder.output_hidden_states = True
        text_inputs = [torch.tensor(self.tokenizer.encode(sent, add_special_tokens=True)) for sent in
                       sents]
        text_len = [len(t) for t in text_inputs]
        input_tensors = self.Pad_Sequence(text_inputs)
        rst = self.bert.bert(input_tensors)
        # rst[2] is the tuple of per-layer hidden states; stack to
        # (n_layers, batch, len, hidden) then swap to batch-first.
        outs = torch.stack([t for t in rst[2]]).transpose(1, 0)
        # Zero the representations of padded positions.
        for idx, l in enumerate(text_len):
            outs[idx, :, l:, ].fill_(0.0)
        return outs

    def Finetune(self, texts, batchsize=32, max_epoch=10, print_every=10, learning_rate=2e-5):
        """Fine-tune with the masked-LM objective (inputs double as labels).

        Args:
            texts: list of raw sentence strings.
            batchsize: minibatch size.
            max_epoch: number of passes over `texts`.
            print_every: log the loss every `print_every` batches.
            learning_rate: AdamW learning rate.
        """
        # BUG FIX: the learning rate was hard-coded to 2e-5, silently
        # ignoring the `learning_rate` argument.
        optim = AdamW([
            {'params': self.bert.parameters(), 'lr': learning_rate}
        ])
        for epoch in range(max_epoch):
            for i in range(0, len(texts), batchsize):
                text_inputs = [torch.tensor(self.tokenizer.encode(sent, add_special_tokens=True)) for sent in texts[i:i+batchsize]]
                input_tensors = self.Pad_Sequence(text_inputs)
                # masked_lm_labels == inputs: LM loss over every (unmasked) token.
                rst = self.bert(input_tensors, masked_lm_labels=input_tensors)
                optim.zero_grad()
                rst[0].backward()
                optim.step()
                if i % (print_every*batchsize) == 0:
                    print("#FineTune Loss# %3d|%3d [%3d, %3d] loss:%6.7f"%(i, len(texts), epoch, max_epoch, rst[0].item()))

    def save_model(self, saved_file):
        """Persist the (fine-tuned) BERT weights to `saved_file`."""
        torch.save(
            {
                "bert": self.bert.state_dict(),
            },
            saved_file
        )

class SentimentPlot(object):
    """Plots how sentiment scores are distributed across text sequences."""

    def __init__(self, sentiment_model, bins=20, xlabel="", ylabel="", title=""):
        # Sentiment scorer: called on one text sequence, returns per-position scores.
        self.s_model = sentiment_model
        self.bins = bins
        # Configure the current matplotlib figure up front.
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.title(title)
        plt.xlim(0, 1)
        plt.ylim(0, 10)

    def textseq2sentiseq(self, text_seqs):
        """Score every text sequence with the sentiment model (no gradients)."""
        scored = []
        for text_seq in tqdm(text_seqs):
            with torch.no_grad():
                result = self.s_model(text_seq)
            scored.append(result)
        return scored

    def senti_shift(self, senti_seqs, start=0, end=1, legend=""):
        """Overlay the score distributions at positions `start` and `end`.

        NOTE(review): `legend` is currently unused; plot labels are fixed to
        "source" and "tail".
        """
        palette = [(1, 0, 0), (1, 1, 0), (0, 1, 0,), (0, 0, 1)]
        head_scores = [senti[start] for senti in senti_seqs]
        tail_scores = [senti[end] for senti in senti_seqs]
        hist_style = {"histtype": "step", "linewidth": 2, "alpha": 1}
        kde_style = {"color": palette[0], "lw": 0, "label": ""}
        sns.distplot(head_scores, bins=self.bins, rug=False, kde=True, hist=True,
                     norm_hist=True, label="source",
                     hist_kws=dict(hist_style), kde_kws=dict(kde_style))
        sns.distplot(tail_scores, bins=self.bins, rug=False, kde=True, hist=True,
                     norm_hist=True, label="tail",
                     hist_kws=dict(hist_style), kde_kws=dict(kde_style))

def encode_sent_pair(tt, pair):
    """Encode a (sentence_a, sentence_b) pair with tokenizer `tt`,
    adding the model's special tokens."""
    sent_a, sent_b = pair[0], pair[1]
    return tt.encode(sent_a, text_pair=sent_b, add_special_tokens=True)

def encode_sent_pairs(tt, sent_pairs):
    """Encode every sentence pair in `sent_pairs` with tokenizer `tt`."""
    encoded = []
    for sent_pair in sent_pairs:
        encoded.append(encode_sent_pair(tt, sent_pair))
    return encoded

def pad_sentences(sent_list, padding_value, max_sent_len=256):
    """Pad (or truncate) token-id lists to a common length.

    Args:
        sent_list: list of token-id sequences.
        padding_value: id used to fill positions past each sentence's end.
        max_sent_len: hard cap on the padded width.
    Returns:
        (ids, mask): ids is an int64 (batch, L) tensor; mask is a float
        (batch, L) tensor with 1 on real tokens and 0 on padding, where
        L = min(longest sentence, max_sent_len).
    """
    batch = len(sent_list)
    width = min(max(len(sent) for sent in sent_list), max_sent_len)
    ids = torch.full([batch, width], padding_value, dtype=torch.int64)
    mask = torch.zeros([batch, width])
    for row, sent in enumerate(sent_list):
        n = min(len(sent), width)
        ids[row, :n] = torch.tensor(sent[:n])
        mask[row, :n] = 1.0
    return ids, mask

def encode_sent_list(tt, sent_list):
    """Encode each sentence with tokenizer `tt`, stripping leading/trailing
    tab characters first and adding the model's special tokens."""
    encoded = []
    for sent in sent_list:
        cleaned = sent.strip("\t")
        encoded.append(tt.encode(cleaned, add_special_tokens=True))
    return encoded


def Entrophy(pred):
    """Mean Shannon entropy (in nats) of the row-wise softmax of 2-D logits.

    Args:
        pred: (batch, classes) tensor of raw logits.
    Returns:
        Scalar tensor: entropy of each row's softmax distribution, averaged
        over the batch.

    BUG FIX: the original `prob * torch.log(prob)` produced NaN whenever a
    probability underflowed to 0 (e.g. for extreme logits); log_softmax is
    numerically stable and mathematically identical.
    """
    assert pred.ndim == 2
    log_prob = pred.log_softmax(dim=1)
    prob = log_prob.exp()
    return -(prob * log_prob).sum(dim=1).mean()


