from pytorch_transformers import BertModel, BertTokenizer
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from utils.twitterloader import SubReader, AirlineSenti
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from pytorch_transformers.modeling_bert import BertOnlyMLMHead
from utils import BertPretrain
import os
import sys
import random
import fitlog

class SentimentModel(nn.Module):
    """Binary sentiment classifier: frozen BERT features -> 3-D conv -> linear head.

    Sentences are embedded by a ``BertPretrain`` wrapper (run under no_grad
    at classification time), convolved with a single Conv3d, max-pooled into
    a 300-d feature vector, and classified into 2 classes.
    """

    def __init__(self,
                 bert_dir="../bertModel",
                 device=torch.device('cuda')
                 ):
        super(SentimentModel, self).__init__()
        self.bpt = BertPretrain(bert_dir)
        # Single Conv3d over the BERT output volume; the (13, 4, 768) kernel
        # consumes the whole hidden dimension (768) in one step.
        self.conv3 = nn.Conv3d(1, 300, kernel_size=(13, 4, 768), stride=(1, 1, 1)).to(device)
        self.classifier = nn.Linear(300, 2).to(device)
        self.act_fn = nn.ReLU()
        self.loss_fn = nn.CrossEntropyLoss()
        self.device = device

    def forward(self, sents):
        """Return class probabilities of shape (batch, 2) for a list of sentences.

        NOTE(review): loss()/trainIters() feed this softmaxed output into
        CrossEntropyLoss, which expects raw logits — effectively a double
        softmax. Kept as-is to preserve existing training behavior.
        """
        feat = self.Sents2Vecs(sents)
        pred = self.classifier(feat).softmax(dim=1)
        return pred

    def loss(self, sents, labels):
        """Cross-entropy loss of forward(sents) against integer class labels."""
        preds = self.forward(sents)
        loss = self.loss_fn(preds, labels)
        return loss

    def trainIters(self, texts, labels, max_iters=10000, warmup_steps=100, batchsize=32, print_every=10):
        """Train conv + classifier on randomly sampled mini-batches.

        `texts`/`labels` must support fancy indexing with a list of ints
        (presumably numpy arrays — TODO confirm against callers).
        """
        optim = AdamW([
            {'params': self.conv3.parameters(), 'lr': 2e-5},
            {'params': self.classifier.parameters(), 'lr': 2e-3}
        ])
        schedule = WarmupLinearSchedule(optim, warmup_steps=warmup_steps, t_total=max_iters)
        idxs = list(range(len(texts)))
        sum_loss = 0.0
        for i in range(1, max_iters + 1):
            batch_idxs = random.sample(idxs, batchsize)
            sents = texts[batch_idxs]
            batch_y = torch.tensor(labels[batch_idxs], dtype=torch.int64, device=self.device)
            preds = self.forward(sents)
            loss = self.loss_fn(preds, batch_y)
            optim.zero_grad()
            loss.backward()
            optim.step()
            schedule.step()
            acc = accuracy_score(batch_y.cpu(), preds.cpu().argmax(dim=1))
            # BUG FIX: accumulate a plain float — `sum_loss += loss` kept the
            # autograd graph of every iteration alive (unbounded memory growth).
            sum_loss += loss.item()
            # BUG FIX: original condition was `print_every % print_every == 0`,
            # which is always true, so it printed and reset sum_loss every step
            # (making the mean over `print_every` steps wrong as well).
            if i % print_every == 0:
                mean_loss = sum_loss * 1.0 / print_every
                sum_loss = 0.0
                print('%6d | %6d  loss/acc = %6.8f/%6.7f' % (i, max_iters,
                                                             mean_loss, acc
                                                             )
                      )

    def Sents2Vecs(self, sents):
        """Encode sentences into (batch, 300) feature vectors.

        BERT runs under no_grad, so only conv3/classifier receive gradients.
        """
        with torch.no_grad():
            img = self.bpt(sents)
        out3 = self.act_fn(self.conv3(img.unsqueeze(1)))
        # Max-pool over the spatial axis left over after the convolution;
        # the remaining two trailing dims must already be collapsed to 1.
        out = out3.max(dim=-2)[0]
        shape = out.shape
        assert shape[2] == 1 and shape[3] == 1
        feat = out.squeeze(3).squeeze(2)
        return feat

    def BPTPretrain(self, text_file="../Downloads/trainingandtestdata/AE.data", bpt_saved_file="./model/bpt.pkl"):
        """Fine-tune the BERT wrapper on raw text for one epoch and checkpoint it."""
        with open(text_file) as fr:
            lines = [line for line in fr]
        self.bpt.Finetune(lines, max_epoch=1)
        self.bpt.save_model(bpt_saved_file)

    def load_bpt_model(self, model_file="./model/bpt.pkl"):
        """Load a BERT checkpoint produced by BPTPretrain; exit if the file is missing."""
        if os.path.exists(model_file):
            ch = torch.load(model_file)
            self.bpt.bert.load_state_dict(ch['bert'])
        else:
            print("error! model_file %s do not exists" % model_file)
            # BUG FIX: exit with a nonzero status on failure (was sys.exit(0),
            # which signals success to the shell).
            sys.exit(1)

class BertSenti(nn.Module):
    """BERT sentence encoder + linear head for 3-way sentiment classification."""

    def __init__(self,
                 bert_dir,
                 device=torch.device('cuda')
                 ):
        super(BertSenti, self).__init__()
        self.bert = BertModel.from_pretrained(bert_dir).to(device)
        self.tokenizer = BertTokenizer.from_pretrained(bert_dir)
        self.cls = nn.Linear(768, 3).to(device)
        # reduction='none' so trainIters can apply per-example weights.
        self.loss_fn = nn.NLLLoss(reduction='none')
        self.device = device

    def Pad_Sequence(self, ipt_ids):
        """Right-pad a list of 1-D id tensors to a common length.

        Returns (input_ids, attention_mask, lm_labels). Padding positions get
        id 102 in the inputs and -1 in the labels (ignored by the LM loss).
        NOTE(review): 102 is BERT's [SEP] id, not the usual [PAD]=0; the
        attention mask hides padding, but confirm this is intentional.
        """
        max_sent_len = max([len(ids) for ids in ipt_ids])
        ipt_tensors = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64, device=self.device) * 102
        attn_masks = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64, device=self.device)
        lm_labels = torch.ones([len(ipt_ids), max_sent_len], dtype=torch.int64, device=self.device) * -1
        for i in range(len(ipt_ids)):
            ipt_tensors[i, :len(ipt_ids[i])] = ipt_ids[i]
            lm_labels[i, :len(ipt_ids[i])] = ipt_ids[i]
            attn_masks[i, len(ipt_ids[i]):] = 0
        return ipt_tensors, attn_masks, lm_labels

    def forward(self, sents):
        """Return class probabilities of shape (batch, 3) for raw sentence strings."""
        text_inputs = [torch.tensor(self.tokenizer.encode(sent, add_special_tokens=True)) for sent in
                       sents]
        input_tensors, att_masks, _ = self.Pad_Sequence(text_inputs)
        rst = self.bert(input_tensors, att_masks)
        # rst[1] is the pooled [CLS] representation.
        preds = self.cls(rst[1]).softmax(dim=1)
        return preds

    def LMFintune(self, tr_set, batchsize=32, warmup_steps=1000, max_iters=10000, print_every=10):
        """Fine-tune the BERT encoder with an LM head on the training corpus.

        NOTE(review): the labels are the unmasked input ids (Pad_Sequence
        performs no token masking), so every position predicts its own token
        — confirm this is the intended pretraining objective.
        """
        self.lm_cls = BertOnlyMLMHead(self.bert.config).to(self.device)
        optim = AdamW([
            {'params': self.bert.parameters(), 'lr': 2e-5},
            {'params': self.lm_cls.parameters(), 'lr': 2e-3}
        ])
        scheduler = WarmupLinearSchedule(optim, warmup_steps=warmup_steps, t_total=max_iters)
        loss_fct = nn.CrossEntropyLoss(ignore_index=-1)  # -1 marks padded positions
        for step in range(1, max_iters + 1):
            batches = tr_set.sample(batchsize)
            sents = [" ".join(txt) for txt in batches[0]]
            text_inputs = [torch.tensor(self.tokenizer.encode(sent, add_special_tokens=True)) for sent in sents]
            input_ids, att_masks, masked_lm_labels = self.Pad_Sequence(text_inputs)
            outputs = self.bert(input_ids, attention_mask=att_masks)
            sequence_output = outputs[0]
            prediction_scores = self.lm_cls(sequence_output)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.bert.config.vocab_size), masked_lm_labels.view(-1))
            optim.zero_grad()
            masked_lm_loss.backward()
            optim.step()
            scheduler.step()
            torch.cuda.empty_cache()
            if step % print_every == 0:
                # .item() so %-formatting sees a plain float, not a tensor
                print("#FineTune Loss# %3d|%3d  loss:%6.7f" % (step, max_iters, masked_lm_loss.item()))

    def trainIters(self, tr_set, dev_set, te_set, max_iters=10000, warmup_steps=1000,
                   batchsize=32, print_every=10, valid_every=100, learning_rate=2e-3):
        """Train BERT + classifier head, validating every `valid_every` steps
        and checkpointing whenever validation accuracy improves."""
        optim = AdamW([
            # encoder gets a 100x smaller learning rate than the fresh head
            {'params': self.bert.parameters(), 'lr': 0.01 * learning_rate},
            {'params': self.cls.parameters(), 'lr': learning_rate}
        ])
        schedule = WarmupLinearSchedule(optim, warmup_steps=warmup_steps, t_total=max_iters)
        sum_loss = 0.0
        sum_acc = 0.0
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        for step in range(1, max_iters + 1):
            batches = tr_set.sample(batchsize)
            sents = [" ".join(txt) for txt in batches[0]]
            batch_y = batches[1].to(self.device)
            preds = self.forward(sents)
            # per-example NLL weighted by batches[3] (presumably sample
            # weights — confirm against tr_set.sample)
            loss = torch.dot(self.loss_fn(preds.log(), batch_y), batches[3].to(self.device))
            optim.zero_grad()
            loss.backward()
            optim.step()
            schedule.step()
            torch.cuda.empty_cache()
            acc = accuracy_score(batch_y.cpu(), preds.cpu().argmax(dim=1))
            # BUG FIX: accumulate/log a plain float — keeping the loss tensor
            # retained every step's autograd graph (unbounded memory growth).
            loss_val = loss.item()
            sum_loss += loss_val
            sum_acc += acc
            fitlog.add_metric(acc, step, "training_acc")
            fitlog.add_metric(loss_val, step, "training_loss")
            if step % print_every == 0:
                mean_loss = sum_loss * 1.0 / print_every
                mean_acc = sum_acc * 1.0 / print_every
                print("#Sentiment Training# %3d|%3d loss/acc: %6.7f / %6.7f" % (step, max_iters, mean_loss, mean_acc))
                sum_loss = 0.0
                sum_acc = 0.0
            if step % valid_every == 0:
                val_acc, val_loss = self.valid(dev_set)
                test_acc, test_loss = self.valid(te_set)
                fitlog.add_metric(test_acc, step, "test_acc")
                fitlog.add_metric(test_loss, step, "test_loss")
                fitlog.add_metric(val_acc, step, "valid_acc")
                fitlog.add_metric(val_loss, step, "valid_loss")
                print(
                    '##### %6d | %5d, val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f, best_valid_acc/best_valid_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                        step, max_iters,
                        val_loss, val_acc,
                        test_loss, test_acc,
                        best_valid_acc, best_valid_test_acc,
                        best_test_acc
                    )
                )
                best_test_acc = test_acc if test_acc > best_test_acc else best_test_acc
                if val_acc > best_valid_acc:
                    fitlog.add_best_metric(val_acc, "best_valid_acc")
                    fitlog.add_best_metric(test_acc, "best_valid_test_acc")
                    best_valid_acc = val_acc
                    best_valid_test_acc = test_acc
                    self.save_model("./model/airline_bert_senti.pkl")

    def valid(self, dev, pretrained_file=None):
        """Evaluate on an iterable of batches; returns (accuracy, mean NLL loss).

        Optionally restores a checkpoint first via `pretrained_file`.
        """
        if pretrained_file is not None:
            self.load_model(pretrained_file=pretrained_file)
        with torch.no_grad():
            preds = []
            labels = []
            for batch in dev:
                sents = [" ".join(txt) for txt in batch[0]]
                pred = self.forward(sents)
                y_label = batch[1].to(self.device)
                labels.append(y_label)
                preds.append(pred)
                torch.cuda.empty_cache()
            pred_tensor = torch.cat(preds).cpu()
            label_tensor = torch.cat(labels).cpu()
            val_acc = accuracy_score(label_tensor.numpy(),
                                     pred_tensor.argmax(dim=1).numpy())
            # return a plain float so logging/formatting never sees a tensor
            val_loss = self.loss_fn(pred_tensor.log(), label_tensor).mean().item()
            return val_acc, val_loss

    def save_model(self, saved_file):
        """Checkpoint both the encoder and the classifier head.

        BUG FIX: only the BERT weights were saved before, so restoring the
        "best" model silently lost the trained classifier.
        """
        torch.save(
            {
                "bert": self.bert.state_dict(),
                "cls": self.cls.state_dict(),
            },
            saved_file
        )

    def load_model(self, pretrained_file):
        """Restore a checkpoint; tolerates old checkpoints that lack 'cls'."""
        ch = torch.load(pretrained_file)
        self.bert.load_state_dict(ch['bert'])
        if 'cls' in ch:
            self.cls.load_state_dict(ch['cls'])



if __name__ == '__main__':
    # traceback is only needed in the entry-point error path
    import traceback

    fitlog.set_log_dir("./logs/", new_log=True)
    fitlog.add_hyper_in_file(__file__)

    te = SubReader(data_file="./data/test.csv")
    data = AirlineSenti()
    data.load_data("./data/Tweets.csv")
    tr, dev = data.split([0.97, 1.0])
    s_model = BertSenti(bert_dir="../bert_en/")
    # s_model.LMFintune(tr, warmup_steps=1000, max_iters=1500)
    # fitlog.commit(__file__)

    #######hyper
    max_iters = 200
    warmup_steps = 1000
    batchsize = 32
    print_every = 10
    valid_every = 100
    learning_rate = 2e-3
    #######hyper

    def run_once():
        """Run one training session, recording the outcome in fitlog.

        BUG FIX: the original bare `except:` swallowed every error silently
        (including KeyboardInterrupt). Now only Exception is caught, the
        traceback is printed so failures are visible, and the script still
        continues to the next logged run, as before.
        """
        try:
            s_model.trainIters(tr, dev, te, max_iters=max_iters, warmup_steps=warmup_steps,
                               batchsize=batchsize, print_every=print_every,
                               valid_every=valid_every, learning_rate=learning_rate)
        except Exception:
            traceback.print_exc()
            fitlog.finish(1)
        else:
            fitlog.finish(0)

    run_once()

    # second, separately-logged training run on the same model instance
    fitlog.set_log_dir("./logs/", new_log=True)
    fitlog.add_hyper_in_file(__file__)
    run_once()