from models.base_model import BaseModel
from pytorch_transformers import BertTokenizer, BertModel
from torch import nn
import torch
from os.path import join
import time
import numpy as np
from models.model_utils import get_TPTNFNFP
from glob import glob
import re

# Absolute path to the pretrained Chinese RoBERTa-wwm-ext checkpoint directory.
# NOTE(review): machine-specific hard-coded path — consider moving this to a
# config file or environment variable so the code is portable.
model_base = '/home/njuciairs/wangshuai/pretrained_models/chinese_roberta_wwm_ext_pytorch'


class BertSentiEntity(BaseModel):
    """Joint sentence-sentiment and entity classifier on a pretrained BERT encoder.

    The model predicts:
      * a 2-way sentence-level sentiment from the [CLS] hidden state, and
      * a 2-way label for every occurrence of the special entity-marker token
        ``token_CLSE`` in the input, taken from the hidden state at that
        position.

    Training minimizes the sum of the two cross-entropy losses; validation
    model selection uses ``0.4 * sentiment_F1 + 0.6 * entity_F1``.
    """

    def __init__(self, tokenizer, token_CLSE, token_SEPE, learning_rate=5e-5,
                 version_id=None, continue_train=False, gamma=0.9,
                 model_name=None):
        """Build the encoder, the two linear heads, optimizer and LR scheduler.

        Args:
            tokenizer: tokenizer whose (possibly extended) vocabulary sizes
                the BERT embedding matrix.
            token_CLSE: special token that marks each entity position.
            token_SEPE: special entity-separator token; its id is stored but
                not otherwise used inside this class.
            learning_rate: Adam learning rate.
            version_id: forwarded to BaseModel (experiment versioning).
            continue_train: forwarded to BaseModel (resume flag).
            gamma: per-epoch decay factor for ExponentialLR.
            model_name: experiment name; defaults to
                'BertSentiEntity_hit_inplace_cross' when None.
        """
        # BUGFIX: compare against None with `is`, not `==`.
        if model_name is None:
            model_name = 'BertSentiEntity_hit_inplace_cross'
        self.model_name = model_name
        super().__init__(model_name=self.model_name, version_id=version_id,
                         continue_train=continue_train)
        self.bertModel = BertModel.from_pretrained(model_base)
        # The tokenizer may carry added special tokens (CLSE/SEPE); grow the
        # embedding matrix to match its vocabulary size.
        self.bertModel.resize_token_embeddings(len(tokenizer))
        self.CLSE_ID = tokenizer.convert_tokens_to_ids([token_CLSE])[0]
        self.SEPE_ID = tokenizer.convert_tokens_to_ids([token_SEPE])[0]
        # Global step counter for TensorBoard scalars (was assigned twice
        # in the original; the duplicate is removed).
        self.steps = 0
        self.hidden_size = self.bertModel.config.hidden_size
        self.senti_linear = nn.Linear(self.hidden_size, 2)
        self.entity_linear = nn.Linear(self.hidden_size, 2)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
        self.crossEntropyLoss = nn.CrossEntropyLoss()
        self.token_clse = token_CLSE
        self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
            self.optimizer, gamma, last_epoch=-1)

    def forward(self, text_ids, attn_mask):
        """Encode a batch and return ``(senti_logits, entity_logits)``.

        Returns:
            senti_logits: [batch, 2] logits from the [CLS] position.
            entity_logits: [total_CLSE_tokens_in_batch, 2] logits, one row per
                occurrence of the CLSE marker anywhere in the batch.
        """
        hiddens, pooled_hidden = self.bertModel(input_ids=text_ids,
                                                attention_mask=attn_mask)
        senti_logits = self.predict_sentiment(hiddens)
        # Boolean mask of every CLSE marker position across the batch.
        pos = (text_ids == self.CLSE_ID)
        entity_logits = self.predict_entity_labels(pos, hiddens)
        return senti_logits, entity_logits

    def predict_sentiment(self, hiddens):
        """Sentiment logits from the first ([CLS]) token's hidden state."""
        cls_hidden = hiddens[:, 0]
        return self.senti_linear(cls_hidden)

    def predict_entity_labels(self, pos, hiddens):
        """Entity logits for every position selected by boolean mask ``pos``."""
        entity_hiddens = hiddens[pos]
        return self.entity_linear(entity_hiddens)

    def train_one_epoch(self, data_loader):
        """Run one optimization pass over ``data_loader``.

        Malformed batches (sentiment-label count != batch size) are skipped.
        When more entity logits than entity labels are produced (marker/label
        misalignment), the entity loss falls back to the sentiment loss so the
        step still trains; otherwise the extra labels beyond the logit count
        are truncated. Per-step losses are logged to TensorBoard and the LR
        scheduler steps once at epoch end.
        """
        self.bertModel.train()
        total_len = len(data_loader)
        tik = time.time()

        for i, batch in enumerate(data_loader):
            self.steps += 1
            text_ids, entity_labels, sentiment_labels, attn_mask = batch
            if len(text_ids) != len(sentiment_labels):
                continue  # malformed batch; skip rather than crash
            self.optimizer.zero_grad()
            senti_logits, entity_logits = self.forward(text_ids=text_ids,
                                                       attn_mask=attn_mask)
            senti_loss = self.crossEntropyLoss(senti_logits, sentiment_labels)
            if len(entity_logits) > len(entity_labels):
                # Misaligned batch: reuse the sentiment loss (effectively
                # doubling its weight this step) instead of an entity loss.
                entity_loss = senti_loss
            else:
                entity_loss = self.crossEntropyLoss(
                    entity_logits, entity_labels[:len(entity_logits)])

            loss = senti_loss + entity_loss
            loss.backward()
            # Clip to unit norm to stabilize BERT fine-tuning.
            torch.nn.utils.clip_grad_norm_(self.parameters(), 1)
            self.optimizer.step()
            self.writer.add_scalars('losses', {'senti_loss': senti_loss.item(),
                                               'entity_loss': entity_loss.item()}, self.steps)
            print("\rstep:%d of %d" % (i, total_len), end="")
        tok = time.time()
        print(' used time:', tok - tik)
        self.lr_scheduler.step()

    def train_epochs(self, train_loader, val_loader, num_epoch, early_stop=5):
        """Train up to ``num_epoch`` epochs with early stopping.

        After each epoch, validates and checkpoints the model whenever the
        combined score improves; stops after ``early_stop`` consecutive
        epochs without improvement.
        """
        best_score = 0
        early_epochs = 0
        for e in range(1, num_epoch + 1):
            self.train_one_epoch(train_loader)
            sF1, sP, sR, eF1, eP, eR, total_score, total_val_senti_loss, total_val_entity_loss = self.validate(
                val_loader)
            self.writer.add_scalars('validate', {'sentiment_f1': sF1,
                                                 'entity_f1': eF1,
                                                 'total_score': total_score}, e)
            print('epoch %d' % (e), 'sentiment f1:%.3f' % (sF1),
                  'entity f1:%.3f' % (eF1), 'total score:', total_score)
            if total_score > best_score:
                best_score = total_score
                early_epochs = 0
                # Checkpoint name encodes score/epoch/time so load_best_model
                # can pick the best one back out with a regex.
                save_path = join(self.model_save_dir, 'score%f_epoch%d_time%s' % (
                    best_score, e, time.strftime("%Y%m%d%H%M%S", time.localtime())))
                torch.save(self.state_dict(), save_path)
            else:
                early_epochs += 1
                if early_epochs >= early_stop:
                    break

    def validate(self, data_loader):
        """Evaluate on ``data_loader`` without gradients.

        Returns:
            (sF1, sP, sR, eF1, eP, eR, total_score,
             total_val_senti_loss, total_val_entity_loss)
            where total_score = 0.4 * sF1 + 0.6 * eF1 and the losses are sums
            over all batches.
        """
        self.bertModel.eval()  # equivalent to train(mode=False)
        with torch.no_grad():
            total_val_senti_loss, total_val_entity_loss = 0, 0
            senti_logits_list, senti_labels_list = [], []
            entity_logits_list, entity_labels_list = [], []
            for batch in data_loader:
                text_ids, entity_labels, sentiment_labels, attn_mask = batch
                senti_logits, entity_logits = self.forward(text_ids, attn_mask)
                senti_labels_list.append(sentiment_labels)
                # Truncate labels to the number of logits, mirroring training.
                entity_labels_list.append(entity_labels[:len(entity_logits)])
                senti_logits_list.append(senti_logits)
                entity_logits_list.append(entity_logits)
                total_val_senti_loss += self.crossEntropyLoss(
                    senti_logits, sentiment_labels).item()
                total_val_entity_loss += self.crossEntropyLoss(
                    entity_logits, entity_labels[:len(entity_logits)]).item()
            sF1, sP, sR = self.compute_f1(senti_logits_list, senti_labels_list)
            eF1, eP, eR = self.compute_f1(entity_logits_list, entity_labels_list)
            total_score = 0.4 * sF1 + 0.6 * eF1
        return sF1, sP, sR, eF1, eP, eR, total_score, total_val_senti_loss, total_val_entity_loss

    def compute_f1(self, logits_list, labels_list):
        """Micro-averaged (F1, precision, recall) over all batches.

        BUGFIX: degenerate denominators (no positive predictions, no positive
        labels, or P == R == 0) now yield 0.0 instead of a NaN that would
        poison ``total_score`` and silently disable best-model checkpointing
        (NaN comparisons are always False).
        """
        counts = [get_TPTNFNFP(logits, labels)
                  for logits, labels in zip(logits_list, labels_list)]
        TP, TN, FN, FP = (np.sum(x) for x in zip(*counts))
        P = TP / (TP + FP) if (TP + FP) > 0 else 0.0
        R = TP / (TP + FN) if (TP + FN) > 0 else 0.0
        F1 = 2 * P * R / (P + R) if (P + R) > 0 else 0.0
        return F1, P, R

    def load_best_model(self):
        """Load the checkpoint with the highest score from ``model_save_dir``.

        Relies on the 'score<float>_epoch<int>_time<stamp>' naming produced
        by ``train_epochs``.
        """
        models = glob(self.model_save_dir + '/score*')
        rep = 'score([.0-9]*)_epoch'
        scores = [float(re.search(rep, p).group(1)) for p in models]
        best_model = models[np.argmax(scores)]
        self.load_state_dict(torch.load(best_model))
        self.score = best_model.split('/')[-1]
        print('loaded', self.score)

    def inference(self, test_loader):
        """Yield hard predictions per example.

        Yields:
            (example_id, sentiment_class, [entity_class, ...], entity_list)
            — the flat entity predictions are re-grouped per example by
            counting CLSE markers in each row.
        """
        self.bertModel.eval()  # equivalent to train(mode=False)
        with torch.no_grad():
            for batch in test_loader:
                text_ids, attn_mask, ids, entity_lists = batch
                senti_logits, entity_logits = self.forward(text_ids, attn_mask)
                senti_predicts = senti_logits.argmax(dim=1).cpu().tolist()
                entity_predicts = entity_logits.argmax(dim=1).cpu().tolist()
                pos = (text_ids == self.CLSE_ID)
                # Number of CLSE markers per example -> slice widths into the
                # flat entity-prediction list.
                entity_nums = pos.sum(dim=1).tolist()
                index = 0
                for i, num in enumerate(entity_nums):
                    yield (ids[i], senti_predicts[i],
                           entity_predicts[index:index + num], entity_lists[i])
                    index += num

    def inference_for_new_features(self, test_loader):
        """Yield raw logits per example (same grouping as ``inference``).

        Unlike ``inference``, no argmax is taken — the full logit vectors are
        returned, e.g. for downstream feature stacking/ensembling.
        """
        self.bertModel.eval()  # equivalent to train(mode=False)
        with torch.no_grad():
            for batch in test_loader:
                text_ids, attn_mask, ids, entity_lists = batch
                senti_logits, entity_logits = self.forward(text_ids, attn_mask)
                senti_predicts = senti_logits.cpu().tolist()
                entity_predicts = entity_logits.cpu().tolist()
                pos = (text_ids == self.CLSE_ID)
                entity_nums = pos.sum(dim=1).tolist()
                index = 0
                for i, num in enumerate(entity_nums):
                    yield (ids[i], senti_predicts[i],
                           entity_predicts[index:index + num], entity_lists[i])
                    index += num

