# -*- coding: utf-8 -*-
import time

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from torch.optim.lr_scheduler import StepLR

from myVocab import vocab
from configs import configData
from myPath import *
from myNER.crf import crf_neg_log_likelihood, viterbi_decode
from utils import logger, device
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader

class BiLSTM_CRF(nn.Module):
    """BiLSTM sequence encoder with a CRF output layer for NER tagging.

    The embedding layer is initialised from a pre-trained word2vec vocab and
    frozen; this module holds only the CRF transition matrix, while the CRF
    scoring (``crf_neg_log_likelihood``) and decoding (``viterbi_decode``)
    live in ``myNER.crf``.
    """
    def __init__(self, hidden_size = None, lstm_layer = 1, drop_rate = 0.2, dataClass=None):
        """
        :param hidden_size: LSTM hidden size per direction; defaults to the
            embedding dimension when None.
        :param lstm_layer: number of stacked LSTM layers.
        :param drop_rate: dropout probability applied before the tag projection.
        :param dataClass: dataset helper; must expose a ``tags`` sequence
            (the tag inventory) — its length sets the number of classes.
        """
        super(BiLSTM_CRF, self).__init__()

        # network hyper-parameters
        self.emb_dim = vocab.dim
        self.hidden_size = self.emb_dim if hidden_size is None else hidden_size
        self.lstm_layer = lstm_layer
        self.nclass = len(dataClass.tags)
        self.drop_rate = drop_rate # the same probability is shared by all dropout layers
        self.dataClass = dataClass

        # network structure
        self.embedding = self._init_embedding()
        # keep the pre-trained embeddings frozen during training
        self.embedding.requires_grad_(False)
        self.bilstm = nn.LSTM(self.emb_dim, self.hidden_size,
                              bidirectional=True, num_layers=lstm_layer)
        self.dropout_cls = nn.Dropout(self.drop_rate)
        # NOTE(review): pad_packed_sequence yields hidden_size*2 features per
        # time step regardless of num_layers, so the ``*self.lstm_layer``
        # factor only matches for lstm_layer == 1 — confirm before stacking.
        self.hidden2class = nn.Linear(self.hidden_size*2*self.lstm_layer, self.nclass, bias=False)
        # CRF transition scores, small random init in [0, 0.1)
        self.transition = nn.Parameter(torch.rand((self.nclass, self.nclass))/10, requires_grad=True)

    def _init_embedding(self):
        """Build the embedding layer from the pre-trained word2vec vocab.

        :return: nn.Embedding whose weights are copied from the vocab and
            whose padding index maps to the '<pad>' token.
        :raises ValueError: if no pre-trained vocab is available.
        """
        if vocab is not None:
            num_embs = len(vocab)
            emb_dim = vocab.dim
            weights = vocab.get_embedding()
            return nn.Embedding(num_embs, emb_dim,
                                padding_idx=vocab.word2index('<pad>'),
                                _weight=torch.from_numpy(weights).float())
        else:
            raise ValueError("pre-trained word2vec model can't be None!")

    def _viterbi_decode(self, feats, lengths):
        """Run Viterbi decoding independently on each sequence of the batch.

        :param feats: padded emission scores, shape (B, T, nclass).
        :param lengths: real (unpadded) sequence lengths, one per element.
        :return: (paths, scores, lengths); each path is truncated to its
            sequence's real length.
        """
        paths, scores = [], []
        for feat, length in zip(feats, lengths):
            path, score = viterbi_decode(feat, self.transition)
            paths.append(path[:length])
            scores.append(score)
        return paths, scores, lengths


    def _get_lstm_feats(self, input):
        """Encode a padded batch with the BiLSTM and project to tag scores.

        :param input: tuple (sents, lengths); sents is (B, T) token ids.
        :return: (emission scores of shape (B, T, nclass), lengths)
        """
        sents, lengths = input
        sents, lengths = sents.to(device), lengths.to(device) 
        x_emb = self.embedding(sents).transpose(0, 1).to(device) #(T, B, e)
        # NOTE(review): enforce_sorted defaults to True, so this assumes the
        # loader yields batches sorted by decreasing length; recent torch
        # versions also require ``lengths`` on CPU here — confirm.
        packed_emb = pack_padded_sequence(x_emb, lengths)

        hiddens, _ = self.bilstm(packed_emb)
        paded_x, lengths = pad_packed_sequence(hiddens)
        paded_x = self.dropout_cls(paded_x).transpose(0, 1) #(B x T x h)
        out = self.hidden2class(paded_x) #(B x T x nclass)
        # out = torch.softmax(out, 2)
        return out, lengths

    def lstm_crf_forward(self, batch_x)->(list, list, torch.Tensor):
        '''Full forward pass: BiLSTM emissions followed by Viterbi decoding.

        :param batch_x: (padded sentence ids, lengths)
        :return: paths, tag_index in shape(batch_size, lengths)
                scores, list
                lengths
        '''
        feats, lengths = self._get_lstm_feats(batch_x)
        return self._viterbi_decode(feats, lengths)

    def lstm_crf_loss(self, batch_x, batch_y):
        '''Mean negative-log-likelihood CRF loss over the batch.

        :param batch_x: (batch of padded sentences, lengths)
        :param batch_y: (batch of labels)
        :return: loss value, refer to
            1. https://blog.csdn.net/geek_hch/article/details/105156117
            2. https://createmomo.github.io/2017/11/11/CRF-Layer-on-the-Top-of-BiLSTM-5/
        '''
        feats, lengths = self._get_lstm_feats(batch_x)
        # global_score = self._get_global_score(feats)
        # sentence_score = self._get_sentence_score(feats, batch_y)
        # NOTE(review): crf_neg_log_likelihood also returns a transition
        # matrix that is assigned back onto self.transition; verify it stays
        # a registered nn.Parameter (a plain Tensor would break the optimizer).
        losses, self.transition = crf_neg_log_likelihood(feats, batch_y, lengths, self.transition)
        return torch.mean(losses)

    def forward(self, batch_x):
        # input : shape(b, src_len, ), padded sentences
        # output : tag_ids(b, src_len), scores(b), lengths(b)
        return self.lstm_crf_forward(batch_x)


    '''
    follow functions based on LSTM ignore the crf layer
    '''
    def lstm_only_forward(self, batch_x):
        # raw emission scores without any CRF decoding
        return self._get_lstm_feats(batch_x)


    def lstm_only_loss(self, batch_x, batch_y):
        ''' take the hidden states of lstm as tag scores
        :param batch_x: (batch of padded sentences, lengths)
        :param batch_y: unpadded gold tag ids, one sequence per batch element
        :return: mean cross entropy loss value
        '''
        feats, lengths = self._get_lstm_feats(batch_x)

        # without crf
        batch_size = len(feats)
        all_steps = torch.sum(lengths)

        # NOTE(review): this initialiser is dead code — ``loss`` is
        # overwritten two lines below.
        loss = torch.tensor(0, dtype=torch.float)
        # gather the score of the gold tag at every real (non-padded) step
        scores = torch.stack([feats[i, j, batch_y[i][j]] for i in range(batch_size) for j in range(lengths[i])])
        # NOTE(review): feats are unnormalised linear outputs (the softmax on
        # them is commented out in _get_lstm_feats), so -log here is not a
        # proper cross-entropy and is NaN for non-positive scores — confirm.
        loss = torch.sum(-torch.log(scores))/all_steps
        return loss

    @staticmethod
    def debugcrf(model, dev_loader, visualizer = None, dataClass=None):
        """Decode one dev batch, log a readable sample, return token accuracy.

        :param model: a BiLSTM_CRF instance.
        :param dev_loader: DataLoader yielding (x, y_true, raw_text) batches.
        :param visualizer: unused here (kept for interface compatibility).
        :param dataClass: dataset helper exposing the ``tags`` sequence.
        :return: token-level accuracy on the sampled dev batch.
        """
        with torch.no_grad():
            dev_batch = iter(dev_loader).__next__()
            x, y_true, raw_text = dev_batch
            y_pred, scores, lengths = model.lstm_crf_forward(x)

            # pretty-print the first sample as "char:TAG char:TAG ..."
            sample_pred, sample_true, length, text = y_pred[0], y_true[0], lengths[0], raw_text[0]
            tags_pred = list(map(dataClass.tags.__getitem__, sample_pred[:length]))
            tags_true = list(map(dataClass.tags.__getitem__, sample_true[:length]))
            show_pred = ' '.join([text[i] + ':' + tags_pred[i] for i in range(len(tags_pred))])
            show_true = ' '.join([text[i] + ':' + tags_true[i] for i in range(len(tags_true))])

            logger.debug(show_pred)
            logger.debug(show_true)

            # token-level accuracy over the whole batch, padding excluded
            T, A = 0.0, 0.0
            for p, t, l in zip(y_pred, y_true, lengths):
                for i in range(l):
                    T += int(p[i] == t[i])
                A += l
            logger.debug(f'acc = {100.0 * T / A:.2f}%')
            return T/A # dev seg_acc

    # train model
    @staticmethod
    def train_lstm_crf(model, train_loader, dev_loader, args,
                       model_save_dir=None,
                       visual_log_dir=None,
                       dataClass=None
                       ):
        """Train the BiLSTM-CRF model with Adam + StepLR.

        :param model: a BiLSTM_CRF instance (already on ``device``).
        :param train_loader: DataLoader of (x, y, raw_text) training batches.
        :param dev_loader: DataLoader used for periodic debug accuracy.
        :param args: docopt-style dict providing '--lr', '--lr-patience',
            '--lr-decay', '--clip-grad' and '--epoch'.
        :param model_save_dir: directory for per-epoch full-model checkpoints.
        :param visual_log_dir: root directory for tensorboard event files.
        :param dataClass: dataset helper (tags + ``get_loader('test')``).
        """
        logger.info(str(model))
        # tensorboard visualization log, one run directory per launch
        from datetime import datetime
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        visualizer = SummaryWriter(logdir=path.join(visual_log_dir, current_time) +
                                    f'crf_h{model.hidden_size}_l{model.drop_rate}')

        train_iter = 0
        optimizer = torch.optim.Adam(model.parameters(), lr=float(args['--lr']))
        # NOTE(review): scheduler.step() is invoked per batch below, so
        # '--lr-patience' counts iterations, not epochs — confirm intent.
        scheduler = StepLR(optimizer, step_size=int(args['--lr-patience']), gamma=float(args['--lr-decay']))
        loss = None
        for epoch in range(int(args['--epoch'])):
            for idx_batch, data_batch in enumerate(train_loader):
                x_train, y_train, rawtext = data_batch

                optimizer.zero_grad()
                x_train = (x_train[0].to(device), x_train[1].to(device))
                y_train = y_train.to(device)
                loss = model.lstm_crf_loss(x_train, y_train)
                loss.backward()
                # clip gradient
                _ = torch.nn.utils.clip_grad_norm_(model.parameters(), float(args['--clip-grad']))
                optimizer.step()
                # lr-decay
                scheduler.step()
                # debug
                if train_iter % 50 == 0:
                    dev_acc = BiLSTM_CRF.debugcrf(model, dev_loader, dataClass=dataClass)
                    visualizer.add_scalar("dev_seg_acc", dev_acc, train_iter)

                train_iter += 1
                logger.info(f'epoch:{epoch} iter:{train_iter} loss={loss.item():.3}')
                visualizer.add_scalar('train-loss', loss.item(), train_iter)
            
            # checkpoint the full model (pickled module) after each epoch,
            # then run the test-set evaluation
            import os
            if not os.path.exists(model_save_dir):
                os.makedirs(model_save_dir)
            save_path = f'{model_save_dir}/{time.strftime("%m%d%H%M")}-{model.hidden_size}-{loss.item():.2}.pt'
            torch.save(model, save_path)
            logger.debug('saved model!')
            eval_loader = dataClass.get_loader('test')
            BiLSTM_CRF.evaluate(model, dataloader=eval_loader, dataClass=dataClass)
            

    @staticmethod
    def debug_lstm_only(model, dev_loader):
        '''validation (CRF-free path): log one readable dev sample and accuracy.

        NOTE(review): ``model(x)`` dispatches to lstm_crf_forward, which
        returns three values — the 2-tuple unpacking below suggests this
        helper was written against lstm_only_forward; confirm before use.
        '''
        with torch.no_grad():
            dev_batch = iter(dev_loader).__next__()
            x, y_true, raw_text = dev_batch
            y_pred, lengths = model(x)

            sample_pred, sample_true, length, text = y_pred[0], y_true[0], lengths[0], raw_text[0]
            # NOTE(review): relies on the loader exposing ``pred2tags``;
            # debugcrf uses dataClass.tags instead — confirm which is current.
            tags_pred = list(map(dev_loader.pred2tags, sample_pred[:length]))
            tags_true = list(map(dev_loader.pred2tags, sample_true[:length]))
            show_pred = ' '.join([text[i] + ':' + tags_pred[i] for i in range(len(tags_pred))])
            show_true = ' '.join([text[i] + ':' + tags_true[i] for i in range(len(tags_true))])
            logger.debug(show_pred)
            logger.debug(show_true)

            T, A = 0.0, 0.0
            for p, t, l in zip(y_pred, y_true, lengths):
                # the 2-D indexing of t implies y_true is one-hot here,
                # unlike the index-tag labels used in debugcrf — confirm
                T += t[range(l), torch.argmax(p[:l], 1)].sum().item()
                A += l
            logger.debug(f'acc = {100.0 * T / A:.2f}%')

    @staticmethod
    def train_lstm_only(model, train_loader, dev_loader, args):
        """Train the CRF-free variant (emission scores only).

        :param args: docopt-style dict providing '--lr', '--lr-patience',
            '--lr-decay', '--clip-grad' and '--epoch'.

        NOTE(review): ``model.loss`` is not defined on this class — the
        closest match, lstm_only_loss, takes (batch_x, batch_y); this call
        would raise AttributeError as written. Confirm before using.
        """
        train_iter = 0
        optimizer = torch.optim.Adam(model.parameters(), lr=float(args['--lr']))
        scheduler = StepLR(optimizer, step_size=int(args['--lr-patience']), gamma=float(args['--lr-decay']))

        for epoch in range(int(args['--epoch'])):
            for idx_batch, data_batch in enumerate(train_loader):
                x_train, y_train, rawtext = data_batch
                predict, lengths = model(x_train)
                optimizer.zero_grad()
                loss = model.loss(predict, y_train, lengths)
                loss.backward()
                # clip gradient
                _ = torch.nn.utils.clip_grad_norm_(model.parameters(), float(args['--clip-grad']))
                optimizer.step()
                # lr-decay
                scheduler.step()
                # debug
                if train_iter % 20 == 0:
                    BiLSTM_CRF.debug_lstm_only(model, dev_loader)

                train_iter += 1
                logger.info(f'epoch:{epoch} iter:{train_iter} loss={loss.item():.3}')

    # predict
    @staticmethod
    def predict(model, sentence: str, readable = False, dataClass=None):
        """Tag a single raw sentence.

        :param model: a BiLSTM_CRF instance.
        :param sentence: raw text; characters are mapped to ids by dataClass.
        :param readable: when False, return (tag id path, viterbi score);
            when True, extract entities from the B-/I-/O tag sequence and
            return {entity_text: [entity_type, [start, end]]}.
        :param dataClass: dataset helper (sentence2ids + tags).
        """
        with torch.no_grad():
            sent_ids_tensor = dataClass.sentence2ids(sentence).view(1,-1)
            sent_len_tensor = torch.tensor([sent_ids_tensor.shape[1]], dtype=torch.int)
            batch_x = (sent_ids_tensor.to(device), sent_len_tensor.to(device))
            paths, scores, _ = model.lstm_crf_forward(batch_x)
            if not readable:
                return paths[0], scores[0]
            else:
                tags_pred = list(map(dataClass.tags.__getitem__, paths[0]))
                entities = {}

                # tmp_info = [entity type, [start idx, end idx)]; tmp_enti
                # accumulates the characters of the entity being built
                tmp_info = ['', [0, 0]]
                tmp_enti = ''
                for idx, tag in enumerate(tags_pred):
                    # close the open entity when the span ends: an O tag, a
                    # new B- tag, or an I- tag of a different entity type
                    if tmp_enti != '' and (tag == 'O' or
                                           tag[0] == 'B' or
                                           (tag[0] == 'I' and tag[2:] != tmp_info[0])):

                        tmp_info[1][1] = idx
                        entities[tmp_enti] = tmp_info
                        tmp_info = ['', [0, 0]]
                        tmp_enti = ''
                    if tag[0] == 'B':
                        tmp_enti += sentence[idx]
                        tmp_info[0] = tag[2:]
                        tmp_info[1][0] = idx
                    if tag[0] == 'I' and tag[2:] == tmp_info[0]:
                        tmp_enti += sentence[idx]
                # flush an entity that runs to the end of the sentence
                if tmp_enti != '':
                    tmp_info[1][1] = len(sentence)
                    entities[tmp_enti] = tmp_info
                return entities


    @staticmethod
    def evaluate(model=None, dataloader=None, model_dir=DULE_NER_MODEL, dataClass=None):
        """Run the project Evaluation metrics over a whole dataloader.

        :param model: model to evaluate; when None, the latest checkpoint in
            ``model_dir`` (lexicographically largest filename) is loaded.
        :param dataloader: required; yields (x, y_true, raw_text) batches.
        :param model_dir: checkpoint directory used only when model is None.
        :param dataClass: dataset helper forwarded to the evaluator.
        :raises Exception: if no dataloader is supplied.
        """
        from myNER.evaluation import Evaluation
        evaluator = Evaluation()
        
        if model is None:
            # NOTE(review): ``os`` is not imported at module level here — this
            # relies on `from myPath import *` exposing it; confirm.
            model_path = f'{model_dir}/{max(os.listdir(model_dir))}'
            model = torch.load(model_path)
            logger.info('eval with model : ' + model_path)
        if dataloader is None:
            raise Exception("dataloader can't be None!")

        with torch.no_grad():
            from tqdm import tqdm
            for x, y_true, raw_text in tqdm(dataloader):
                y_pred, scores, lengths = model.lstm_crf_forward(x)
                for gold, pred, seq_len, text in zip(y_true, y_pred, lengths, raw_text):
                    evaluator.add_sample(gold, pred, seq_len, dataClass=dataClass, text=text)
        logger.info(str(evaluator.eval_scores()))


if __name__ == '__main__':
    import os

    # Load the most recent checkpoint: saved filenames start with a
    # timestamp, so the lexicographically largest name is the latest one.
    latest_checkpoint = max(os.listdir(CLUENER_MODEL))
    net = torch.load(f'{CLUENER_MODEL}/{latest_checkpoint}')
    print(net)









