# -*- coding: utf-8 -*-

import time
import torch
import torch.nn as nn
from torch import optim
import numpy as np
from tensorboardX import SummaryWriter

from models.model.lstm_softmax import LSTM_Softmax
from load_data import TrainIteratorWrapper, TestIteratorWrapper
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix, accuracy_score
from typing import Dict

# Per-class loss weights for the 8 entity types; they sum to 1.0.
# Presumably derived from (inverse) class frequencies of the training set — TODO confirm.
weight = torch.tensor(
    [0.018705759119383276, 0.38974488334075025, 0.3608748919821761, 0.012895970404657174,
     0.1451864217742861, 0.02437937135158988, 0.03935933516681723, 0.008853366860340111],
    dtype=torch.float32)


def init_network(model, method='xavier', exclude='embedding'):
    """Initialize model parameters in place (default scheme: Xavier normal).

    Parameters whose names contain `exclude` (e.g. embedding tables) are
    left untouched. Bias parameters and weights with fewer than two
    dimensions are zero-filled; all other weights use the scheme selected
    by `method` ('xavier', 'kaiming', anything else falls back to normal).
    """
    schemes = {
        'xavier': nn.init.xavier_normal_,
        'kaiming': nn.init.kaiming_normal_,
    }
    for name, param in model.named_parameters():
        if exclude in name:
            continue
        if 'weight' in name:
            if param.dim() < 2:
                # xavier/kaiming need fan-in and fan-out, which are
                # undefined for tensors with fewer than 2 dimensions
                nn.init.constant_(param, 0)
            else:
                schemes.get(method, nn.init.normal_)(param)
        elif 'bias' in name:
            nn.init.constant_(param, 0)


def train(model: LSTM_Softmax,
          train_iter: TrainIteratorWrapper,
          eval_iter: TrainIteratorWrapper,
          lr: float,
          epochs: int,
          idx2char: Dict[int, str],
          idx2tag: Dict[int, str],
          global_step: int = 0,
          save_path: str = "./checkpoint/model.epoch{}"):
    """Train `model` with Adam and class-weighted cross-entropy.

    Every 20 batches the model is evaluated on `eval_iter`; whenever the
    validation loss improves, a checkpoint is saved to
    `save_path.format(batch_count)`. Training stops early once the
    validation loss has not improved for more than 1000 batches. Metrics
    are logged to TensorBoard under ./log/, and a final checkpoint plus a
    full evaluation report are produced at the end.

    :param model: classifier to train (kept in train mode between evals)
    :param train_iter: yields (entities, tokens, types, masks, raw) batches
    :param eval_iter: validation batches in the same format
    :param lr: Adam learning rate
    :param epochs: number of passes over `train_iter`
    :param idx2char: index->char map, forwarded to eval()
    :param idx2tag: index->tag map, forwarded to eval()
    :param global_step: starting batch counter (for resumed runs)
    :param save_path: checkpoint path template; formatted with the batch
        count, and with "s" for the final save
    """
    optimizer = optim.Adam(model.parameters(), lr)
    model.train()

    global weight
    start_time = time.time()
    eval_best_loss = float('inf')
    last_improve = 0  # batch count at the last validation-loss improvement
    flag = False  # set True when validation loss has stalled for too long
    writer = SummaryWriter("./log/" + time.strftime('%m-%d_%H.%M', time.localtime()))
    msg = 'Iter: {0:>6},  Train Loss: {1:>5.2},  Train Acc: {2:>6.2%},  Val Loss: {3:>5.2},  Val Acc: {4:>6.2%},  Time: {5:.2f}s {6}'
    total_batches = global_step
    for epoch in range(1, epochs + 1):
        print("[Epoch]: %d/%d" % (epoch, epochs))
        for entities, tokens, types, masks, _ in train_iter:
            # lazily move the class-weight tensor onto the batch's device
            if weight.device != entities.device:
                weight = weight.to(entities.device)
            optimizer.zero_grad()
            probs = model(entities, tokens, masks)
            loss = torch.nn.functional.cross_entropy(probs, types, weight)
            loss.backward()
            optimizer.step()

            total_batches += 1
            if total_batches % 20 == 0:
                _, pred_types = torch.max(probs, dim=1)
                gold_types, pred_types = types.cpu().numpy(), pred_types.cpu().numpy()
                train_acc = accuracy_score(gold_types, pred_types)
                eval_loss, eval_acc = eval(model, eval_iter, idx2char, idx2tag, False)
                if eval_loss < eval_best_loss:
                    eval_best_loss = eval_loss
                    torch.save(model.state_dict(), save_path.format(total_batches))
                    last_improve = total_batches
                    improve = '*'  # marks a new best validation loss in the log line
                else:
                    improve = ''
                print(msg.format(total_batches, loss, train_acc, eval_loss, eval_acc,time.time() - start_time, improve))
                model.train()  # eval() switched the model to eval mode; switch back
                writer.add_scalar("loss/train", loss, total_batches)
                writer.add_scalar("loss/dev", eval_loss, total_batches)
                writer.add_scalar("acc/train", train_acc, total_batches)
                writer.add_scalar("acc/dev", eval_acc, total_batches)

            if total_batches - last_improve > 1000:  # early stop: no val-loss improvement for over 1000 batches
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break
            pass
        if flag:
            break
        pass
    writer.close()
    torch.save(model.state_dict(), save_path.format("s"))
    eval(model, eval_iter, idx2char, idx2tag, True)
    pass


def __write_predict(entities, gold_types, pred_types, masks, idx2char, idx2tag):
    """Append one `entity<TAB>gold_tag<TAB>pred_tag` line per sample to predict.txt."""
    rows = zip(entities, gold_types, pred_types, masks)
    with open("predict.txt", 'a+') as fout:
        for entity, gold, pred, mask in rows:
            # number of non-padding positions determines how many chars to decode
            valid_len = int(torch.sum(mask, dim=0).cpu().long())
            decoded = "".join(idx2char[idx] for _, idx in zip(range(valid_len), entity))
            fout.write("{}\t{}\t{}\n".format(decoded, idx2tag[gold], idx2tag[pred]))


def eval(model: LSTM_Softmax,
         eval_iter: TrainIteratorWrapper,
         idx2char: Dict[int, str],
         idx2tag: Dict[int, str],
         printable=True):
    """Evaluate `model` on `eval_iter`; return (mean loss, accuracy).

    Side effects: switches the model to eval mode (caller restores train
    mode if needed) and appends every (entity, gold, pred) triple to
    predict.txt via __write_predict. When `printable` is True, also prints
    a classification report and confusion matrix.

    :param model: trained classifier
    :param eval_iter: yields (entities, tokens, tags, masks, raw) batches
    :param idx2char: index->char map, used for writing predictions
    :param idx2tag: index->tag map, used for reports and predictions
    :param printable: print the detailed report when True
    :return: (mean cross-entropy loss as float, overall accuracy)
    """
    global weight
    model.eval()
    total_loss = 0.0
    gold_parts = []
    pred_parts = []
    batches = 0

    # no_grad is required here: without it activations are retained for
    # backprop and the GPU runs out of memory on large eval sets.
    with torch.no_grad():
        for entities, tokens, tags, masks, _ in eval_iter:
            # lazily move the class-weight tensor onto the batch's device
            if weight.device != entities.device:
                weight = weight.to(entities.device)
            probs = model(entities, tokens, masks)

            loss = torch.nn.functional.cross_entropy(probs, tags, weight)
            _, pred_types = torch.max(probs, dim=1)
            gold_types, pred_types = tags.cpu().numpy(), pred_types.cpu().numpy()
            __write_predict(entities.cpu().numpy(), gold_types, pred_types, masks, idx2char, idx2tag)

            # .item(): accumulate a python float instead of 0-dim tensors
            total_loss += loss.item()
            gold_parts.append(gold_types)
            pred_parts.append(pred_types)
            batches += 1

    # single concatenation instead of np.append per batch (which re-copies
    # the whole accumulated array every iteration, i.e. O(n^2))
    golds = np.concatenate(gold_parts) if gold_parts else np.array([], dtype=int)
    preds = np.concatenate(pred_parts) if pred_parts else np.array([], dtype=int)
    pred_acc = accuracy_score(golds, preds)

    if printable:
        report = classification_report(golds, preds, labels=list(range(len(idx2tag))), target_names=list(idx2tag.values()), digits=4)
        confusion = confusion_matrix(golds, preds)
        msg = 'Eval Loss: {0:>5.2},  Eval Acc: {1:>6.2%}'
        print("="*40)
        print(msg.format(total_loss/batches, pred_acc))
        print('-'*40, '\n', "Classification report:")
        print(report)
        print('-'*40, '\n', "Confusion matrix:")
        print(confusion)

    return total_loss / batches, pred_acc


def test(model: LSTM_Softmax,
         test_iter: TestIteratorWrapper,
         idx2char: Dict[int, str],
         idx2tag: Dict[int, str],
         out_validate: str,
         test_file: str):
    """Predict a type for every test entity and write `entity<TAB>type` lines.

    First runs the model over `test_iter`, mapping each raw entity string to
    its predicted tag; then re-reads `test_file` line by line so the output
    file preserves the original ordering.

    :param model: trained classifier (switched to eval mode here)
    :param test_iter: yields (entities, tokens, _, masks, raw) batches
    :param idx2char: index->char map (unused here; kept for a uniform signature)
    :param idx2tag: index->tag map for decoding predictions
    :param out_validate: path of the output file to write
    :param test_file: path of the raw test file (one entity per line)
    """
    model.eval()
    total_batches = 0
    entity_types = {}
    # no_grad: pure inference — without it the graph is retained and GPU
    # memory can be exhausted (see the OOM note in eval()).
    with torch.no_grad():
        for entities, tokens, _, masks, raw in test_iter:
            prob = model(entities, tokens, masks)
            _, preds = torch.max(prob, dim=1)
            preds = preds.cpu().numpy()
            for entity, pred in zip(raw, preds):
                entity_types[entity] = idx2tag[pred]

            total_batches += 1
            print("\rprocessed %d." % total_batches, end="")

    with open(test_file, 'r') as f, open(out_validate, 'w') as fo:
        for line in f:
            line = line.strip()
            fo.write("{}\t{}\n".format(line, entity_types[line]))

    print(' done. ')
    pass


def infer(model,
          entity: str,
          char_vocabs: Dict[str, int],
          word_vocabs: Dict[str, int],
          idx2char: Dict[int, str],
          idx2tag: Dict[int, str],
          is_for_bert: bool = False,
          max_len: int = 32,
          unk_idx: int = 1,
          pad_idx: int = 0,
          device: torch.device = torch.device("cuda:0")):
    """Classify a single entity string and print `entity: ..., type: ...`.

    Builds char-level ids plus word ids (each word id repeated once per
    character so both sequences align 1:1), pads or truncates both to
    `max_len`, with [CLS]/[SEP] bookkeeping when `is_for_bert`, then runs
    the model on the single-sample batch.

    :param model: trained classifier (switched to eval mode here)
    :param entity: raw entity string to classify
    :param char_vocabs: char -> index map
    :param word_vocabs: word -> index map
    :param idx2char: index -> char map (unused here; kept for a uniform signature)
    :param idx2tag: index -> tag map for decoding the prediction
    :param is_for_bert: add [CLS]/[SEP] markers when True
    :param max_len: fixed sequence length after padding/truncation
    :param unk_idx: index for out-of-vocabulary chars/words
    :param pad_idx: index used for padding
    :param device: device the input tensors are placed on
    """
    from load_data import __to_tensor, CLS, SEP
    import jieba
    model.eval()
    entity_idx = [char_vocabs.get(char, unk_idx) for char in entity]
    # repeat each word id once per character it covers so the word sequence
    # lines up position-by-position with the char sequence
    token = [word_vocabs.get(tk, unk_idx) for tk in jieba.cut(entity) for _ in tk]
    if is_for_bert:
        entity_idx = [char_vocabs.get(CLS)] + entity_idx
        token = [word_vocabs.get(CLS)] + token
    if len(entity_idx) < max_len:
        # pad up to max_len - 1, then close with SEP (bert) or one more pad
        entity_idx.extend([pad_idx] * (max_len - len(entity_idx) - 1))
        entity_idx.append(char_vocabs.get(SEP) if is_for_bert else pad_idx)
        token.extend([pad_idx] * (max_len - len(token) - 1))
        token.append(word_vocabs.get(SEP) if is_for_bert else pad_idx)
    else:
        # Truncate, keeping the LAST max_len ids (max_len - 2 plus CLS/SEP for
        # bert). BUG FIX: the original sliced entity_idx[(max_len - len):],
        # a negative start that kept only len - max_len ids when the entity
        # was longer than max_len; now mirrors the (correct) token path.
        entity_idx = entity_idx[(len(entity_idx) - max_len):] if not is_for_bert \
            else [char_vocabs.get(CLS)] + entity_idx[(len(entity_idx) - max_len + 2):] + [char_vocabs.get(SEP)]
        token = token[(len(token) - max_len):] if not is_for_bert \
            else ([word_vocabs.get(CLS)] + token[(len(token) - max_len + 2):] + [word_vocabs.get(SEP)])
    entities, tokens, types, masks, raw = __to_tensor([entity_idx], [token], None, [entity], pad_idx, device)
    prob = model(entities, tokens, masks)
    _, preds = torch.max(prob, dim=1)
    preds = preds.cpu().numpy()
    print("entity: {}, type: {}".format(entity, idx2tag[preds[0]]))



