# -*- coding: utf-8 -*-

import time
import torch
import torch.nn as nn
from torch import optim
from tensorboardX import SummaryWriter

from model.lstm_crf import LSTM_CRF
from load_data import TrainIteratorWrapper, TestIteratorWrapper
from utils import prf
from utils import tagging2ner, tagging2sentence
from typing import Dict


def init_network(model, method='xavier', exclude='embedding'):
    """Initialize model weights in place (default: Xavier normal).

    Parameters whose name contains `exclude` (embeddings by default) are
    left untouched so pretrained embeddings survive initialization.

    Args:
        model: nn.Module whose named parameters are (re)initialized.
        method: 'xavier', 'kaiming', or anything else for plain normal init.
        exclude: substring marking parameter names to skip.
    """
    for name, w in model.named_parameters():
        if exclude in name:
            continue
        if 'weight' in name:
            # xavier/kaiming need fan-in/fan-out, which is undefined for
            # tensors with fewer than 2 dims (e.g. BatchNorm/LayerNorm
            # weights) and raises ValueError — fall back to plain normal.
            if w.dim() < 2 or method not in ('xavier', 'kaiming'):
                nn.init.normal_(w)
            elif method == 'xavier':
                nn.init.xavier_normal_(w)
            else:
                nn.init.kaiming_normal_(w)
        elif 'bias' in name:
            nn.init.constant_(w, 0)


def train(model: LSTM_CRF,
          train_iter: TrainIteratorWrapper,
          eval_iter: TrainIteratorWrapper,
          lr: float,
          epochs: int,
          idx2char: Dict[int, str],
          idx2tag: Dict[int, str],
          global_step: int = 0,
          save_path: str = "./checkpoint/models.epoch{}"):
    """Train `model` with Adam, periodically evaluating and checkpointing.

    Every 20 batches: evaluate one batch from `eval_iter` and log
    train/dev loss to TensorBoard. Every 1000 batches: write a checkpoint
    to `save_path.format(step)`. A final checkpoint is saved as
    `save_path.format("s")`.

    Args:
        model: LSTM-CRF model; calling it returns the batch loss.
        train_iter: iterator yielding (sent, tags, masks) training batches.
        eval_iter: iterator yielding (sent, tags, masks) dev batches.
        lr: Adam learning rate.
        epochs: number of full passes over `train_iter`.
        idx2char: index -> character map, forwarded to `eval`.
        idx2tag: index -> tag map, forwarded to `eval`.
        global_step: starting batch counter (for resumed training).
        save_path: checkpoint path template with one `{}` placeholder.
    """
    optimizer = optim.Adam(model.parameters(), lr)
    model.train()

    start_time = time.time()
    writer = SummaryWriter("./log/" + time.strftime('%m-%d_%H.%M', time.localtime()))
    total_batches = global_step
    try:
        for epoch in range(1, epochs + 1):
            print("[Epoch]: %d/%d" % (epoch, epochs))
            for sent, tags, masks in train_iter:
                optimizer.zero_grad()
                loss = model(sent, tags, masks)
                loss.backward()
                optimizer.step()

                total_batches += 1
                if total_batches % 20 == 0:
                    res = eval(model, eval_iter, idx2char, idx2tag, True)
                    # .item() detaches the scalar so logging holds no graph refs
                    print("Iter: {}, loss: {:.2f}, eval loss: {:.2f}, time: {:.2f}s".format(
                        total_batches, loss.item(), res[0], time.time() - start_time))
                    model.train()  # eval() above switched the model to eval mode
                    writer.add_scalar("loss/train", loss.item(), total_batches)
                    writer.add_scalar("loss/dev", res[0], total_batches)

                if total_batches % 1000 == 0:
                    torch.save(model.state_dict(), save_path.format(total_batches))
        torch.save(model.state_dict(), save_path.format("s"))
    finally:
        # Flush and close the event file even if training is interrupted.
        writer.close()


def eval(model: LSTM_CRF,
         eval_iter: TrainIteratorWrapper,
         idx2char: Dict[int, str],
         idx2tag: Dict[int, str],
         printable=True):
    """Evaluate `model` on a single batch drawn from `eval_iter`.

    Switches the model to eval mode (the caller is responsible for
    restoring train mode) and scores one batch: per-sentence loss plus
    NER-span and NER-type precision/recall/F1 computed by `prf`.

    Args:
        model: LSTM-CRF model.
        eval_iter: iterator yielding (sents, tags, masks) batches.
        idx2char: index -> character map for decoding.
        idx2tag: index -> tag map for decoding.
        printable: when True, print a one-line metrics summary.

    Returns:
        (avg_loss, ner_p, ner_r, ner_f1, type_p, type_r, type_f1)
    """
    model.eval()
    start_time = time.time()
    sents, tags, masks = next(eval_iter)
    # No gradients are needed for evaluation: skip building the graph.
    with torch.no_grad():
        loss = model(sents, tags, masks)
        predict_tags = model.decode(sents, masks)
    ner_p, ner_r, ner_f1, type_p, type_r, type_f1 = prf(sents, predict_tags, tags, masks, idx2char, idx2tag)
    avg_loss = loss / len(sents)
    if printable:
        msg = "Eval: loss: {:.2f}; take time: {:.2f}s ; " \
              "\tner p:{:.2f}, ner r: {:.2f}, ner f1: {:.2f};" \
              "\ttype p: {:.2f} type r: {:.2f} type f1: {:.2f}"
        print(msg.format(avg_loss, time.time() - start_time,
                         ner_p, ner_r, ner_f1,
                         type_p, type_r, type_f1))
    return avg_loss, ner_p, ner_r, ner_f1, type_p, type_r, type_f1


def test(model: LSTM_CRF,
         test_iter: TestIteratorWrapper,
         idx2char: Dict[int, str],
         idx2tag: Dict[int, str],
         out_ners: str,
         out_text: str):
    """Run inference over `test_iter`, writing predictions to disk.

    Writes one decoded sentence per line to `out_text` and one
    "entity<TAB>type" pair per line to `out_ners`.

    Args:
        model: LSTM-CRF model (decode-only; no labels are used).
        test_iter: iterator yielding (sents, tags, masks) batches.
        idx2char: index -> character map for decoding.
        idx2tag: index -> tag map for decoding.
        out_ners: output path for predicted entities.
        out_text: output path for decoded sentences.
    """
    model.eval()
    start_time = time.time()
    total_batches = 0
    # `with` guarantees both output files are closed even if decoding fails.
    with open(out_ners, 'w') as fo_ners, open(out_text, 'w') as fo_text:
        for sents, tags, masks in test_iter:
            with torch.no_grad():  # inference only: no graph needed
                predict_tags = model.decode(sents, masks)
            sents, masks = sents.cpu().numpy(), masks.cpu()
            pred_sentences = tagging2sentence(sents, predict_tags, masks, idx2char, idx2tag)
            pred_ners = tagging2ner(sents, predict_tags, masks, idx2char, idx2tag)
            for sentence in pred_sentences:
                fo_text.write(sentence + "\n")
            for ners in pred_ners:
                for ner in ners:
                    # NOTE(review): only fields 0 and 2 of each ner tuple are
                    # written — presumably (surface_text, ..., type); confirm
                    # against tagging2ner in utils.
                    fo_ners.write("{}\t{}\n".format(ner[0], ner[2]))
            total_batches += 1
            # Format fixed: '{:.2}' printed 2 significant digits, not 2
            # decimals like every other timing message in this file.
            print("\rProcessed {} batches, take {:.2f}s".format(total_batches, time.time() - start_time), end="")
    print(" done. ")


def post_test(pred_ners, entity_validate, out_validate):
    """Merge predicted entity types into a validation entity list.

    Loads the entity list from `entity_validate` (one entity per line),
    defaulting every entity's type to "Non". Then reads predicted
    "entity<TAB>TYPE" pairs from `pred_ners` and, for entities that occur
    in the validation list, replaces the default with the predicted
    type's Chinese text label. Writes "entity<TAB>label" lines to
    `out_validate`.

    Args:
        pred_ners: path to the predictions file ("entity label" per line).
        entity_validate: path to the entity list to annotate.
        out_validate: output path for the annotated list.
    """
    UNK = "UNK"
    type_dict = {"医学专科": "SPE", "检查科目": "CHK", "药物": "MED", "症状": "SYM", "疾病": "DIS", "细菌": "BAC", "病毒": "VIR",
                 "NoneType": "Non", UNK: UNK}
    # Reverse map: short tag -> original text label.
    type2text = {tag: text for text, tag in type_dict.items()}

    print('processing post test ... ', end='')
    entities = {}
    # Explicit utf-8: these files contain CJK text and must not depend on
    # the locale's default encoding.
    with open(entity_validate, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # File iteration never yields None — only blank lines need skipping.
            if not line:
                continue
            entities[line] = type_dict['NoneType']

    tmp_entities = {}
    with open(pred_ners, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) < 2:
                continue
            # Take the first two fields; lines with extra tokens no longer
            # raise ValueError from tuple unpacking.
            entity, label = parts[0], parts[1]
            tmp_entities[entity] = label
            if entity in entities:
                entities[entity] = type2text[label]

    print("\nintersection: ", len(set(entities).intersection(tmp_entities)))

    with open(out_validate, 'w', encoding='utf-8') as f:
        for entity, label in entities.items():
            f.write("{}\t{}\n".format(entity, label))

    print(' done. ')
