import pickle
import torch
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils import clip_grad_norm_
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from model.LSTM import NER_LSTM
from model.LSTM_CRF import NER_LSTM_CRF
from model.BERT_LSTM_CRF import NER_BERT_LSTM_CRF
import config
from tqdm import tqdm
from utils import *


class NERDataset(Dataset):
    """Map-style dataset pairing each input sequence with its label sequence.

    X and Y are parallel arrays; row i of X is the token-id sequence and
    row i of Y its tag-id sequence.
    """

    def __init__(self, X, Y):
        # Build one {'x': features, 'y': labels} record per sample row.
        samples = []
        for idx in range(X.shape[0]):
            samples.append({'x': X[idx], 'y': Y[idx]})
        self.data = samples

    def __getitem__(self, index):
        # Records are plain dicts; DataLoader's default collate stacks them.
        return self.data[index]

    def __len__(self):
        return len(self.data)


def get_preds_and_labels(y_true, predict):
    """Flatten predictions and gold labels, dropping padding positions.

    Padding uses label id 0, so the count of entries > 0 in each row of
    ``y_true`` gives that sequence's real length; both the predicted and
    the gold sequences are truncated to that length and concatenated.

    Args:
        y_true: 2-D tensor of gold label ids, zero-padded.
        predict: per-sample sequences of predicted label ids (list of lists).

    Returns:
        (preds, labels): two flat, equal-length lists of label ids.

    NOTE(review): assumes the non-zero labels form a prefix of each row
    (padding only at the end) — confirm tag id 0 is reserved for padding
    and is never a real tag.
    """
    # Count the real (non-padding) tokens per row directly, instead of
    # materializing the non-zero values just to take their length.
    lengths = [(row > 0).sum().item() for row in y_true.cpu()]

    preds, labels = [], []
    for n, seq in zip(lengths, predict):
        preds += seq[:n]
    for n, seq in zip(lengths, y_true.tolist()):
        labels += seq[:n]

    return preds, labels


def train(model, model_name, train_dataloader, valid_dataloader):
    """Run the full training loop and keep the best checkpoint.

    Each epoch: trains on ``train_dataloader`` (Adam, loss from the model's
    own ``log_likehood`` method, gradient clipping), then decodes all of
    ``valid_dataloader`` and reports entity-level f1/precision/recall plus
    accuracy.  Whenever validation accuracy improves, the model's
    ``state_dict`` is saved to ``<config.root_path>/model/<model_name>.pkl``.

    NOTE(review): relies on the module-level global ``id2tag`` (bound in the
    ``__main__`` block) and on ``classification_report``, which is not
    visibly imported.  The ``suffix=`` keyword on the metric calls matches
    seqeval's API rather than sklearn's, so ``from utils import *``
    presumably shadows the sklearn imports at the top of the file — confirm.
    """
    model = model.to(config.device)

    # # Loss function (unused — the model supplies its own loss below)
    # criterion = nn.CrossEntropyLoss(ignore_index=0)

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)

    best_accuracy = 0.0
    for epoch in range(config.max_epoch):
        model.train()

        epoch_loss = 0.0
        epoch_accuracy = 0.0
        epoch_preds = []
        epoch_labels = []

        process_bar = tqdm(total=len(train_dataloader))
        for step, batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            X = batch['x']
            y = batch['y']

            X = X.to(config.device)
            y = y.to(config.device)

            predict = model(X)

            # Log-likelihood loss ('log_likehood' is the model's own
            # (misspelled) API name — do not "fix" here without renaming it
            # in the model classes too).
            loss = model.log_likehood(X, y)

            epoch_loss += loss.item()
            loss.backward()

            preds, labels = get_preds_and_labels(y, predict)

            epoch_preds += preds
            epoch_labels += labels

            # Gradient clipping to keep updates stable
            clip_grad_norm_(model.parameters(), max_norm=10)

            optimizer.step()

            accuracy = accuracy_score(labels, preds)

            process_bar.set_description(
                f'train loss::{format(loss.item(), ".4f")}, train accuracy:{format(accuracy, ".4f")}\t-->'
            )
            process_bar.update(1)

            # if step % config.print_freq == 0:
            #     print(
            #         'epoch:', epoch,
            #         'step:', step,
            #         'train loss:', round(loss.item(), 3),
            #         'train accuracy: ', round(accuracy, 3)
            #     )

        del process_bar

        epoch_accuracy = accuracy_score(epoch_labels, epoch_preds)
        epoch_loss = epoch_loss / len(train_dataloader)
        # precision = precision_score(epoch_labels, epoch_preds, average='macro')
        # recall = recall_score(epoch_labels, epoch_preds, average='macro')
        # f1 = f1_score(epoch_labels, epoch_preds, average='macro')

        # print(
        #     'epoch:', epoch,
        #     'train loss:', format(epoch_loss, '.3f'),
        #     'train accuracy: ', epoch_accuracy
        # )
        # print(f"epoch: {epoch}, train loss: {format(epoch_loss, '.3f')}, train accuracy: {epoch_accuracy}")

        # Reset accumulators before validation.
        epoch_loss = 0.0
        epoch_accuracy = 0.0
        epoch_preds = []
        epoch_labels = []

        # Validation pass.
        # NOTE(review): not wrapped in torch.no_grad(), so autograd state is
        # tracked during evaluation — consider adding it (memory/speed only,
        # results unchanged).
        model.eval()
        y_pred, y_true = [], []
        for step, batch in enumerate(valid_dataloader):
            X = batch['x']
            y = batch['y']

            X = X.to(config.device)
            y = y.to(config.device)

            predict = model(X)
            y_pred += predict
            y_true += y.cpu().numpy().tolist()

            # # Log-likelihood loss
            # loss = model.log_likehood(X, y)
            #
            # epoch_loss += loss.item()
            #
            # preds, labels = get_preds_and_labels(y, predict)
            #
            # epoch_preds += preds
            # epoch_labels += labels

            # accuracy = accuracy_score(labels, preds)

            # if step % 20 == 0:
            #     print('epoch:', epoch, 'step:', step, 'valid loss:', round(loss.item(), 3),
            #           'valid accuracy: ', round(accuracy, 3))

        # Map tag ids back to tag strings, replacing '_' with '-' so labels
        # follow the "B-XXX"/"I-XXX" convention the entity-level metrics
        # expect.  id2tag has a .to_dict() method — presumably a pandas
        # Series; confirm against the preprocessing code.
        xxx = {key: value.replace('_', '-') for key, value in id2tag.to_dict().items()}
        y_pred = [[xxx[__] for __ in _] for _ in y_pred]
        y_true = [[xxx[__] for __ in _] for _ in y_true]
        f1 = f1_score(y_true, y_pred, suffix=False)
        p = precision_score(y_true, y_pred, suffix=False)
        r = recall_score(y_true, y_pred, suffix=False)
        acc = accuracy_score(y_true, y_pred)
        print(
            "\nf1_score: {:.4f}, precision_score: {:.4f}, recall_score: {:.4f}, accuracy_score: {:.4f}".format(f1, p, r,
                                                                                                               acc))
        print(classification_report(y_true, y_pred, digits=4, suffix=False))

        # epoch_accuracy = accuracy_score(epoch_labels, epoch_preds)
        # epoch_loss = epoch_loss / len(valid_dataloader)
        # precision = precision_score(epoch_labels, epoch_preds, average='macro')
        # recall = recall_score(epoch_labels, epoch_preds, average='macro')
        # f1 = f1_score(epoch_labels, epoch_preds, average='macro')
        #
        # print(
        #     'epoch:', epoch,
        #     'valid loss:', format(epoch_loss, '.4f'),
        #     'valid accuracy: ', format(epoch_accuracy, '.4f')
        # )

        # Checkpoint whenever validation accuracy improves.
        if acc > best_accuracy:
            best_accuracy = acc
            print('Save best valid accuracy:', best_accuracy)
            torch.save(model.state_dict(), config.root_path + '/model/'
                       + model_name.lower() + '.pkl')


if __name__ == '__main__':
    # Deserialize vocabularies, tag maps and the data splits.  The read
    # order must match the order the objects were pickled in.
    with open(config.pickle_path, 'rb') as inp:
        (word2id, id2word, tag2id, id2tag,
         x_train, y_train, x_test, y_test,
         x_valid, y_valid) = (pickle.load(inp) for _ in range(10))

    # input(type(x_test))

    print("train data len:", len(x_train))
    print("valid data len:", len(x_valid))

    train_dataset = NERDataset(x_train, y_train)
    valid_dataset = NERDataset(x_valid, y_valid)

    _train_dataloader = DataLoader(
        dataset=train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
    )
    _valid_dataloader = DataLoader(
        dataset=valid_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
    )

    # Registry mapping the configured model name to its class.
    model_classes = {
        'NER_LSTM': NER_LSTM,
        'NER_LSTM_CRF': NER_LSTM_CRF,
        'NER_BERT_LSTM_CRF': NER_BERT_LSTM_CRF,
    }
    ner_model = model_classes[config.model_name](
        embedding_dim=config.embedding_dim,
        hidden_dim=config.hidden_dim,
        dropout=config.dropout,
        word2id=word2id,
        tag2id=tag2id,
    )
    train(ner_model, config.model_name, _train_dataloader, _valid_dataloader)
