'''
train.py — trains and evaluates the multimodal NER (MNER) model.
'''

import os

import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import args
from dataset.MNERDataset import MNERDataset, collate_fn
# from model.Model import MMNerModel
from model.Model_extract_phrase_test1 import MMNerModel
from utils.metric import *
from model.encoders.ExtractNounPhrase_v2 import *


def train(args):
    """Train MMNerModel on the train split, evaluating on val and test each epoch.

    `args` must provide dataset/optimizer configuration, a `logging` handle and
    the target `device`. The best test-set F1 is tracked in `res`; whenever it
    improves, the model is checkpointed via `save_model`.
    """
    args.logging.info("Create DataLoader.........")
    train_dataset = MNERDataset(etype="train", args=args)
    # shuffle=True: iterating the training set in a fixed order biases SGD;
    # randomize sample order every epoch (eval loaders stay deterministic).
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,
                                  shuffle=True, collate_fn=collate_fn)
    args.logging.info("number of train instances: {}".format(len(train_dataset)))
    val_dataset = MNERDataset(etype="val", args=args)
    val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
    args.logging.info("number of val instances: {}".format(len(val_dataset)))
    test_dataset = MNERDataset(etype="test", args=args)
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
    args.logging.info("number of test instances: {}".format(len(test_dataset)))
    args.logging.info("")

    args.logging.info("Create model.........")
    model = MMNerModel(args)
    model = model.to(args.device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Decay the learning rate by a factor of 0.8 every 2 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 2, gamma=0.8)

    # Shared with predict(), which updates best_f1/epoch in place.
    res = {"best_f1": -1.0, "epoch": -1}

    args.logging.info("Start training........")
    for epoch in range(args.train.max_epochs):
        model.train()
        for i, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader), desc="Training"):
            optimizer.zero_grad()
            loss = model.log_likelihood(batch)
            loss.backward()
            optimizer.step()

            if i % args.train.interval == 0:
                # .item() detaches and converts to a Python float; the legacy
                # `.data` attribute is deprecated and bypasses autograd checks.
                args.logging.info("EPOCH: {} Step: {} Loss: {}".format(epoch, i, loss.item()))

        scheduler.step()
        # Val metrics are logged for monitoring; checkpointing is keyed on test F1.
        predict(epoch, model, val_dataloader, mode="val", res=res)
        to_save = predict(epoch, model, test_dataloader, mode="test", res=res)
        if to_save:
            save_model(model, os.path.join(args.dataset.ckpt_dir, "model_{}.pt".format(epoch)))
    args.logging.info("================== train done! ================" + "\n")


def save_model(model, model_path=None):
    """Persist the model's weights to `model_path` and log the event.

    If `model_path` is None, a timestamped default is built at call time.
    (Building it in the signature, as before, froze `args.now` at import
    time — the classic early-bound default-argument pitfall.)
    """
    if model_path is None:
        model_path = './model_{}.{}_{}.{}.pt'.format(
            args.now.month, args.now.day, args.now.hour, args.now.minute)
    # The save call was previously commented out, so "saved" was logged while
    # nothing was written to disk. Persist the state_dict for real.
    torch.save(model.state_dict(), model_path)
    args.logging.info("Current Best mmner model has been saved!" + "\n")


def predict(epoch, model, dataloader, mode="val", res=None):
    """Run inference over `dataloader`, write token/predicted/gold tag triples
    to a prediction file, then score it with `evaluate_pred_file`.

    Returns True iff mode == "test" and this epoch achieved a new best F1
    (recorded in-place in the shared `res` dict); otherwise False.
    """
    model.eval()
    with torch.no_grad():
        # NOTE(review): file_dir is created, but file_path comes from a separate
        # config template — presumably it resolves inside file_dir; confirm.
        file_dir = os.path.join(args.dataset.save_dir, mode)
        file_path = args.dataset.predict_file.format(mode, epoch)
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
        with open(file_path, "w", encoding="utf-8") as fw:
            for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), desc="Predicting"):
                # output: per-sample predicted tag-index sequences for the batch.
                output = model(batch)

                # write into file
                for idx, pre_seq in enumerate(output):
                    ground_seq = batch["label"][idx]
                    for pos, (pre_idx, ground_idx) in enumerate(zip(pre_seq, ground_seq)):
                        # Skip BERT bookkeeping positions (padding, wordpiece
                        # continuations, [CLS]/[SEP]) — not real tokens to score.
                        if ground_idx == args.train.tag2idx["PAD"] or ground_idx == args.train.tag2idx["X"] or ground_idx == args.train.tag2idx["CLS"] or ground_idx == args.train.tag2idx["SEP"]:
                            continue
                        else:
                            # idx2tag is keyed by *string* indices. A predicted
                            # special tag is downgraded to "O" so the scorer
                            # never sees bookkeeping labels as predictions.
                            predict_tag = args.train.idx2tag[str(pre_idx)] if args.train.idx2tag[str(pre_idx)] not in [
                                "PAD", "X", "CLS", "SEP"] else "O"
                            # ground_idx is a tensor element (hence .data.item()),
                            # while pre_idx is already usable as a plain index.
                            true_tag = args.train.idx2tag[str(ground_idx.data.item())]
                            # One CoNLL-style line: token, predicted tag, gold tag.
                            line = "{}\t{}\t{}\n".format(batch["Bert_input_tokens"][idx][pos], predict_tag, true_tag)
                            fw.write(line)
        args.logging.info("=============={} -> {} epoch eval done=================".format(mode, epoch))
        cur_f1 = evaluate_pred_file(file_path, args.train.tag2idx, args.logging)
        to_save = False
        # Model selection is driven by the *test* split's F1 (val is only logged).
        if mode == "test":
            if res["best_f1"] < cur_f1:
                res["best_f1"] = cur_f1
                res["epoch"] = epoch
                to_save = True
            args.logging.info("current best f1: {}, epoch: {} \n".format(res["best_f1"], res["epoch"]))
        return to_save


def test(args):
    """Placeholder evaluation entry point; currently only logs that it ran."""
    args.logging.info("test。。。。")


def main():
    """Entry point: validate the modality, seed all RNGs, precompute phrase
    info if missing, then dispatch to train or test according to `args` flags.
    """
    # Validate explicitly: `assert` statements are stripped under `python -O`,
    # so they must not be used for input validation.
    if args.dataset.type not in ('none', 'image', 'video'):
        raise ValueError("args.dataset.type must be one of 'none', 'image', 'video'")
    if args.dataset.type == 'none':
        args.logging.info("input data: text!" + "\n")
    elif args.dataset.type == 'image':
        args.logging.info("input data: text + image!" + "\n")
    elif args.dataset.type == 'video':
        args.logging.info("input data: text + video!" + "\n")

    # Seed every RNG source (CPU, NumPy, all CUDA devices) for reproducibility.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

    # Extract and cache noun-phrase info once if the cache file is missing.
    if not os.path.exists(os.path.join(args.dataset.data_dir, args.dataset.phrase.format('test', args.extract_algorithm))):
        save_Phrase_info(args)         # algorithm = "StanfordCoreNLP" / "Spacy"

    if args.do_train:
        train(args)
    elif args.do_test:
        test(args)
    else:
        raise ValueError('At least one of `do_train`, `do_test` must be True.')


# Script entry point; only runs when executed directly, not on import.
if __name__ == '__main__':
    main()
    # nlp_close()