import os
import pathlib
import pickle
import torch
from fastNLP import logger
from torch.utils.tensorboard import SummaryWriter
import nlp_trainers
from args import args
from nlp_model.cnnbilstm import get_model
from utils import init_task, set_seed


def main():
    """Entry point: seed RNGs, allocate a fresh run directory, set up
    file logging, then dispatch to the trainer named by ``args.trainer``.

    Side effects: creates ``{args.log_dir}/{args.exp_name}~try={i}``,
    mutates ``args.exp_name`` / ``args.run_base_dir``, writes
    ``settings.txt`` and ``train.log`` into the run directory.
    """
    # Seed all RNGs for reproducibility when a seed was supplied.
    if args.seed is not None:
        set_seed(args.seed)

    # Find an unused run directory by bumping the "~try=N" suffix.
    # Creating eagerly and catching FileExistsError avoids the
    # exists()-then-makedirs() race of the check-before-create pattern.
    i = 0
    while True:
        run_base_dir = pathlib.Path(
            f"{args.log_dir}/{args.exp_name}~try={str(i)}")
        try:
            os.makedirs(run_base_dir)
        except FileExistsError:
            i += 1
            continue
        args.exp_name = args.exp_name + f"~try={i}"
        break

    # Persist the full argument set next to the run artifacts.
    (run_base_dir / "settings.txt").write_text(str(args))
    args.run_base_dir = run_base_dir

    # Mirror log output into the run directory.
    logger.add_file(os.path.join(args.run_base_dir, "train.log"))
    logger.info(args)
    logger.info(f"=> Saving data in {run_base_dir}")

    # Dispatch on the configured training scheme (different baselines
    # for the Conll2003 dataset). Fail loudly on an unknown trainer
    # instead of returning silently.
    if args.trainer == "trainer_batch_first":
        run_batch_first()
    elif args.trainer == "trainer_task_first":
        run_task_first()
    else:
        raise ValueError(f"Unknown trainer: {args.trainer!r}")


def run_batch_first():
    """Train all tasks jointly (batch-first) with the configured trainer,
    optionally saving a final checkpoint under ``args.run_base_dir``."""
    writer = SummaryWriter(log_dir=args.run_base_dir)
    trainer = getattr(nlp_trainers, args.trainer)

    # Load the pickled task list and vocabularies (CoNLL-03 data),
    # re-initializing each task object.
    with open(args.task_lst_name, "rb") as fin:
        payload = pickle.load(fin)
    vocabs = payload["vocabs"]
    task_lst = [init_task(t) for t in payload["task_lst"]]

    for t in task_lst:
        if args.debug:
            # Shrink the splits and the epoch count for a quick smoke run.
            t.train_set = t.train_set[:200]
            t.dev_set = t.dev_set[:200]
            t.test_set = t.test_set[:3200]
            args.epochs = 3
        t.init_data_loader(args.batch_size)

    model = get_model(args, task_lst, vocabs)
    model.to(args.device)

    trainer.train_nlp(model, task_lst, writer, vocabs)

    if args.save:
        checkpoint = {
            "epoch": args.epochs,
            "arch": args.model,
            "state_dict": model.state_dict(),
            "args": args,
        }
        torch.save(checkpoint, args.run_base_dir / "final.pt")


def run_task_first():
    """Train the single task selected by ``args.task_id`` with the
    configured task-first trainer.

    Loads the pickled task list and vocabularies from
    ``args.task_lst_name``, builds the model, and delegates training to
    ``getattr(nlp_trainers, args.trainer).train_nlp``.
    """
    # Load the pickled task list and vocabularies; the train/dev/test
    # splits of the tasks are aligned one-to-one.
    with open(args.task_lst_name, "rb") as f:
        ptask = pickle.load(f)
        task_lst = ptask["task_lst"]
        vocabs = ptask["vocabs"]
        task_lst = [init_task(task) for task in task_lst]

    for task in task_lst:
        if args.debug:
            # Shrink the splits and the epoch count for a quick smoke run.
            task.train_set = task.train_set[:200]
            task.dev_set = task.dev_set[:200]
            task.test_set = task.test_set[:3200]
            args.epochs = 3
        task.init_data_loader(args.batch_size)

    task_db = task_lst[args.task_id]

    writer = SummaryWriter(log_dir=args.run_base_dir)

    logger.info("task name: {}, task id: {}".format(task_db.task_name,
                                                    task_db.task_id))

    logger.info("train len {}, dev len {}, test len {}".format(
        len(task_db.train_set), len(task_db.dev_set), len(task_db.test_set)))

    model = get_model(args, task_lst, vocabs)
    model.to(args.device)

    trainer = getattr(nlp_trainers, args.trainer)
    trainer.train_nlp(model, task_lst, writer, args.task_id, vocabs)


# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()