# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import time
import os
from functools import partial

import paddle
from paddle.utils.download import get_path_from_url
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import AutoTokenizer
from paddlenlp.metrics import SpanEvaluator
from paddlenlp.utils.log import logger

from model import UIE
from evaluate import evaluate
from utils import set_seed, convert_example, reader, MODEL_MAP, create_data_loader



def do_train():
    """Fine-tune a UIE model for span-based information extraction.

    Reads hyperparameters from the module-level ``args`` namespace populated
    by the ``__main__`` argparse block. Downloads any missing pretrained
    resource files, builds the train/dev data loaders, trains with AdamW on a
    BCE loss over per-token start/end probabilities, logs progress every
    ``args.logging_steps`` steps, and every ``args.valid_steps`` steps saves a
    checkpoint, evaluates on the dev set, and keeps the best-F1 model under
    ``<save_dir>/model_best`` (rank 0 only).
    """
    paddle.set_device(args.device)
    rank = paddle.distributed.get_rank()
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()

    set_seed(args.seed)

    # Fetch any pretrained resource files that are not yet present locally.
    resource_file_urls = MODEL_MAP[args.model]['resource_file_urls']

    logger.info("Downloading resource files...")
    for key, val in resource_file_urls.items():
        file_path = os.path.join(args.model, key)
        if not os.path.exists(file_path):
            get_path_from_url(val, args.model)

    tokenizer = AutoTokenizer.from_pretrained(args.model)
    model = UIE.from_pretrained(args.model)

    train_ds = load_dataset(reader,
                            data_path=args.train_path,
                            max_seq_len=args.max_seq_len,
                            lazy=False)
    dev_ds = load_dataset(reader,
                          data_path=args.dev_path,
                          max_seq_len=args.max_seq_len,
                          lazy=False)

    trans_fn = partial(convert_example,
                       tokenizer=tokenizer,
                       max_seq_len=args.max_seq_len)

    train_data_loader = create_data_loader(train_ds,
                                           mode="train",
                                           batch_size=args.batch_size,
                                           trans_fn=trans_fn)
    dev_data_loader = create_data_loader(dev_ds,
                                         mode="dev",
                                         batch_size=args.batch_size,
                                         trans_fn=trans_fn)

    # Optionally warm-start from a previously saved checkpoint.
    if args.init_from_ckpt and os.path.isfile(args.init_from_ckpt):
        state_dict = paddle.load(args.init_from_ckpt)
        model.set_dict(state_dict)

    if paddle.distributed.get_world_size() > 1:
        model = paddle.DataParallel(model)

    optimizer = paddle.optimizer.AdamW(learning_rate=args.learning_rate,
                                       parameters=model.parameters())

    criterion = paddle.nn.BCELoss()
    metric = SpanEvaluator()

    loss_list = []
    global_step = 0
    best_f1 = 0
    tic_train = time.time()
    for epoch in range(1, args.num_epochs + 1):
        for batch in train_data_loader:
            # Batches come from utils.convert_example. Per example:
            #   input_ids      = [CLS] prompt [SEP] content [SEP],
            #                    truncated/zero-padded to max_seq_len. The
            #                    prompt names the target (an entity type such
            #                    as "person", or a relation description).
            #   token_type_ids = 0 over [CLS] prompt [SEP], 1 over the rest.
            #   att_mask       = 1 for every input token.
            #   pos_ids        = 0..len(input_ids)-1.
            #   start_ids/end_ids = 1 at the start/end positions of every
            #                    span in the example's "result_list", else 0.
            input_ids, token_type_ids, att_mask, pos_ids, start_ids, end_ids = batch

            # The model yields, for every token, the probability of being a
            # span start (start_prob) and a span end (end_prob); both are
            # shaped [batch_size, max_seq_len].
            start_prob, end_prob = model(input_ids, token_type_ids, att_mask,
                                         pos_ids)
            start_ids = paddle.cast(start_ids, 'float32')
            end_ids = paddle.cast(end_ids, 'float32')
            # Average the BCE losses of the start- and end-position heads.
            loss_start = criterion(start_prob, start_ids)
            loss_end = criterion(end_prob, end_ids)
            loss = (loss_start + loss_end) / 2.0
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()
            loss_list.append(float(loss))

            global_step += 1
            if global_step % args.logging_steps == 0 and rank == 0:
                time_diff = time.time() - tic_train
                loss_avg = sum(loss_list) / len(loss_list)
                logger.info(
                    "global step %d, epoch: %d, loss: %.5f, speed: %.2f step/s"
                    % (global_step, epoch, loss_avg,
                       args.logging_steps / time_diff))
                # Reset so the next report averages only its own interval,
                # consistent with the per-interval speed figure (and keeping
                # the list bounded).
                loss_list = []
                tic_train = time.time()

            if global_step % args.valid_steps == 0 and rank == 0:
                # Periodic checkpoint; unwrap DataParallel before saving.
                save_dir = os.path.join(args.save_dir, "model_%d" % global_step)
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                model_to_save = model._layers if isinstance(
                    model, paddle.DataParallel) else model
                model_to_save.save_pretrained(save_dir)
                # Silence the tokenizer's save chatter.
                logger.disable()
                tokenizer.save_pretrained(save_dir)
                logger.enable()

                precision, recall, f1 = evaluate(model, metric, dev_data_loader)
                logger.info(
                    "Evaluation precision: %.5f, recall: %.5f, F1: %.5f" %
                    (precision, recall, f1))
                if f1 > best_f1:
                    logger.info(
                        f"best F1 performence has been updated: {best_f1:.5f} --> {f1:.5f}"
                    )
                    best_f1 = f1
                    save_dir = os.path.join(args.save_dir, "model_best")
                    model_to_save = model._layers if isinstance(
                        model, paddle.DataParallel) else model
                    model_to_save.save_pretrained(save_dir)
                    logger.disable()
                    tokenizer.save_pretrained(save_dir)
                    logger.enable()
                # Exclude evaluation/saving time from the speed measurement.
                tic_train = time.time()


if __name__ == "__main__":
    # yapf: disable
    parser = argparse.ArgumentParser()

    # Dataset root. Alternatives used during experiments:
    #   './self-mark' -- self-annotated samples (entities + relations)
    #   './ner'       -- entity recognition only
    # './relation' targets joint entity + relation extraction, so prompts name
    # either an entity type (e.g. "person name") or a relation (e.g.
    # "executives of Xiaomi").
    path_prefix = './relation'

    # NOTE: a run that exits with code -1073740791 (0xC0000409) is likely GPU
    # out-of-memory -- shrink --batch_size (reducing 16 -> 2 fixed it here).
    # Suggested values: 8 for entity recognition, 2 for relation extraction.
    parser.add_argument("--batch_size", default=2, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument("--learning_rate", default=1e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--train_path", default=path_prefix + '/data/train.txt', type=str,
                        help="The path of train set.")
    parser.add_argument("--dev_path", default=path_prefix + '/data/dev.txt', type=str, help="The path of dev set.")
    parser.add_argument("--save_dir", default=path_prefix + '/checkpoint', type=str,
                        help="The output directory where the model checkpoints will be written.")
    # Upstream default is 512; smaller values make debug tensors readable
    # (e.g. 30 for entity recognition, 100 for relation extraction).
    parser.add_argument("--max_seq_len", default=100, type=int, help="The maximum input sequence length. "
                                                                     "Sequences longer than this will be split automatically.")
    # Suggested: 10 for entity recognition, 10-15 for relation extraction.
    parser.add_argument("--num_epochs", default=10, type=int, help="Total number of training epochs to perform.")
    parser.add_argument("--seed", default=1000, type=int, help="Random seed for initialization")
    parser.add_argument("--logging_steps", default=10, type=int, help="The interval steps to logging.")
    # Checkpoints are also saved every --valid_steps (suggested: 10 for entity
    # recognition, 4 for relation extraction).
    parser.add_argument("--valid_steps", default=4, type=int, help="The interval steps to evaluate model performance.")
    parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu",
                        help="Select which device to train model, defaults to gpu.")
    parser.add_argument("--model", choices=["uie-base", "uie-tiny", "uie-medium", "uie-mini", "uie-micro", "uie-nano"],
                        default="uie-base", type=str, help="Select the pretrained model for few-shot learning.")
    parser.add_argument("--init_from_ckpt", default=None, type=str,
                        help="The path of model parameters for initialization.")

    args = parser.parse_args()
    # yapf: enable

    do_train()
