# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import os
import sys
import argparse

import paddle

# Select the GPU before any project module builds tensors, so everything
# is created on the right device.
paddle.set_device('gpu')
print('device:', paddle.get_device())

# Make the parent directory importable so the paddle_xlm package resolves
# when this script is run directly (must happen before the imports below).
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from paddle_xlm.paddle_utils import bool_flag, initialize_exp
from paddle_xlm.evaluation.paddle_xnli import XNLI
from paddle_xlm.model.paddle_embedder import SentenceEmbedder


TASKS = ['XNLI']

# True on Windows dev boxes, False on the AI Studio Linux environment.
# NOTE: the original `"win" in sys.platform` also matched macOS, whose
# sys.platform is 'darwin'; startswith("win") only matches win32/cygwin.
ON_WINDOWS = sys.platform.startswith("win")

# parse parameters
parser = argparse.ArgumentParser(description='Train on GLUE or XNLI')

# main parameters
parser.add_argument("--exp_name", type=str, default="test_xnli_mlm_tlm",
                    help="Experiment name")
parser.add_argument("--dump_path", type=str, default="./dumped",
                    help="Experiment dump path")
parser.add_argument("--exp_id", type=str, default="",
                    help="Experiment ID")

# evaluation task / pretrained model
parser.add_argument("--transfer_tasks", type=str, default="XNLI",
                    help="Transfer tasks, example: 'MNLI-m,RTE,XNLI' ")
# Default model location differs between the local dev box and AI Studio.
model_path = ('../../t2p_modle/xnli_paddle_model.bin' if ON_WINDOWS
              else "/home/aistudio/xnli_paddle_model.bin")
parser.add_argument("--model_path", type=str, default=model_path,
                    help="Model location")

# data
parser.add_argument("--data_path", type=str, default="../../data/processed/XLM15",
                    help="Data path")
parser.add_argument("--max_vocab", type=int, default=95000,
                    help="Maximum vocabulary size (-1 to disable)")
parser.add_argument("--min_count", type=int, default=0,
                    help="Minimum vocabulary count")

# batch parameters
parser.add_argument("--max_len", type=int, default=256,
                    help="Maximum length of sentences (after BPE)")
parser.add_argument("--group_by_size", type=bool_flag, default=False,
                    help="Sort sentences by size during the training")
parser.add_argument("--batch_size", type=int, default=8,
                    help="Number of sentences per batch")
parser.add_argument("--max_batch_size", type=int, default=0,
                    help="Maximum number of sentences per batch (used in combination with tokens_per_batch, 0 to disable)")
parser.add_argument("--tokens_per_batch", type=int, default=-1,
                    help="Number of tokens per batch")

# model / optimization
parser.add_argument("--finetune_layers", type=str, default='0:_1',
                    help="Layers to finetune. 0 = embeddings, _1 = last encoder layer")
parser.add_argument("--weighted_training", type=bool_flag, default=False,
                    help="Use a weighted loss during training")
parser.add_argument("--dropout", type=float, default=0,
                    help="Fine-tuning dropout")
parser.add_argument("--optimizer_e", type=str, default="adam,lr=0.000025",
                    help="Embedder (pretrained model) optimizer")
parser.add_argument("--optimizer_p", type=str, default="adam,lr=0.000025",
                    help="Projection (classifier) optimizer")
parser.add_argument("--n_epochs", type=int, default=250,
                    help="Maximum number of epochs")
# When epoch_size != -1, every `epoch_size` iterations count as one epoch
# before moving on to the next one.
parser.add_argument("--epoch_size", type=int, default=20000,
                    help="Epoch size (-1 for full pass over the dataset)")

# debug
parser.add_argument("--debug_train", type=bool_flag, default=False,
                    help="Use valid sets for train sets (faster loading)")
parser.add_argument("--debug_slurm", type=bool_flag, default=False,
                    help="Debug multi-GPU / multi-node within a SLURM job")

# extra flags added on top of the upstream XLM script.
# NOTE: type=bool_flag is required here — without it, any command-line
# value (including the string "False") would be truthy.
# evaluation flags
parser.add_argument('--only_eval_mode', type=bool_flag, default=False,
                    help="不训练仅仅只评估模型")
parser.add_argument('--only_eval_test_set', type=bool_flag, default=False,
                    help="评估时仅仅会对测试集（test）评估，不会对验证集（valid）评估")
# data-loading flags
parser.add_argument('--only_load_test_set', type=bool_flag, default=False,
                    help="在评估模式下加快数据的加载速度")
# checkpoint flags
parser.add_argument('--save_checkpoint', type=bool_flag, default=True,
                    help="是否在训练时存储模型")
parser.add_argument('--save_checkpoint_path', type=str, default="checkpoint.pth",
                    help="checkpoint 路径")
parser.add_argument('--load_checkpoint_path', type=str, default='',
                    help="checkpoint 路径")
# GPU flag
parser.add_argument('--use_gpu', type=bool_flag, default=True,
                    help="Use GPU for training/evaluation")

# parse parameters
params = parser.parse_args()
# Batching by token count only makes sense with length-sorted batches.
if params.tokens_per_batch > -1:
    params.group_by_size = True

# check parameters — fail fast with explicit errors instead of asserts,
# which are silently stripped when Python runs with -O
if not os.path.isdir(params.data_path):
    raise NotADirectoryError("data_path is not a directory: %s" % params.data_path)
if not os.path.isfile(params.model_path):
    raise FileNotFoundError("model_path is not a file: %s" % params.model_path)

# reload pretrained model
embedder = SentenceEmbedder.reload(params.model_path, params)

# reload langs from the pretrained model so fine-tuning reuses the exact
# language-id mapping the embedder was trained with
params.n_langs = embedder.pretrain_params['n_langs']
params.id2lang = embedder.pretrain_params['id2lang']
params.lang2id = embedder.pretrain_params['lang2id']

# initialize the experiment (logger, dump directory, seed, ...)
logger = initialize_exp(params)
scores = {}

# build the XNLI trainer/evaluator and run the fine-tuning loop
xnli = XNLI(embedder, scores, params)
xnli.run()
