"""
python -m accelerate.commands.launch --config_file=acc.ddp.conf train_it_steplr_accelerate.py

https://huggingface.co/docs/transformers/tasks/sequence_classification
"""
from PyCmpltrtok.common import sep, rand_name_on_now, get_dir_name_ext, has_content
import logging
import os
import copy

# --- Logging setup: everything is written to a <script>.tmp.log file next to this script ---
LOG_FORMAT = "%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(name)s: %(message)s"
DIR, BASE, EXT = get_dir_name_ext(os.path.abspath(__file__))
PATH = os.path.join(DIR, f'{BASE}.tmp.log')
logging.basicConfig(
    level=logging.DEBUG,
    filename=PATH,
    format=LOG_FORMAT,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Also capture the internal loggers of transformers' TrainingArguments / Trainer.
logger_ta = logging.getLogger('transformers.training_args')
logger_ta.setLevel(logging.DEBUG)

logger_t = logging.getLogger('transformers.trainer')
logger_t.setLevel(logging.DEBUG)

# Explicit FileHandler pointing at the same log file as basicConfig above.
hdl = logging.FileHandler(filename=PATH)
hdl.setLevel(logging.DEBUG)
hdl.setFormatter(logging.Formatter(LOG_FORMAT))

# NOTE(review): `logger` also propagates to the root logger configured by
# basicConfig, so its records may land in PATH twice — confirm the extra
# handler here is intentional (the transformers loggers may need it if the
# library disables propagation on its own logger tree).
logger.addHandler(hdl)
logger_ta.addHandler(hdl)
logger_t.addHandler(hdl)

import sys
import torch
import re
import numpy as np
import math
import transformers

from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
from transformers import AutoConfig
from transformers import DataCollatorWithPadding
from transformers import DistilBertForSequenceClassification
from transformers import Trainer
from transformers import TrainerState
from transformers import TrainerControl
from transformers import TrainingArguments
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR

from datasets import load_dataset, DatasetDict
import evaluate

from PyCmpltrtok.auth.mongo.conn import conn
import tvts.tvts as tvts

IS_AUTODL = 0  # Where this runs: 1 for the AutoDL cloud, 0 for WSL

sep('Choose GPU')
# HuggingFace's default behaviour seems to be to occupy every visible GPU; on a
# 2-GPU machine the effective batch_size doubles and num_epoch is effectively halved.
# https://discuss.huggingface.co/t/setting-specific-device-for-trainer/784/18
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

sep('Anti-GWF')

sep('PYTHONPATH')
print(sys.path)

########################################################################################################################
sep('Hyper Parameters')
# INIT_DIR = 'distilbert-base-uncased'  # model id when downloading from the hub
INIT_DIR = '/home/yunpeng/.cache/huggingface/hub/models--distilbert-base-uncased/snapshots/6cdc0aad91f5ae2e6712e91bc7b65d1cf5c05411'

IS_TEMP = 1  # whether to run on only a subset of the data
TF32 = 0
BF16 = 0
VAL_LEN_FINAL = 2048  # the imdb test split has 25000 rows, too many — cap it with this
if IS_TEMP:
    TRAIN_LEN = 1024  # number of training rows for the partial run
    VAL_LEN = 256  # number of validation rows for the partial run
    M = 2  # validate M times per epoch (ceil)  # just for test
    N = 2  # number of epochs
else:
    TRAIN_LEN = 20000
    VAL_LEN = VAL_LEN_FINAL
    M = 5  # validate M times per epoch (ceil)
    N = 1  # number of epochs
IS_FREEZE = 0  # whether to freeze the pre-trained parameters
IS_FROM_SCRATCH = 1   # whether to train from scratch

BATCH_SIZE = 32  # batch size
GRAD_ACC = 2
N_SAVE = 1  # save a checkpoint once every N_SAVE validations

LR = 2e-5  # learning rate
GAMMAR = 0.997  # LR decay factor for StepLR  # NOTE(review): likely a typo for "GAMMA"
WEIGHT_DECAY = 0.01
DROP_OUT = 0.1
HIDDEN = 256
DEV = 1
N_GPU = torch.cuda.device_count()

if TF32:
    # Enable TensorFloat-32 matmuls/convolutions on Ampere+ GPUs.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
if BF16:
    bf16dict = {
        # ValueError: Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0
        'bf16': True,
    }
else:
    bf16dict = {}

if IS_AUTODL:
    # Base of the checkpoint path on the AutoDL cloud (run details are appended below).
    OUTPUT_DIR_BASE = "/root/autodl-tmp/checkpoints/distilbert-base-uncased.on.imdb/distilbert-base-uncased.on.imdb_"
else:
    # Base of the checkpoint path on WSL (run details are appended below).
    OUTPUT_DIR_BASE = "/home/yunpeng/checkpoints/distilbert-base-uncased.on.imdb/distilbert-base-uncased.on.imdb_"
# Append run configuration to the base path: whether this is a partial run
# (_temp0 / _temp1) and, if partial, how many rows, plus key hyper-parameters.
OUTPUT_DIR = OUTPUT_DIR_BASE + f'_temp{IS_TEMP}{"_len" + str(TRAIN_LEN) if IS_TEMP else "_all"}_freeze{IS_FREEZE}_batch{BATCH_SIZE}_drop{DROP_OUT}_003'
xis_dir, xhas_content = has_content(OUTPUT_DIR)
if xhas_content:
    # Refuse to clobber an existing non-empty output directory.
    error = f'The output dir |{OUTPUT_DIR}| is not empty! Cannot go on!'
    print(error, file=sys.__stderr__, flush=True)
    print(error, file=sys.stderr, flush=True)
    sys.exit(1)

print('INIT_DIR:', INIT_DIR)
print('IS_TEMP:', IS_TEMP)
print('TF32', TF32)
print('TRAIN_LEN:', TRAIN_LEN)
print('VAL_LEN:', VAL_LEN)
print('IS_FREEZE:', IS_FREEZE)
print('IS_FROM_SCRATCH:', IS_FROM_SCRATCH)
print('OUTPUT_DIR:', OUTPUT_DIR)
print('BATCH_SIZE:', BATCH_SIZE)
print('N:', N)
print('LR:', LR)
print('DROP_OUT:', DROP_OUT)
print('HIDDEN:', HIDDEN)
print('DEV:', DEV)

########################################################################################################################
sep('Model')
# Binary sentiment labels for the IMDB task.
id2label = {0: "NEGATIVE", 1: "POSITIVE"}
label2id = {"NEGATIVE": 0, "POSITIVE": 1}

# DistilBERT checkpoint with a 2-way sequence-classification head.
model = AutoModelForSequenceClassification.from_pretrained(
    INIT_DIR, num_labels=2, id2label=id2label, label2id=label2id,
)
print(model)

########################################################################################################################
sep('Load dataset')
# Dataset id when downloading from the hub:
# DATASET_DIR = 'imdb'
# Local path after the download:
DATASET_DIR = '/home/yunpeng/.cache/huggingface/datasets/imdb/plain_text/0.0.0/e6281661ce1c48d982bc483cf8a173c1bbeb5d31'

imdb = load_dataset(DATASET_DIR)
print(imdb)

# Shuffle deterministically, then cut the splits down to the configured sizes.
imdb_rn = imdb.shuffle(seed=666)
if IS_TEMP:
    imdb_rn_train = imdb_rn['train'].select(range(TRAIN_LEN))
    imdb_rn_test = imdb_rn['test'].select(range(VAL_LEN))
else:
    imdb_rn_train = imdb_rn['train']
    imdb_rn_test = imdb_rn['test'].select(range(VAL_LEN_FINAL))
print(imdb_rn_train, imdb_rn_test)
if not IS_TEMP:
    # For a full run, sync TRAIN_LEN/VAL_LEN with the datasets actually used.
    # BUGFIX: VAL_LEN previously took imdb['test'].num_rows (25000) even though
    # the eval set was truncated to VAL_LEN_FINAL rows above; read the sizes
    # from the truncated datasets instead.
    TRAIN_LEN = imdb_rn_train.num_rows
    VAL_LEN = imdb_rn_test.num_rows
print('TRAIN_LEN:', TRAIN_LEN)
print('VAL_LEN:', VAL_LEN)

# Re-pack the (possibly truncated) splits into a DatasetDict.
imdb_rn = DatasetDict()
imdb_rn['train'] = imdb_rn_train
imdb_rn['test'] = imdb_rn_test
print('imdb_rn:', imdb_rn)

sep('Train[:4]')
print(imdb_rn['train'][:4])

sep('Test[:4]')
print(imdb_rn['test'][:4])

########################################################################################################################
sep('Pre-process')
sep('Tokenizer')
tokenizer = AutoTokenizer.from_pretrained(INIT_DIR)
print(tokenizer)
sep('Config')
config = AutoConfig.from_pretrained(INIT_DIR)
print(config)
print('config.max_position_embeddings', config.max_position_embeddings)
print('tokenizer.max_model_input_sizes', tokenizer.max_model_input_sizes)
print('tokenizer.model_max_length', tokenizer.model_max_length)
# The usable sequence length is bounded by both the model's position-embedding
# table and the tokenizer's registered maximum for this checkpoint.
max_length = min(config.max_position_embeddings, tokenizer.max_model_input_sizes['distilbert-base-uncased'])
print('max_length', max_length)


def preprocess_function(examples):
    """Tokenize a batch of IMDB examples for DistilBERT.

    Args:
        examples: a batch from ``datasets.Dataset.map(batched=True)``; only the
            "text" column is read.

    Returns:
        The tokenizer's encoding (input_ids / attention_mask), truncated to the
        model's maximum length and padded to the longest sequence in the batch.
    """
    return tokenizer(
        examples["text"],
        # BUGFIX: use the limit computed from the model config / tokenizer above
        # instead of a hard-coded 512 (identical for this checkpoint, but stays
        # correct if INIT_DIR is switched to another model).
        max_length=max_length,
        truncation=True,

        # padding='max_length',  # https://stackoverflow.com/questions/70067608/how-padding-in-huggingface-tokenizer-works
        # https://stackoverflow.com/questions/61443480/huggingfaces-bert-tokenizer-not-adding-pad-token
        padding=True,  # pad to the longest sequence within each map() batch
    )


tokenized_imdb = imdb_rn.map(preprocess_function, batched=True, batch_size=BATCH_SIZE*2)
print(tokenized_imdb)

# Sanity checks: per-row token counts, the first labels, and the first encoded row.
sep('Check')
for i in range(min(100, TRAIN_LEN)):
    print(len(tokenized_imdb['train'][i]['input_ids']), end=', ')
sep("tokenized_imdb['train']['label'][:100]")
print(tokenized_imdb['train']['label'][:100])
sep("tokenized_imdb['train'][0]")
print(tokenized_imdb['train'][0]['input_ids'])
print(len(tokenized_imdb['train'][0]['input_ids']))
print(tokenized_imdb['train'][0]['attention_mask'])
print(len(tokenized_imdb['train'][0]['attention_mask']))

########################################################################################################################
sep('Collator')
# Dynamically pads each training batch to its longest sequence at collate time.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
print(data_collator)

########################################################################################################################
sep('Evaluate')
# EVAL_DIR = "accuracy"  # metric id when downloading from the hub
# Local path after the download. Unlike models/datasets, evaluate.load must be
# given the path of the metric script itself (xxx/accuracy.py), not a directory.
EVAL_DIR = '/home/yunpeng/.cache/huggingface/modules/evaluate_modules/metrics/evaluate-metric--accuracy/f887c0aab52c2d38e1f8a215681126379eca617f96c447638f751434e8e65b14/accuracy.py'
accuracy = evaluate.load(EVAL_DIR)
print(accuracy)


def compute_metrics(eval_pred):
    """Turn the Trainer's (logits, labels) eval output into an accuracy dict."""
    logits, labels = eval_pred
    # Highest-scoring class per row, scored against the reference labels.
    predicted_classes = np.argmax(logits, axis=1)
    return accuracy.compute(predictions=predicted_classes, references=labels)


########################################################################################################################
sep('Training config')

# One "validation interval" in optimizer steps: effective batches per epoch
# divided by the M validations wanted per epoch.
raw_steps = math.floor(TRAIN_LEN / M / BATCH_SIZE / GRAD_ACC / N_GPU)
print('STEPS:', raw_steps)
# Guard against a zero interval when the dataset is tiny.
STEPS = max(1, raw_steps)
print('STEPS after max(1, x):', STEPS)
STEPS_SAVE = N_SAVE * STEPS
print('STEPS_SAVE:', STEPS_SAVE)

sep('TVTS')
# TVTS: training-visualization bookkeeping backed by a local MongoDB instance.
mongo = conn('local')
mdb = mongo['tvts']
TVTS_NAME = 'distilbert_base_uncased_on_imdb06'
SAVE_DIR = OUTPUT_DIR
MEMO = ''
if TF32:
    MEMO += ' (TF32)'
if BF16:
    MEMO += ' (BF16)'

ts = tvts.Tvts(
    TVTS_NAME,
    memo=MEMO,
    is_temp=not not IS_TEMP,  # coerce the 0/1 flag to bool
    mongo_link=mongo,
    save_dir=SAVE_DIR,
    params = {
        'lr': LR,
        'batch_size': BATCH_SIZE,
        'epochs': N,
        'eval_steps': STEPS,
        'save_steps': STEPS_SAVE,
        'decay': WEIGHT_DECAY,
    }
)
print(ts)
# Ready-made command line for launching the TVTS viewer for this run.
print(f'python tvts.py --link local -m "loss|eval_loss,eval_accuracy" --batch_metrics "loss" -k "eval_accuracy" --hyper "learning_rate" --save_dir "{SAVE_DIR}" "{TVTS_NAME}"')

sep('Config it')
print('OUTPUT_DIR:', OUTPUT_DIR)
print('NAME:', TVTS_NAME)

# ImportError: Using the `Trainer` with `PyTorch` requires `accelerate>=0.20.1`: Please run `pip install transformers[torch]` or `pip install accelerate -U`
# pip install accelerate

# https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments
training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
    learning_rate=LR,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=BATCH_SIZE,
    gradient_accumulation_steps=GRAD_ACC,
    num_train_epochs=N,
    weight_decay=WEIGHT_DECAY,

    # "no": No evaluation is done during training.
    # "steps": Evaluation is done (and logged) every eval_steps.
    # "epoch": Evaluation is done at the end of each epoch.
    # NOTE(review): newer transformers renamed this to `eval_strategy` — confirm
    # against the pinned version.
    evaluation_strategy="steps",
    eval_steps=STEPS,

    # "no": No save is done during training.
    # "epoch": Save is done at the end of each epoch.
    # "steps": Save is done every save_steps.
    save_strategy="steps",
    save_steps=STEPS_SAVE,

    # googled: "training loss is no log"
    # https://github.com/huggingface/transformers/issues/8910
    logging_steps=1,

    load_best_model_at_end=True,

    # push_to_hub=True, # whether to publish the model to the HuggingFace hub
    
    # NOTE(review): `split_batches` is version-sensitive (later transformers move
    # it into `accelerator_config`) — verify it is accepted by the pinned version.
    split_batches=False,
    # split_batches=True,
    
    **bf16dict,
)

# Project-local Trainer callback that mirrors metrics into TVTS.
from PyCmpltrtok.common_hf import LogCallback
log_callback = LogCallback(ts, logger, STEPS, STEPS_SAVE, SAVE_DIR)

# NOTE(review): transformers' own AdamW has long been deprecated in favor of
# torch.optim.AdamW and is removed from recent transformers releases — confirm
# the pinned version still ships it.
from transformers.optimization import AdamW
from transformers.trainer_pt_utils import get_parameter_names
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS

# Replicate Trainer's default parameter grouping: apply weight decay to all
# parameters except biases and LayerNorm weights.
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
    {
        "params": [
            p for n, p in model.named_parameters() if (n in decay_parameters and p.requires_grad)
        ],
        "weight_decay": WEIGHT_DECAY,
    },
    {
        "params": [
            p for n, p in model.named_parameters() if (n not in decay_parameters and p.requires_grad)
        ],
        "weight_decay": 0.0,
    },
]
# Adam hyper-parameters taken from the TrainingArguments defaults above.
optimizer_kwargs = {
    'lr': LR,
    "betas": (training_args.adam_beta1, training_args.adam_beta2),
    "eps": training_args.adam_epsilon,
}
print('optimizer_kwargs', optimizer_kwargs)
opt = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)

# Hand the Trainer the custom optimizer plus a StepLR schedule. With
# step_size=1 the LR is multiplied by GAMMAR on every scheduler step
# (NOTE(review): HF Trainer steps the scheduler once per optimizer update —
# confirm that cadence is what the decay rate was tuned for).
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_imdb['train'],
    eval_dataset=tokenized_imdb['test'],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    callbacks=[log_callback],
    optimizers=(opt, torch.optim.lr_scheduler.StepLR(opt, step_size=1, gamma=GAMMAR, verbose=True))
)

########################################################################################################################
sep('Start to train')
trainer.train(
    # Flip to True to resume from the latest checkpoint in OUTPUT_DIR
    # (the dir is verified empty at startup, so a fresh run starts from scratch).
    # resume_from_checkpoint=True,
    resume_from_checkpoint=False,
)

########################################################################################################################
sep('All over')
