"""
This script goes in the wrong direction!
Kept only for reference.
For a correct script, refer to train_it_steplr_on_ckpt.py.

https://huggingface.co/docs/transformers/tasks/sequence_classification
"""
from PyCmpltrtok.common import sep, rand_name_on_now, get_dir_name_ext
import logging
import os
import copy

LOG_FORMAT = "%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(name)s: %(message)s"
# Log file lives next to this script: <script_basename>.tmp.log
DIR, BASE, EXT = get_dir_name_ext(os.path.abspath(__file__))
PATH = os.path.join(DIR, f'{BASE}.tmp.log')

# Root config: route every DEBUG+ record into the .tmp.log file.
logging.basicConfig(
    level=logging.DEBUG,
    filename=PATH,
    format=LOG_FORMAT,
)

logger = logging.getLogger(__name__)
logger_ta = logging.getLogger('transformers.training_args')
logger_t = logging.getLogger('transformers.trainer')

# One shared file handler for this script's logger plus the two
# transformers loggers we want verbose output from.
hdl = logging.FileHandler(filename=PATH)
hdl.setLevel(logging.DEBUG)
hdl.setFormatter(logging.Formatter(LOG_FORMAT))

for _lg in (logger, logger_ta, logger_t):
    _lg.setLevel(logging.DEBUG)
    _lg.addHandler(hdl)

import sys
import torch
import re
import numpy as np
import math
import transformers

from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
from transformers import AutoConfig
from transformers import DataCollatorWithPadding
from transformers import DistilBertForSequenceClassification
from transformers import Trainer
from transformers import TrainerState
from transformers import TrainerControl
from transformers import TrainingArguments
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR

from datasets import load_dataset, DatasetDict
import evaluate

from PyCmpltrtok.auth.mongo.conn import conn
import tvts.tvts as tvts

IS_AUTODL = 0  # Whether this runs on the AutoDL cloud: 1 = AutoDL, 0 = WSL.

sep('Choose GPU')
# HuggingFace's Trainer appears to grab all visible GPUs; on this 2-GPU machine
# the effective batch_size doubles and num_epoch is effectively halved.
# https://discuss.huggingface.co/t/setting-specific-device-for-trainer/784/18
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

sep('Anti-GWF')

sep('PYTHONPATH')
print(sys.path)

########################################################################################################################
sep('Hyper Parameters')
# Hub id for downloading the model from the internet:
# INIT_DIR = 'distilbert-base-uncased'
# Local cache path after the download:
INIT_DIR = '/home/yunpeng/.cache/huggingface/hub/models--distilbert-base-uncased/snapshots/6cdc0aad91f5ae2e6712e91bc7b65d1cf5c05411'

IS_TEMP = 1  # Whether to run on only a subset of the data.
VAL_LEN_FINAL = 2048  # The imdb test split has 25000 rows — too many; cap the final eval set at this size.
if IS_TEMP:
    TRAIN_LEN = 256  # Number of training rows when not running on the full data.
    VAL_LEN = 32  # Number of validation rows when not running on the full data.
else:
    TRAIN_LEN = None
    VAL_LEN = None
IS_FREEZE = 0  # Whether to freeze the pre-trained parameters.
IS_FROM_SCRATCH = 1   # Whether to train from scratch.

BATCH_SIZE = 16  # Batch size (per device).
GRAD_ACC = 2
N = 2  # Number of epochs (added on top of what was already trained; see num_train_epochs below).
# M = 5  # Validate M times per epoch (ceil).
M = 2  # Validate M times per epoch (ceil).  # just for test
N_SAVE = 1  # Save once every N_SAVE validations.

# TVTS parent training id / parent epoch to resume from — consumed by ts.resume(PI, PE) below.
PI = 10
# PE = 0
PE = 1.2

LR = 2e-5  # Learning rate.
WEIGHT_DECAY = 0.01
DROP_OUT = 0.1
HIDDEN = 256
DEV = 1
N_GPU = torch.cuda.device_count()

if IS_AUTODL:
    # Base of the save path on the AutoDL cloud (the actual path appends a suffix to this base).
    OUTPUT_DIR_BASE = "/root/autodl-tmp/checkpoints/distilbert-base-uncased.on.imdb/distilbert-base-uncased.on.imdb_"
else:
    # Base of the save path on WSL (the actual path appends a suffix to this base).
    OUTPUT_DIR_BASE = "/home/yunpeng/checkpoints/distilbert-base-uncased.on.imdb/distilbert-base-uncased.on.imdb_"
# Append the date and a random name to the base, plus whether this is a partial run
# (_temp0 / _temp1), the subset size, and the main hyper-parameters.
OUTPUT_DIR = OUTPUT_DIR_BASE + rand_name_on_now() + f'_temp{IS_TEMP}{"_len" + str(TRAIN_LEN) if IS_TEMP else "_all"}_freeze{IS_FREEZE}_batch{BATCH_SIZE}_drop{DROP_OUT}'

print('INIT_DIR:', INIT_DIR)
print('IS_TEMP:', IS_TEMP)
print('TRAIN_LEN:', TRAIN_LEN)
print('VAL_LEN:', VAL_LEN)
print('IS_FREEZE:', IS_FREEZE)
print('IS_FROM_SCRATCH:', IS_FROM_SCRATCH)
print('OUTPUT_DIR:', OUTPUT_DIR)
print('BATCH_SIZE:', BATCH_SIZE)
print('N:', N)
print('LR:', LR)
print('DROP_OUT:', DROP_OUT)
print('HIDDEN:', HIDDEN)
print('DEV:', DEV)

########################################################################################################################
sep('Model')
# Binary sentiment labels for the IMDB task.
id2label = {0: "NEGATIVE", 1: "POSITIVE"}
label2id = {"NEGATIVE": 0, "POSITIVE": 1}

# Load DistilBERT with a freshly initialized 2-class classification head.
model = AutoModelForSequenceClassification.from_pretrained(
    INIT_DIR, num_labels=2, id2label=id2label, label2id=label2id,
)
print(model)

########################################################################################################################
sep('Load dataset')
# Hub id for downloading from the internet:
# DATASET_DIR = 'imdb'
# Local cache path after the download:
DATASET_DIR = '/home/yunpeng/.cache/huggingface/datasets/imdb/plain_text/0.0.0/e6281661ce1c48d982bc483cf8a173c1bbeb5d31'

imdb = load_dataset(DATASET_DIR)
print(imdb)

# Shuffle once so the subsets selected below are random samples.
imdb_rn = imdb.shuffle()
if IS_TEMP:
    imdb_rn_train = imdb_rn['train'].select(range(TRAIN_LEN))
    imdb_rn_test = imdb_rn['test'].select(range(VAL_LEN))
else:
    imdb_rn_train = imdb_rn['train']
    imdb_rn_test = imdb_rn['test'].select(range(VAL_LEN_FINAL))
print(imdb_rn_train, imdb_rn_test)
if not IS_TEMP:  # On a full run, set TRAIN_LEN and VAL_LEN to the full split lengths.
    # NOTE(review): VAL_LEN becomes the full 25000 here even though the eval set
    # was capped to VAL_LEN_FINAL above — it is only printed later, but verify.
    TRAIN_LEN = imdb['train'].num_rows
    VAL_LEN = imdb['test'].num_rows
print('TRAIN_LEN:', TRAIN_LEN)
print('VAL_LEN:', VAL_LEN)

# Rebuild a DatasetDict holding just the (possibly subset) train/test splits.
imdb_rn = DatasetDict()
imdb_rn['train'] = imdb_rn_train
imdb_rn['test'] = imdb_rn_test
print('imdb_rn:', imdb_rn)

sep('Train[:4]')
print(imdb_rn['train'][:4])

sep('Test[:4]')
print(imdb_rn['test'][:4])

########################################################################################################################
sep('Pre-process')
sep('Tokenizer')
tokenizer = AutoTokenizer.from_pretrained(INIT_DIR)
print(tokenizer)
sep('Config')
config = AutoConfig.from_pretrained(INIT_DIR)
print(config)
print('config.max_position_embeddings', config.max_position_embeddings)
print('tokenizer.max_model_input_sizes', tokenizer.max_model_input_sizes)
print('tokenizer.model_max_length', tokenizer.model_max_length)
# NOTE(review): max_model_input_sizes is keyed by the hub model id, not by the
# local INIT_DIR path — fine for 'distilbert-base-uncased', but this lookup
# would KeyError if the base checkpoint changes; confirm before swapping models.
max_length = min(config.max_position_embeddings, tokenizer.max_model_input_sizes['distilbert-base-uncased'])
print('max_length', max_length)


def preprocess_function(examples):
    """Tokenize a batch of raw IMDB texts for DistilBERT.

    :param examples: A batch dict from ``Dataset.map(batched=True)`` with a
        ``"text"`` list.
    :return: The tokenizer's batch encoding (input_ids, attention_mask, ...).
    """
    return tokenizer(
        examples["text"],
        # Use the limit computed above from the config/tokenizer instead of a
        # hard-coded 512, so this stays correct if the base checkpoint changes
        # (identical value — 512 — for distilbert-base-uncased).
        max_length=max_length,
        truncation=True,

        # padding='max_length',  # https://stackoverflow.com/questions/70067608/how-padding-in-huggingface-tokenizer-works
        # https://stackoverflow.com/questions/61443480/huggingfaces-bert-tokenizer-not-adding-pad-token
        # Pads to the longest sequence in each map() batch; DataCollatorWithPadding
        # re-pads per training batch anyway.
        padding=True,
    )


# Tokenize the whole DatasetDict; map() feeds preprocess_function batches of
# BATCH_SIZE*2 rows at a time.
tokenized_imdb = imdb_rn.map(preprocess_function, batched=True, batch_size=BATCH_SIZE*2)
print(tokenized_imdb)

sep('Check')
# Sanity check: token lengths of the first (up to) 100 training samples.
for i in range(min(100, TRAIN_LEN)):
    print(len(tokenized_imdb['train'][i]['input_ids']), end=', ')
sep("tokenized_imdb['train']['label'][:100]")
print(tokenized_imdb['train']['label'][:100])
sep("tokenized_imdb['train'][0]")
print(tokenized_imdb['train'][0]['input_ids'])
print(len(tokenized_imdb['train'][0]['input_ids']))
print(tokenized_imdb['train'][0]['attention_mask'])
print(len(tokenized_imdb['train'][0]['attention_mask']))

########################################################################################################################
sep('Collator')
# Dynamically pads each batch to its longest sequence at collation time.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
print(data_collator)

########################################################################################################################
sep('Evaluate')
# EVAL_DIR = "accuracy"  # Hub id for downloading from the internet.
# Local cache path after the download. (Special case: evaluate.load needs the
# path down to the script file xxx/accuracy.py — a bare directory will not do,
# unlike models and datasets.)
EVAL_DIR = '/home/yunpeng/.cache/huggingface/modules/evaluate_modules/metrics/evaluate-metric--accuracy/f887c0aab52c2d38e1f8a215681126379eca617f96c447638f751434e8e65b14/accuracy.py'
accuracy = evaluate.load(EVAL_DIR)
print(accuracy)


def compute_metrics(eval_pred):
    """Accuracy hook for the HF Trainer.

    `eval_pred` is a (logits, labels) pair; reduce logits to class ids
    and delegate to the loaded `accuracy` metric.
    """
    logits, labels = eval_pred
    class_ids = np.argmax(logits, axis=1)
    return accuracy.compute(predictions=class_ids, references=labels)


########################################################################################################################
sep('Training config')

# Optimizer updates between evaluations: one epoch's worth of update steps
# (rows / batch / grad-accumulation / GPUs) divided by M evaluations per epoch.
STEPS = math.floor(TRAIN_LEN / M / BATCH_SIZE / GRAD_ACC / N_GPU)
print('STEPS:', STEPS)
# Clamp to at least 1 so tiny subsets still evaluate.
STEPS = max(1, STEPS)
print('STEPS after max(1, x):', STEPS)
# Save once every N_SAVE evaluations.
STEPS_SAVE = STEPS * N_SAVE
print('STEPS_SAVE:', STEPS_SAVE)

sep('TVTS')
# TVTS = training-visualization/tracking store backed by a local MongoDB.
mongo = conn('local')
mdb = mongo['tvts']
TVTS_NAME = 'distilbert_base_uncased_on_imdb06'
SAVE_DIR = OUTPUT_DIR
MEMO = 'jupyter'

ts = tvts.Tvts(
    TVTS_NAME,
    memo=MEMO,
    is_temp=not not IS_TEMP,  # double-not coerces the 0/1 flag to bool
    mongo_link=mongo,
    save_dir=SAVE_DIR,
    params = {
        'lr': LR,
        'batch_size': BATCH_SIZE,
        'epochs': N,
        'eval_steps': STEPS,
        'save_steps': STEPS_SAVE,
        'decay': WEIGHT_DECAY,
        'n_gpu': N_GPU,
    }
)
print(ts)
# Ready-to-paste command line for inspecting this run with the tvts viewer.
print(f'python tvts.py --link local -m "loss|eval_loss,eval_accuracy" --batch_metrics "loss" -k "eval_accuracy" --hyper "learning_rate" --save_dir "{SAVE_DIR}" "{TVTS_NAME}"')

sep('TVTS resume')
print(f'PI={PI}, PE={PE}')
# Ask TVTS for the checkpoint recorded at epoch PE of parent training PI.
r = ts.resume(PI, PE)
if r is None:
    # BUGFIX: this message was a plain string, so {PI} and {PE} were printed
    # literally; it needs the f-string prefix (wording also repaired).
    print(f'This script is for resuming from a ckpt. You must specify a PI (parent id of the former training) and a PE (parent epoch) combination that has a save path. You specified PI={PI}, PE={PE}, which has no save path in the tvts database.')
    sys.exit(0)
rpath, xdir = r
ckpt = os.path.join(xdir, rpath)
print(f'ckpt={ckpt}')

# HF Trainer checkpoints are named "<prefix>-<global_step>"; recover the global
# step from the directory name to infer how many epochs were already trained.
regexp = re.compile(r'^.+-(\d+)$')
matcher = regexp.match(ckpt)
if matcher is None:
    print('Cannot infer ckpt step from the ckpt path. Please check the path.')
    sys.exit(0)
else:
    # Mirror HF Trainer's bookkeeping: dataloader length counts per-device
    # batches across N_GPU, and one optimizer update happens every GRAD_ACC
    # batches.
    len_dataloader = math.ceil(TRAIN_LEN / (BATCH_SIZE * N_GPU))
    glb_step = int(matcher[1])
    num_update_steps_per_epoch = len_dataloader // GRAD_ACC
    num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
    epochs_trained = glb_step // num_update_steps_per_epoch

sep('Config it')
print('OUTPUT_DIR:', OUTPUT_DIR)
print('NAME:', TVTS_NAME)

# ImportError: Using the `Trainer` with `PyTorch` requires `accelerate>=0.20.1`: Please run `pip install transformers[torch]` or `pip install accelerate -U`
# pip install accelerate

# https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments
training_args = TrainingArguments(

    # Do not fast-forward through already-seen batches when resuming.
    ignore_data_skip=True,

    output_dir=OUTPUT_DIR,
    # learning_rate=LR,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=BATCH_SIZE,
    gradient_accumulation_steps=GRAD_ACC,
    # Total epoch target = epochs already trained (from the ckpt) + N more.
    num_train_epochs=N + epochs_trained,
    weight_decay=WEIGHT_DECAY,

    # "no": No evaluation is done during training.
    # "steps": Evaluation is done (and logged) every eval_steps.
    # "epoch": Evaluation is done at the end of each epoch.
    evaluation_strategy="steps",
    eval_steps=STEPS,

    # "no": No save is done during training.
    # "epoch": Save is done at the end of each epoch.
    # "steps": Save is done every save_steps.
    save_strategy="steps",
    save_steps=STEPS_SAVE,

    # Google search: training loss is "No log"
    # https://github.com/huggingface/transformers/issues/8910
    logging_steps=1,

    # load_best_model_at_end=True,

    # push_to_hub=True, # Whether to publish to the HuggingFace hub.

    # ValueError: Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0
    # bf16=True,
)

# Custom callback that mirrors HF logs/evals/saves into TVTS, offset by the
# epochs already trained (HF side) and the parent epoch PE (TVTS side).
from PyCmpltrtok.common_hf import LogCallback
log_callback = LogCallback(
    ts, logger, 
    STEPS, STEPS_SAVE, 
    SAVE_DIR, 
    epoch_base_hf=epochs_trained, epoch_base_tvts=PE
)

# Wire everything into the HF Trainer: model, args, tokenized splits, dynamic
# padding collator, accuracy metric, and the TVTS logging callback.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_imdb['train'],
    eval_dataset=tokenized_imdb['test'],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    callbacks=[log_callback],
)

########################################################################################################################
sep('Start to train')
# Resume optimizer/scheduler/step state from the inferred checkpoint directory.
trainer.train(
    resume_from_checkpoint=ckpt,
)

########################################################################################################################
sep('All over')
