import logging

from PyCmpltrtok.common import sep

sep('Logger')

# Root-logger configuration: everything at DEBUG and above is written to a
# per-experiment log file named after the TVTS experiment.
LOG_FORMAT = "%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(name)s: %(message)s"
TVTS_NAME = 'cmrc2018-bert-large-zh'
print('TVTS_NAME:', TVTS_NAME)
logging.basicConfig(
    level=logging.DEBUG,
    filename=f'{TVTS_NAME}.log',
    format=LOG_FORMAT,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
print(logger)

sep('transformers.trainer')
# Attach the root logger's handlers to transformers.trainer so Trainer logs
# land in the same file. (Was a list comprehension used only for its side
# effects; a plain loop states the intent.)
_trainer_logger = logging.getLogger('transformers.trainer')
for _handler in logging.getLogger().handlers:
    _trainer_logger.addHandler(_handler)
_trainer_logger.setLevel(logging.DEBUG)






from PyCmpltrtok.auth.mongo.conn import conn
from PyCmpltrtok.common import sep, rand_name_on_now, md5
import tvts.tvts as tvts
import os
import datasets
import os
import sys
import time
import redis
from transformers import AutoConfig
from transformers import AutoModelForQuestionAnswering
from transformers import AutoTokenizer
import re
from transformers import DefaultDataCollator
import torch
from ltp import LTP
import string
from collections import Counter
import evaluate
import numpy as np
import math
from transformers import Trainer
from transformers import TrainerState
from transformers import TrainerControl
from transformers import TrainingArguments
from PyCmpltrtok.common_hf import LogCallback
from transformers.optimization import AdamW
from transformers.trainer_pt_utils import get_parameter_names
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS





print('Connecting to redis ...')
# Redis is used as a cache for LTP segmentation results (see remove_punc below).
# SECURITY(review): the password is hard-coded in source — move it to an
# environment variable or a secrets store before sharing this file.
rdb = redis.Redis('127.0.0.1', 6379, 0, password='lgdz4qEdt/ezElyQnXFYXB80iM3OxEbAWRjMFPcIXH5ni6eQ8QOlfp7G7gvV1svPu2Bv7v')
rdb.get('test')  # cheap round-trip so the script fails fast if Redis is down
print('Connected to redis.')





sep('Arguments')
sep('Important arguments')
# Root of the SQuAD-style cmrc2018 data prepared on disk.
path = '/home/yunpeng/code_github/cmrc2018/squad-style-data/'  # New PC
# TEMP != 0 switches the whole script into a small smoke-test mode.
TEMP = 1
TEMP_LEN = 64      # train subset size in temp mode
TEMP_LEN_DEV = 64  # dev subset size in temp mode
model_path = '/home/yunpeng/models/hf/bert-large-chinese/cde4a45'  # New PC

print('path (of squad):', path)  # fixed label typo: was "paht( of squad):"
print('TEMP:', TEMP)
print('TEMP_LEN:', TEMP_LEN)
print('TEMP_LEN_DEV:', TEMP_LEN_DEV)
print('model_path', model_path)

sep('Hyper')
# Redis hash name that caches LTP "text -> spaced text" results.
REDIS_KEY = 'ltpo_t2st'  # LTP object for text to spaced-text
N_GPU = 2        # number of GPUs used by the Trainer
N_SAVE = 1       # save every N_SAVE evaluations
WEIGHT_DECAY = 0.01
LR = 2.5e-5  # learning rate
LR_MIN_RATE = 0.33   # floor for the LR multiplier in lr_lambda below
GAMMAR = 0.99        # per-step exponential decay factor (NOTE(review): likely a misspelling of "gamma")
WARMUP = 0.05        # warmup fraction of total steps
BATCH_SIZE_LTPO = 16  # batch size for LTP pipeline calls
if not TEMP:
    BATCH_SIZE_TGT = 32  # effective (target) batch size per optimizer step
    GRAD_ACC = 4         # gradient accumulation steps
    M = 4                # evaluations per epoch — presumably; see STEPS_EVAL below
    N = 2.1              # number of training epochs (fractional allowed)
else:
    BATCH_SIZE_TGT = 32
    GRAD_ACC = 4
    M = 2
    N = 2.1
# Per-device micro-batch size; integer division, hence the sanity check below.
BATCH_SIZE = BATCH_SIZE_TGT // GRAD_ACC // N_GPU
print('M', M)
print('N', N)
print('BATCH_SIZE_TGT', BATCH_SIZE_TGT)
print('BATCH_SIZE', BATCH_SIZE)
print('BATCH_SIZE_LTPO', BATCH_SIZE_LTPO)
print('GRAD_ACC', GRAD_ACC)
print('N_GPU', N_GPU)
print('N_SAVE', N_SAVE)
print('WEIGHT_DECAY', WEIGHT_DECAY)
print('LR', LR)
print('LR_MIN_RATE', LR_MIN_RATE)
print('GAMMAR', GAMMAR)
print('WARMUP', WARMUP)
# Guard: abort unless BATCH_SIZE_TGT is exactly divisible by GRAD_ACC * N_GPU,
# i.e. the floor division above lost nothing.
batch_size100 = int(100 * BATCH_SIZE_TGT / GRAD_ACC / N_GPU)
if BATCH_SIZE * 100 != batch_size100:
    print('BATCH_SIZE * 100 != batch_size100', file=sys.stderr, flush=True)
    sys.exit(1)
else:
    print('sleep 3')
    time.sleep(3)  # pause so the printed configuration can be eyeballed

sep('TVTS')
# MongoDB connection used by TVTS for run bookkeeping.
mongo = conn('local')
mdb = mongo['tvts']

# Unique save name for this run; temp runs additionally encode subset sizes.
name_parts = [f'{rand_name_on_now()}_temp{TEMP}']
if TEMP:
    name_parts.append(f'_train{TEMP_LEN}_dev{TEMP_LEN_DEV}')
save_name = ''.join(name_parts)
SAVE_DIR = os.path.join('/home/yunpeng/checkpoints', TVTS_NAME, save_name)
MEMO = ''

# Ready-to-paste command line for inspecting this run with the tvts CLI.
print(f'python tvts.py --link local -m "loss|eval_loss,eval_exact_match,eval_f1" --batch_metrics "loss" -k "eval_f1" --hyper "learning_rate" --save_dir "{SAVE_DIR}" "{TVTS_NAME}"')

print('SAVE_DIR:', SAVE_DIR)
print('MEMO', MEMO)





sep('Load data')
# Load the SQuAD-style cmrc2018 DatasetDict saved on disk, then shuffle
# every split with a fixed per-dict seed for reproducibility.
ds_dict = datasets.DatasetDict.load_from_disk(os.path.join(path, 'hf'))
ds_dict = ds_dict.shuffle(seeds=1)
ds_dict_ = ds_dict  # handle to the raw (un-tokenized) splits, used by metrics
print(ds_dict)

# Temp mode: keep only a small prefix of each split for a quick smoke run.
if TEMP:
    for split, keep in (('train', TEMP_LEN), ('dev', TEMP_LEN_DEV)):
        ds_dict[split] = ds_dict[split].select(range(keep))
print(ds_dict)

print("ds_dict['train'][:3]", ds_dict['train'][:3])
print("ds_dict['dev'][:3]", ds_dict['dev'][:3])









sep('Preprocess')
sep('Config')
# Pull the model config first — it tells us the maximum sequence length
# the position embeddings support.
conf = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
print(conf)
MAX_LEN = conf.max_position_embeddings
print('MAX_LEN', MAX_LEN)

sep('tokenizer')
tokenizer = AutoTokenizer.from_pretrained(model_path)
print(tokenizer)

def preprocess_function(examples):
    """Tokenize (question, context) batches and label the answer token span.

    Standard Hugging Face extractive-QA preprocessing: only the context
    ("only_second") is truncated, offsets map tokens back to context
    characters, and each example gets start/end token positions — (0, 0)
    when the answer does not fit inside the kept context window.
    Assumes columns Q (question), C (context), As (answer list) — per the
    dataset loaded above.
    """
    questions = examples["Q"]
    inputs = tokenizer(
        questions,
        examples["C"],
        max_length=MAX_LEN,
        truncation="only_second",
        return_offsets_mapping=True,
        padding="max_length",
    )

    offset_mapping = inputs.pop("offset_mapping")
    answers = examples["As"]
    start_positions = []
    end_positions = []

    for i, offset in enumerate(offset_mapping):
        # Only the first gold answer is used as the training label.
        answer = answers[i][0]
        # start_char and end_char are character offsets within the context
        start_char = answer["answer_start"]
        end_char = answer["answer_start"] + len(answer["text"])
        sequence_ids = inputs.sequence_ids(i)

        # Find the start and end of the context
        idx = 0  # idx is a token offset
        while sequence_ids[idx] != 1:
            idx += 1
        context_start = idx
        while sequence_ids[idx] == 1:
            idx += 1
        context_end = idx - 1

        # If the answer is not fully inside the context, label it (0, 0)
        if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
            start_positions.append(0)
            end_positions.append(0)
        else:
            # Otherwise it's the start and end token positions
            idx = context_start  # idx is a token offset
            while idx <= context_end and offset[idx][0] <= start_char:
                idx += 1
            start_positions.append(idx - 1)

            idx = context_end
            while idx >= context_start and offset[idx][1] >= end_char:
                idx -= 1
            end_positions.append(idx + 1)

    inputs["start_positions"] = start_positions
    inputs["end_positions"] = end_positions
    return inputs

# Tokenize both splits; the raw columns are dropped so only model inputs remain.
ds_train = ds_dict['train'].map(
    preprocess_function,
    batched=True,
    remove_columns=ds_dict["train"].column_names,
)
print('ds_train', ds_train)
print("ds_train[:3]", ds_train[:3])

ds_dev = ds_dict['dev'].map(
    preprocess_function,
    batched=True,
    remove_columns=ds_dict["train"].column_names,
)
print('ds_dev', ds_dev)
print("ds_dev[:3]", ds_dev[:3])


sep('Dependent parameters')
# Derived step counts — these mirror what Trainer computes internally so the
# eval/save cadence and LR schedule line up with real optimizer steps.
train_batch_size = BATCH_SIZE * N_GPU  # micro-batch across all GPUs
print('train_batch_size', train_batch_size)
TRAIN_LEN = len(ds_train)
print('TRAIN_LEN', TRAIN_LEN)
len_dataloader = math.ceil(TRAIN_LEN / train_batch_size)
print('len_dataloader', len_dataloader)
EPOCH_STEPS = len_dataloader // GRAD_ACC  # num_update_steps_per_epoch
print('EPOCH_STEPS', EPOCH_STEPS)
# Evaluate M times per epoch, but at least once.
STEPS_EVAL = math.floor(EPOCH_STEPS / M)
print('STEPS_EVAL:', STEPS_EVAL)
STEPS_EVAL = max(1, STEPS_EVAL)
print('STEPS_EVAL after max(1, x):', STEPS_EVAL)
STEPS_SAVE = STEPS_EVAL * N_SAVE
print('STEPS_SAVE:', STEPS_SAVE)
ALL_STEPS = math.ceil(N * EPOCH_STEPS)  # max_steps
print('ALL_STEPS', ALL_STEPS)
WARMUP_STEPS = math.ceil(ALL_STEPS * WARMUP)  # consumed by lr_lambda below
print('WARMUP_STEPS', WARMUP_STEPS)
print('sleep 3')
time.sleep(3)  # pause so the printed values can be eyeballed before training


sep('Check the preprocessing')
# batch_decode inserts spaces around CJK characters; these two patterns
# strip a space that touches a CJK character on either side.
regexp_spaces1 = re.compile(r'([\u4e00-\u9fa5]) ')
regexp_spaces2 = re.compile(r' ([\u4e00-\u9fa5])')

def check_answer(examples):
    """Decode each labeled answer span back to text for a sanity check."""
    spans = [
        ids[start:end + 1]
        for ids, start, end in zip(
            examples['input_ids'],
            examples['start_positions'],
            examples['end_positions'],
        )
    ]
    texts = tokenizer.batch_decode(spans, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    # Undo the CJK spacing introduced by decoding.
    texts = [regexp_spaces1.sub(r'\1', t) for t in texts]
    texts = [regexp_spaces2.sub(r'\1', t) for t in texts]
    return {
        'answer_text': texts
    }
# Compare decoded label spans against the gold answers for the first few rows.
ds_train_check = ds_train.map(check_answer, batched=True)
print('ds_train_check', ds_train_check)

for i, txt in enumerate(ds_train_check['answer_text'][:10]):
    golds = [ans["text"] for ans in ds_dict_["train"]["As"][i]]
    print(i, f'|{txt}|{golds}|')

ds_dev_check = ds_dev.map(check_answer, batched=True)
print('ds_dev_check', ds_dev_check)

for i, txt in enumerate(ds_dev_check['answer_text'][:10]):
    golds = [ans["text"] for ans in ds_dict_["dev"]["As"][i]]
    print(i, f'|{txt}|{golds}|')




    
    
sep('Data Collator')
data_collator = DefaultDataCollator()  # This collator does nothing beyond converting to torch tensors (QA collation works this way because we tokenized with padding='max_length')
print('data_collator', data_collator)





sep('Metrics')
sep('LTP')
# model_path_ltp = "LTP/small"  # downloads from Hugging Face by default; may need a proxy
# WSL
# model_path_ltp = "/home/peiyp2004/.cache/huggingface/hub/models--LTP--small/snapshots/0b3e08649fe02688112fa21e69e3eec38101fcaa"
# New PC
# model_path_ltp = "/home/yunpeng/models/hf/ltp-small/0b3e08"  # small
model_path_ltp = "/home/yunpeng/models/hf/ltp-base2/70c5701"  # base2

ltpo = LTP(model_path_ltp)  # loads the Small model by default
                        # a model path can also be passed: ltp = LTP("/path/to/your/model")
                        # /path/to/your/model should contain config.json and the other model files

# Move the LTP model onto a GPU
if torch.cuda.is_available():
    # ltpo.cuda()
    ltpo.to("cuda:1")  # pinned to the second GPU — presumably to keep cuda:0 free for training; confirm
print('ltpo', ltpo)


def remove_punc_core(texts):
    """Segment a batch of texts with LTP and drop punctuation and particles.

    Runs the LTP pipeline once for the whole batch ('cws' segmentation +
    'pos' tagging), removes words tagged 'wp' (punctuation) or 'u'
    (particle), and returns one space-joined word string per input text.
    """
    words_dict = ltpo.pipeline(texts, tasks=['cws', 'pos'], return_dict=True)
    segs = words_dict.cws
    poss = words_dict.pos

    # Hoisted out of the loop: the original rebuilt set(['wp', 'u']) for
    # every sentence.
    drop_tags = {'wp', 'u'}
    results = []
    for seg, tags in zip(segs, poss):
        kept = [word for word, tag in zip(seg, tags) if tag not in drop_tags]
        results.append(' '.join(kept))
    return results


def remove_punc(text):
    """Single-text wrapper over remove_punc_core with a Redis cache.

    The protocol expects compute_score_pass002 to have pre-populated the
    cache; a miss here is logged loudly and filled on the fly.
    """
    if text == '':
        return ''
    cache_key = md5(text)
    cached = rdb.hget(REDIS_KEY, cache_key)
    if cached is not None:
        return cached.decode('utf8')
    print('**** **** **** **** BROKEN-PROTOCOL **** **** **** ****', flush=True, file=sys.stderr)
    result = remove_punc_core([text])[0]
    rdb.hset(REDIS_KEY, cache_key, result)
    return result
    
    
def normalize_answer(s):
    """Normalize Chinese text for scoring: segment via LTP, drop punctuation
    and particles, collapse whitespace runs, and lowercase."""
    segmented = remove_punc(s)
    squeezed = ' '.join(segmented.split())  # collapse runs of whitespace
    return squeezed.lower()

print('normalize_answer', normalize_answer)


def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and a single ground truth."""
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    overlap = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return (2 * precision * recall) / (precision + recall)


def exact_match_score(prediction, ground_truth):
    """True when both texts normalize to exactly the same string."""
    normalized_pred = normalize_answer(prediction)
    normalized_gold = normalize_answer(ground_truth)
    return normalized_pred == normalized_gold

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best metric_fn score of prediction against any ground truth.

    Robustness fix: the original called max() on an empty list and raised
    ValueError when an example had no gold answers; such an entry now
    scores 0.
    """
    scores = [metric_fn(prediction, ground_truth) for ground_truth in ground_truths]
    return max(scores) if scores else 0
    

# Shared accumulators for the three-pass scoring protocol below.
compute_score_text_set = set()      # every text that needs normalization
compute_score_text_set_new = set()  # subset not yet cached in Redis


def compute_score_pass001(dataset, predictions):
    """Pass 1: collect every prediction and gold answer into the shared set."""
    sep('compute_score_pass001')
    compute_score_text_set.update(predictions)
    for article in dataset:
        compute_score_text_set.update(answer['text'] for answer in article['As'])


def compute_score_pass002(dataset, predictions):
    """Pass 2: batch-run LTP on all uncached texts and store results in Redis.

    Batching through remove_punc_core keeps the GPU busy; remove_punc then
    only ever hits the cache during pass 3.
    """
    sep('compute_score_pass002')
    for text in list(compute_score_text_set):
        if text == '':
            continue
        if rdb.hget(REDIS_KEY, md5(text)) is None:
            compute_score_text_set_new.add(text)

    pending = list(compute_score_text_set_new)
    for offset in range(0, len(pending), BATCH_SIZE_LTPO):
        batch = pending[offset:offset + BATCH_SIZE_LTPO]
        for text, spaced in zip(batch, remove_punc_core(batch)):
            rdb.hset(REDIS_KEY, md5(text), spaced.encode('utf8'))


def compute_score(dataset, predictions):
    """Corpus-level exact-match and F1 (as percentages) over the dev set.

    Resets the shared text sets, pre-warms the Redis cache (passes 1-2),
    then scores each prediction against all gold answers (pass 3).
    """
    global compute_score_text_set, compute_score_text_set_new

    compute_score_text_set = set()
    compute_score_text_set_new = set()

    sep('compute_score')

    compute_score_pass001(dataset, predictions)
    compute_score_pass002(dataset, predictions)

    sep('compute_score_pass003')
    exact_match = 0
    f1 = 0
    total = 0
    for i, article in enumerate(dataset):
        total += 1
        golds = [answer['text'] for answer in article['As']]
        pred = predictions[i]
        exact_match += metric_max_over_ground_truths(exact_match_score, pred, golds)
        f1 += metric_max_over_ground_truths(f1_score, pred, golds)

    return {
        "exact_match": 100.0 * exact_match / total,
        "f1": 100.0 * f1 / total,
    }


def compute_metrics(eval_pred):
    """Trainer metric hook: decode predicted spans and score EM/F1.

    eval_pred[0] holds the model outputs for the whole dev set:
    pred[0] = start logits, pred[1] = end logits.

    NOTE(review): start and end positions are argmax-ed independently, so
    ends[i] may precede starts[i]; the slice is then empty and the
    prediction decodes to ''. Confirm this fallback is acceptable.
    """
    pred = eval_pred[0]
    # label = eval_pred[1]
    starts = np.argmax(pred[0], axis=-1)
    ends = np.argmax(pred[1], axis=-1)

    # Gold answers come from the raw (un-tokenized) dev split.
    references = ds_dict_['dev']
    
    predictions = []
    for i, ids in enumerate(ds_dev['input_ids']):
        pred_ids = ids[starts[i]:ends[i]+1]
        pred_text = tokenizer.decode(pred_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        # Strip the spaces decoding inserts around CJK characters.
        pred_text = regexp_spaces1.sub(r'\1', pred_text)
        pred_text = regexp_spaces2.sub(r'\1', pred_text)
        predictions.append(pred_text)
        
    score = compute_score(dataset=references, predictions=predictions)
    return score

print('compute_metrics', compute_metrics)







sep('Model')
# Load the checkpoint with a question-answering (span prediction) head.
model = AutoModelForQuestionAnswering.from_pretrained(model_path)
print(model)







sep('Configurate training')
# Register this run with TVTS so metrics and checkpoints are tracked in MongoDB.
ts = tvts.Tvts(
    TVTS_NAME,
    memo=MEMO,
    is_temp=not not TEMP,  # coerce the int flag to a bool
    mongo_link=mongo,
    save_dir=SAVE_DIR,
    params = {
        'lr': LR,
        'batch_size': BATCH_SIZE,
        'epochs': N,
        'eval_steps': STEPS_EVAL,
        'save_steps': STEPS_SAVE,
        'decay': WEIGHT_DECAY,
        'train_len': len(ds_train),
        'dev_len': len(ds_dev),
    }
)
print(ts)

# https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments
training_args = TrainingArguments(
    output_dir=SAVE_DIR,
    learning_rate=LR,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=BATCH_SIZE,
    gradient_accumulation_steps=GRAD_ACC,
    num_train_epochs=N,
    weight_decay=WEIGHT_DECAY,

    # "no": No evaluation is done during training.
    # "steps": Evaluation is done (and logged) every eval_steps.
    # "epoch": Evaluation is done at the end of each epoch.
    evaluation_strategy="steps",
    eval_steps=STEPS_EVAL,

    # "no": No save is done during training.
    # "epoch": Save is done at the end of each epoch.
    # "steps": Save is done every save_steps.
    save_strategy="steps",
    save_steps=STEPS_SAVE,

    # google search: training loss is no log
    # https://github.com/huggingface/transformers/issues/8910
    logging_steps=1,

    # load_best_model_at_end=True,

    # push_to_hub=True,  # whether to publish to the Hugging Face Hub

    # split_batches=False,
    # split_batches=True,

    # **bf16dict,

    # warmup_ratio=WARMUP,  # unused: warmup is handled by the custom LambdaLR below
)
print('training_args', training_args)

# Callback that mirrors Trainer logs into TVTS and the file logger.
log_callback = LogCallback(ts, logger, STEPS_EVAL, STEPS_SAVE, SAVE_DIR)

# Names of parameters that receive weight decay: everything except biases
# and LayerNorm weights (the usual BERT recipe).
decay_parameters = [
    name
    for name in get_parameter_names(model, ALL_LAYERNORM_LAYERS)
    if "bias" not in name
]

decay_group = {"params": [], "weight_decay": WEIGHT_DECAY}
no_decay_group = {"params": [], "weight_decay": 0.0}
for name, param in model.named_parameters():
    if not param.requires_grad:
        continue
    group = decay_group if name in decay_parameters else no_decay_group
    group["params"].append(param)
optimizer_grouped_parameters = [decay_group, no_decay_group]

optimizer_kwargs = {
    'lr': LR,
    "betas": (training_args.adam_beta1, training_args.adam_beta2),
    "eps": training_args.adam_epsilon,
}
print('optimizer_kwargs', optimizer_kwargs)
opt = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)

def lr_lambda(step):
    """LambdaLR multiplier: linear warmup to 1.0 over WARMUP_STEPS, then
    exponential decay by GAMMAR per step, floored at LR_MIN_RATE.

    NOTE(review): step 0 returns 1.0 (the full LR) even though the warmup
    ramp would give 0 — the very first optimizer step runs at peak learning
    rate before warmup begins. Confirm this spike is intentional.
    """
    if not step:
        return 1.0
    if step <= WARMUP_STEPS:
        lr_rate = 1.0 / WARMUP_STEPS * step
        return lr_rate
    else:
        lr_rate = GAMMAR ** (step - WARMUP_STEPS)
        lr_rate = max(lr_rate, LR_MIN_RATE)
        return lr_rate

# Assemble the Trainer with the custom optimizer and LR schedule; passing
# `optimizers` overrides the Trainer's default AdamW + scheduler.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=ds_train,
    eval_dataset=ds_dev,
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    callbacks=[log_callback],
    # NOTE(review): LambdaLR's `verbose` flag is deprecated in newer torch.
    optimizers=(opt, torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lr_lambda, verbose=True))
)
print('trainer', trainer)





sep('Train it')
# Run training; eval/save cadence and max steps come from training_args.
trainer.train()

sep('All over')