from PyCmpltrtok.common import sep
sep('import')
import logging
from PyCmpltrtok.auth.mongo.conn import conn
from PyCmpltrtok.common import sep, rand_name_on_now, md5, get_dir_name_ext
from PyCmpltrtok.util_mongo import mongo_get
import tvts.tvts as tvts
import os
import datasets
import os
import sys
import time
import functools
import redis
from transformers import AutoConfig
from transformers import AutoModelForQuestionAnswering
from transformers import AutoTokenizer
import re
from transformers import DefaultDataCollator
import torch
from ltp import LTP
from collections import Counter
import numpy as np
import math
from transformers import Trainer
from transformers import TrainingArguments
from PyCmpltrtok.common_hf import LogCallback
from transformers.optimization import AdamW
from transformers.trainer_pt_utils import get_parameter_names
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS


class QAZH():
    """Fine-tune and evaluate an extractive QA model on CMRC2018 (Chinese MRC).

    The class wires together:
      * a flattened CMRC2018 ``datasets.DatasetDict`` (``save_to_disk`` layout),
      * a HuggingFace QA model + tokenizer driven by ``transformers.Trainer``,
      * LTP word segmentation for word-level EM/F1 metrics on Chinese text,
      * a Redis hash that caches LTP segmentation results (md5(text) -> spaced text),
      * TVTS experiment tracking backed by MongoDB.

    Typical usage: construct, then call :meth:`init`, optionally
    :meth:`check_preprocess`, then :meth:`start_training`.
    """

    def __init__(
        self,
        TVTS_NAME,
        path2cmrc_hf,
        model_path,
        model_path_ltp,
        force_continue=False,
        TEMP=1,
        TEMP_LEN=64,
        TEMP_LEN_DEV=64,
        logger=logging.getLogger(),
        BATCH_SIZE=4,
        BATCH_SIZE_TGT=None,
        GRAD_ACC=None,
        BATCH_SIZE_EVAL=None,
        BATCH_SIZE_LTPO=16,
        LTPO_IDX=None,
        LR=None,
        LR_MIN_RATE=None,
        GAMMAR=None,
        LR_TYPE=None,
        N=None,
        M=None,
        MEMO='',
        WARM_UP=0.0,
    ) -> None:
        """Record hyper-parameters, validate batch-size bookkeeping, connect to MongoDB.

        :param TVTS_NAME: experiment name used by TVTS tracking.
        :param path2cmrc_hf: directory of the flattened CMRC2018 DatasetDict.
        :param model_path: HF model directory/name for the QA model.
        :param model_path_ltp: LTP model directory/name used by the metrics.
        :param force_continue: make model parameters contiguous so a resumed run can save.
        :param TEMP: truthy => "temp" (smoke-test) mode on a truncated dataset.
        :param TEMP_LEN: train-set size used in temp mode.
        :param TEMP_LEN_DEV: dev-set size used in temp mode (None => keep full dev set).
        :param logger: logger handed to the training LogCallback.
        :param BATCH_SIZE: per-device train batch size.
        :param BATCH_SIZE_TGT: required; expected effective global batch size,
            i.e. BATCH_SIZE * n_gpu * GRAD_ACC (checked below, exits on mismatch).
        :param GRAD_ACC: required; gradient accumulation steps.
        :param BATCH_SIZE_EVAL: per-device eval batch size (defaults to BATCH_SIZE).
        :param BATCH_SIZE_LTPO: batch size for LTP segmentation during metric computation.
        :param LTPO_IDX: CUDA index for the LTP model (None => auto-pick in init()).
        :param LR: base learning rate.
        :param LR_MIN_RATE: floor for the lr multiplier of the 'warm_up_gammar' schedule.
        :param GAMMAR: per-step decay factor of the 'warm_up_gammar' schedule.
        :param LR_TYPE: one of 'warm_up_gammar', 'warm_up_constant', 'constant'.
        :param N: number of epochs.
        :param M: evaluations per epoch.
        :param MEMO: free-form memo stored with the TVTS run.
        :param WARM_UP: warm-up ratio of all training steps.
        """
        sep('Key arguments')
        self.TVTS_NAME = TVTS_NAME
        self.path2cmrc_hf = path2cmrc_hf
        self.model_path = model_path
        self.model_path_ltp = model_path_ltp
        self.force_continue = force_continue
        print('TVTS_NAME', TVTS_NAME)
        # BUGFIX: the label used to read "paht( of CMRC2018 flatten JSONs):".
        print('path (of CMRC2018 flatten JSONs):', self.path2cmrc_hf)
        print('model', self.model_path)
        print('LTP', self.model_path_ltp)
        print('self.force_continue', self.force_continue)

        self.TEMP = TEMP
        self.TEMP_LEN = TEMP_LEN
        self.TEMP_LEN_DEV = TEMP_LEN_DEV
        print('self.TEMP:', self.TEMP)
        print('TEMP_LEN:', TEMP_LEN)
        print('TEMP_LEN_DEV:', TEMP_LEN_DEV)

        sep('Hyper')
        self.logger = logger
        self.REDIS_KEY = 'ltpo_t2st'  # LTP object for text to spaced-text
        self.BATCH_SIZE = BATCH_SIZE
        self.BATCH_SIZE_LTPO = BATCH_SIZE_LTPO
        self.LTPO_IDX = LTPO_IDX
        self.N_SAVE = 1  # save every N_SAVE evaluations
        self.N_GPU = torch.cuda.device_count()
        print('self.REDIS_KEY', self.REDIS_KEY)
        print('self.BATCH_SIZE', self.BATCH_SIZE)
        if BATCH_SIZE_EVAL is None:
            self.BATCH_SIZE_EVAL = self.BATCH_SIZE
        else:
            self.BATCH_SIZE_EVAL = BATCH_SIZE_EVAL
        print('self.BATCH_SIZE_EVAL', self.BATCH_SIZE_EVAL)
        print('self.BATCH_SIZE_LTPO', self.BATCH_SIZE_LTPO)
        print('self.LTPO_IDX', self.LTPO_IDX)
        print('self.N_SAVE', self.N_SAVE)
        print('self.N_GPU', self.N_GPU)

        if BATCH_SIZE_TGT is None:
            print('You must provide BATCH_SIZE_TGT.', file=sys.stderr, flush=True)
            sys.exit(1)
        # BUGFIX: GRAD_ACC defaults to None and was multiplied below without a guard,
        # which crashed with a TypeError instead of a clear message.
        if GRAD_ACC is None:
            print('You must provide GRAD_ACC.', file=sys.stderr, flush=True)
            sys.exit(1)
        self.BATCH_SIZE_TGT = BATCH_SIZE_TGT
        self.N = N
        self.M = M
        self.GRAD_ACC = GRAD_ACC
        print('self.BATCH_SIZE_TGT', self.BATCH_SIZE_TGT)
        print('self.GRAD_ACC', self.GRAD_ACC)
        print('self.N', self.N)
        print('self.M', self.M)
        # Guard against silently training with a different effective batch size
        # than the one the run was tuned for.
        effective_batch_size = self.BATCH_SIZE * self.N_GPU * self.GRAD_ACC
        if self.BATCH_SIZE_TGT != effective_batch_size:
            print(f"self.BATCH_SIZE_TGT != effective_batch_size: {self.BATCH_SIZE_TGT} != {effective_batch_size}", file=sys.stderr, flush=True)
            sys.exit(1)

        self.WEIGHT_DECAY = 0.01
        self.LR = LR
        self.LR_TYPE = LR_TYPE
        self.LR_MIN_RATE = LR_MIN_RATE
        self.GAMMAR = GAMMAR
        self.WARM_UP = WARM_UP
        print('self.WEIGHT_DECAY', self.WEIGHT_DECAY)
        print('self.LR', self.LR)
        print('self.LR_TYPE', self.LR_TYPE)
        print('self.LR_MIN_RATE', self.LR_MIN_RATE)
        print('self.GAMMAR', self.GAMMAR)
        print('self.WARM_UP', self.WARM_UP)

        sep('TVTS parameters')
        sep('Link to mongodb and test')
        self.mongo = conn('local')
        mdb = self.mongo['tvts']
        # Probe query: fail fast here rather than mid-training.
        mongo_get(mdb, 'test_tbl', 'test_user', 'test_key')
        print('Mongo link test passed')
        save_name = f'{rand_name_on_now()}_temp{self.TEMP}'
        if self.TEMP:
            save_name += f'_train{TEMP_LEN}_dev{TEMP_LEN_DEV}'
        # NOTE(review): hard-coded checkpoint root; consider making it configurable.
        self.SAVE_DIR = os.path.join('/home/yunpeng/checkpoints', self.TVTS_NAME, save_name)
        self.MEMO = MEMO
        print('self.SAVE_DIR:', self.SAVE_DIR)
        print('self.MEMO', self.MEMO)
        print(f'python tvts.py --link local -m "loss|eval_loss,eval_exact_match,eval_f1" --batch_metrics "loss" -k "eval_f1" --hyper "learning_rate" "{self.TVTS_NAME}"')

        # Patterns to strip the spaces the tokenizer's decode inserts around CJK chars.
        self.regexp_spaces1 = re.compile(r'([\u4e00-\u9fa5]) ')
        self.regexp_spaces2 = re.compile(r' ([\u4e00-\u9fa5])')
        # Dispatch table mapping LR_TYPE to the LambdaLR multiplier function.
        self.type2lambda = {
            'warm_up_gammar': self.lr_lambda_method_warm_up_gammar,
            'warm_up_constant': self.lr_lambda_method_warm_up_constant,
            'constant': self.lr_lambda_method_constant,
        }
        self.lr_lambda_method = self.type2lambda[self.LR_TYPE]
        self.is_interrupt()

    def is_interrupt(self):
        """Interactively ask for confirmation; exit the process unless the user types 'y'."""
        print('Is this OK? (Y/N): ')
        xinput = input().strip().lower()
        if 'y' != xinput:
            print("Let's stop it.")
            sys.exit(0)

    def init(self):
        """Load data, tokenize, and build tokenizer/model/optimizer/Trainer.

        Must be called after ``__init__`` and before :meth:`start_training`.
        Side effects: connects to Redis, loads the LTP model (possibly onto a GPU),
        and registers the run with TVTS.
        """
        sep('Init')

        sep('Load data')
        self.ds_dict = datasets.DatasetDict.load_from_disk(self.path2cmrc_hf)
        # BUGFIX: the keyword is `seed`, not `seeds` — the old call raised a TypeError.
        self.ds_dict = self.ds_dict.shuffle(seed=1)
        self.ds_dict_ = self.ds_dict  # keep a handle on the un-tokenized datasets for metrics
        print(self.ds_dict)

        if self.TEMP:
            sep('Limit dataset for self.TEMP')
            self.ds_dict['train'] = self.ds_dict['train'].select(range(self.TEMP_LEN))
            if self.TEMP_LEN_DEV is not None:
                self.ds_dict['dev'] = self.ds_dict['dev'].select(range(self.TEMP_LEN_DEV))
        print(self.ds_dict)

        sep('Preprocess')
        sep('Config')
        self.conf = AutoConfig.from_pretrained(
            self.model_path,
            trust_remote_code=True,
        )
        print(self.conf)
        self.MAX_LEN = self.conf.max_position_embeddings
        print('self.MAX_LEN', self.MAX_LEN)
        sep('self.tokenizer')
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        print(self.tokenizer)

        def preprocess_function(examples):
            """Tokenize (question, context) pairs and compute answer token spans.

            Standard HF extractive-QA preprocessing: answers that do not fit
            inside the (possibly truncated) context are labeled (0, 0).
            """
            questions = examples["Q"]
            inputs = self.tokenizer(
                questions,
                examples["C"],
                max_length=self.MAX_LEN,
                truncation="only_second",
                return_offsets_mapping=True,
                padding="max_length",
            )

            offset_mapping = inputs.pop("offset_mapping")
            answers = examples["As"]
            start_positions = []
            end_positions = []

            for i, offset in enumerate(offset_mapping):
                answer = answers[i][0]
                # start_char / end_char are character offsets within the context
                start_char = answer["answer_start"]
                end_char = answer["answer_start"] + len(answer["text"])
                sequence_ids = inputs.sequence_ids(i)

                # Find the start and end of the context
                idx = 0  # idx is a token offset
                while sequence_ids[idx] != 1:
                    idx += 1
                context_start = idx
                while sequence_ids[idx] == 1:
                    idx += 1
                context_end = idx - 1

                # If the answer is not fully inside the context, label it (0, 0)
                if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
                    start_positions.append(0)
                    end_positions.append(0)
                else:
                    # Otherwise it's the start and end token positions
                    idx = context_start  # idx is a token offset
                    while idx <= context_end and offset[idx][0] <= start_char:
                        idx += 1
                    start_positions.append(idx - 1)

                    idx = context_end
                    while idx >= context_start and offset[idx][1] >= end_char:
                        idx -= 1
                    end_positions.append(idx + 1)

            inputs["start_positions"] = start_positions
            inputs["end_positions"] = end_positions
            return inputs

        self.ds_train = self.ds_dict['train'].map(preprocess_function, batched=True, remove_columns=self.ds_dict["train"].column_names)
        print('self.ds_train', self.ds_train)

        self.ds_dev = self.ds_dict['dev'].map(preprocess_function, batched=True, remove_columns=self.ds_dict["train"].column_names)
        print('self.ds_dev', self.ds_dev)

        sep('Dependent parameters')
        print('self.MAX_LEN', self.MAX_LEN)
        train_batch_size = self.BATCH_SIZE * self.N_GPU
        print('train_batch_size', train_batch_size)
        TRAIN_LEN = len(self.ds_train)
        print('TRAIN_LEN', TRAIN_LEN)
        len_dataloader = math.ceil(TRAIN_LEN / train_batch_size)
        print('len_dataloader', len_dataloader)
        EPOCH_STEPS = len_dataloader // self.GRAD_ACC  # num_update_steps_per_epoch
        print('EPOCH_STEPS', EPOCH_STEPS)
        STEPS_EVAL = math.floor(EPOCH_STEPS / self.M)
        print('STEPS_EVAL:', STEPS_EVAL)
        self.STEPS_EVAL = max(1, STEPS_EVAL)
        print('self.STEPS_EVAL after max(1, x):', self.STEPS_EVAL)
        # BUGFIX: this used the unclamped STEPS_EVAL, which can be 0 on tiny
        # datasets and then breaks TrainingArguments(save_steps=0).
        self.STEPS_SAVE = self.STEPS_EVAL * self.N_SAVE
        print('self.STEPS_SAVE:', self.STEPS_SAVE)
        self.ALL_STEPS = math.ceil(self.N * EPOCH_STEPS)  # max_steps
        print('self.ALL_STEPS', self.ALL_STEPS)
        self.WARM_UP_STEPS = math.ceil(self.ALL_STEPS * self.WARM_UP)
        print('self.WARM_UP_STEPS', self.WARM_UP_STEPS)
        # Number of batches in one full evaluation pass over the dev set.
        self.EVAL_STEPS = math.ceil(len(self.ds_dev) / (self.BATCH_SIZE_EVAL * self.N_GPU))
        print('self.EVAL_STEPS', self.EVAL_STEPS)
        self.is_interrupt()

        sep('Redis')
        print('Connecting to redis ...')
        # NOTE(review): hard-coded credentials; move to env var / config file.
        self.rdb = redis.Redis('127.0.0.1', 6379, 0, password='lgdz4qEdt/ezElyQnXFYXB80iM3OxEbAWRjMFPcIXH5ni6eQ8QOlfp7G7gvV1svPu2Bv7v')
        self.rdb.get('test')  # probe query: fail fast if Redis is unreachable
        print('Connected to redis.')

        sep('Data Collator')
        # This collator only converts features to torch tensors; nothing else is
        # needed because we tokenize with padding='max_length'.
        self.data_collator = DefaultDataCollator()
        print('data_collator', self.data_collator)

        sep('Metrics')
        sep('LTP')
        # A local model path also works, e.g. LTP("/path/to/your/model");
        # the directory must contain config.json and the other model files.
        self.ltpo = LTP(self.model_path_ltp)

        # Move the LTP model to a GPU: prefer the explicitly requested index,
        # otherwise use cuda:1 when available so training keeps cuda:0.
        if torch.cuda.is_available():
            if self.LTPO_IDX is None:
                if self.N_GPU > 1:
                    dev_str = "cuda:1"
                else:
                    dev_str = "cuda:0"
            else:
                dev_str = f"cuda:{self.LTPO_IDX}"
            self.LTPO_DEV_STR = dev_str
            print(f'Putting LTP to {self.LTPO_DEV_STR}')
            self.ltpo.to(self.LTPO_DEV_STR)

        sep('Model')
        self.model = AutoModelForQuestionAnswering.from_pretrained(self.model_path)
        if self.force_continue:
            # Some checkpoints load with non-contiguous tensors, which breaks saving.
            for param in self.model.parameters():
                param.data = param.data.contiguous()

        sep('Configurate training')
        self.ts = tvts.Tvts(
            self.TVTS_NAME,
            memo=self.MEMO,
            is_temp=bool(self.TEMP),
            mongo_link=self.mongo,
            save_dir=self.SAVE_DIR,
            params={
                'lr': self.LR,
                'lr_type': self.LR_TYPE,
                'gammar': self.GAMMAR,
                'batch_size': self.BATCH_SIZE,
                'n_gpu': self.N_GPU,
                'grad_acc': self.GRAD_ACC,
                'batch_size_tgt': self.BATCH_SIZE_TGT,
                'batch_size_eval': self.BATCH_SIZE_EVAL,
                'epochs': self.N,
                'M': self.M,
                'epoch_steps': EPOCH_STEPS,
                'eval_steps': self.STEPS_EVAL,
                'save_steps': self.STEPS_SAVE,
                'decay': self.WEIGHT_DECAY,
                'train_len': len(self.ds_train),
                'dev_len': len(self.ds_dev),
                'all_steps': self.ALL_STEPS,
                # BUGFIX: this key used to be a duplicate 'eval_steps' that silently
                # overwrote the evaluation interval recorded above.
                'eval_batches': self.EVAL_STEPS,
                'warm_up': self.WARM_UP,
                'warm_up_steps': self.WARM_UP_STEPS,
            }
        )
        print(self.ts)

        bf16dict = {
            # ValueError: Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0
            'bf16': True,
        }

        # https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments
        self.training_args = TrainingArguments(
            output_dir=self.SAVE_DIR,
            learning_rate=self.LR,
            per_device_train_batch_size=self.BATCH_SIZE,
            per_device_eval_batch_size=self.BATCH_SIZE_EVAL,
            gradient_accumulation_steps=self.GRAD_ACC,
            num_train_epochs=self.N,
            weight_decay=self.WEIGHT_DECAY,

            # "no": No evaluation is done during training.
            # "steps": Evaluation is done (and logged) every eval_steps.
            # "epoch": Evaluation is done at the end of each epoch.
            evaluation_strategy="steps",
            eval_steps=self.STEPS_EVAL,

            # "no": No save is done during training.
            # "epoch": Save is done at the end of each epoch.
            # "steps": Save is done every save_steps.
            save_strategy="steps",
            save_steps=self.STEPS_SAVE,

            # Without this the progress bar shows "training loss is no log";
            # see https://github.com/huggingface/transformers/issues/8910
            logging_steps=1,

            **bf16dict,

            report_to=['tensorboard', 'wandb', ]
        )
        print('training_args', self.training_args)

        self.log_callback = LogCallback(self.ts, self.logger, self.STEPS_EVAL, self.STEPS_SAVE, self.SAVE_DIR)

        # Apply weight decay to everything except biases and LayerNorm weights,
        # following the standard Trainer recipe.
        decay_parameters = get_parameter_names(self.model, ALL_LAYERNORM_LAYERS)
        decay_parameters = [name for name in decay_parameters if "bias" not in name]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in self.model.named_parameters() if (n in decay_parameters and p.requires_grad)
                ],
                "weight_decay": self.WEIGHT_DECAY,
            },
            {
                "params": [
                    p for n, p in self.model.named_parameters() if (n not in decay_parameters and p.requires_grad)
                ],
                "weight_decay": 0.0,
            },
        ]
        optimizer_kwargs = {
            'lr': self.LR,
            "betas": (self.training_args.adam_beta1, self.training_args.adam_beta2),
            "eps": self.training_args.adam_epsilon,
        }
        print('optimizer_kwargs', optimizer_kwargs)
        self.opt = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)

        self.trainer = Trainer(
            model=self.model,
            args=self.training_args,
            train_dataset=self.ds_train,
            eval_dataset=self.ds_dev,
            tokenizer=self.tokenizer,
            data_collator=self.data_collator,
            compute_metrics=self.compute_metrics_method,
            callbacks=[self.log_callback],
            optimizers=(self.opt, torch.optim.lr_scheduler.LambdaLR(self.opt, lr_lambda=self.lr_lambda_method, verbose=True))
        )
        print('trainer', self.trainer)

    def start_training(self):
        """Run the HuggingFace Trainer loop configured in :meth:`init`."""
        sep('Train it')
        self.trainer.train()

    def lr_lambda_method_warm_up_gammar(self, step):
        """LambdaLR multiplier: linear warm-up, then exponential decay by GAMMAR
        per step, floored at LR_MIN_RATE."""
        if not step:
            return 1.0
        if step <= self.WARM_UP_STEPS:
            lr_rate = 1.0 / self.WARM_UP_STEPS * step
            return lr_rate
        else:
            lr_rate = self.GAMMAR ** (step - self.WARM_UP_STEPS)
            lr_rate = max(lr_rate, self.LR_MIN_RATE)
            return lr_rate

    def lr_lambda_method_warm_up_constant(self, step):
        """LambdaLR multiplier: linear warm-up, then constant 1.0."""
        if not step:
            return 1.0
        if step <= self.WARM_UP_STEPS:
            lr_rate = 1.0 / self.WARM_UP_STEPS * step
            return lr_rate
        else:
            return 1.0

    def lr_lambda_method_constant(self, step):
        """LambdaLR multiplier: constant 1.0 (no schedule)."""
        return 1.0

    def remove_punc_core(self, texts):
        """Segment each text with LTP and drop punctuation and particles.

        :param texts: list of raw Chinese strings.
        :return: list of space-joined word strings, aligned with ``texts``.
        """
        # Run LTP word segmentation (cws) and POS tagging (pos) in one pipeline call.
        words_dict = self.ltpo.pipeline(texts, tasks=['cws', 'pos'], return_dict=True)
        segs = words_dict.cws
        poss = words_dict.pos

        # Drop punctuation ('wp') and particles ('u') before joining.
        results = []
        for i, seg in enumerate(segs):
            no_punctuation = [word for word, flag in zip(seg, poss[i]) if flag not in set(['wp', 'u'])]
            results.append(' '.join(no_punctuation))
        return results


    def remove_punc(self, text):
        """Return the cached spaced form of ``text``; fall back to LTP on a cache miss.

        Misses are expected to be rare because :meth:`compute_score_pass002`
        pre-populates the Redis cache, hence the BROKEN-PROTOCOL warning.
        """
        if text == '':
            return ''
        xmd5 = md5(text)
        st = self.rdb.hget(self.REDIS_KEY, xmd5)
        if st is not None:
            return st.decode('utf8')
        print('**** **** **** **** BROKEN-PROTOCOL **** **** **** ****', flush=True, file=sys.stderr)
        results = self.remove_punc_core([text])
        result = results[0]
        self.rdb.hset(self.REDIS_KEY, xmd5, result)
        return result

    def normalize_answer(self, s):
        """Normalize Chinese text for scoring: segment, strip punctuation/particles,
        collapse whitespace and lowercase."""

        def white_space_fix(text):
            # Collapse runs of whitespace into single spaces.
            return ' '.join(text.split())

        s = self.remove_punc(s)
        s = white_space_fix(s)
        s = s.lower()

        return s

    def f1_score(self, prediction, ground_truth):
        """Word-level F1 between a prediction and one ground truth (SQuAD style)."""
        prediction_tokens = self.normalize_answer(prediction).split()
        ground_truth_tokens = self.normalize_answer(ground_truth).split()
        cp = Counter(prediction_tokens)
        cg = Counter(ground_truth_tokens)
        common = cp & cg
        num_same = sum(common.values())
        if num_same == 0:
            return 0
        precision = 1.0 * num_same / len(prediction_tokens)
        recall = 1.0 * num_same / len(ground_truth_tokens)
        f1 = (2 * precision * recall) / (precision + recall)
        return f1

    def exact_match_score(self, prediction, ground_truth):
        """True iff prediction and ground truth are identical after normalization."""
        p = self.normalize_answer(prediction)
        g = self.normalize_answer(ground_truth)
        return p == g

    def metric_max_over_ground_truths(self, metric_fn, prediction, ground_truths):
        """Best metric value of ``prediction`` over all ground truths.

        NOTE: ``ground_truths`` must be non-empty (CMRC2018 guarantees this).
        """
        scores_for_ground_truths = []
        for ground_truth in ground_truths:
            score = metric_fn(prediction, ground_truth)
            scores_for_ground_truths.append(score)
        return max(scores_for_ground_truths)

    def compute_score_pass001(self, dataset, predictions):
        """Pass 1: collect every prediction and ground-truth string into one set."""
        sep('compute_score_pass001')
        for p in predictions:
            self.compute_score_text_set.add(p)
        for i, article in enumerate(dataset):
            ground_truths = [answer['text'] for answer in article['As']]
            for g in ground_truths:
                self.compute_score_text_set.add(g)

    def compute_score_pass002(self, dataset, predictions):
        """Pass 2: batch-segment all texts missing from the Redis cache with LTP."""
        sep('compute_score_pass002')
        xlist = list(self.compute_score_text_set)
        for text in xlist:
            if text == '':
                continue
            if self.rdb.hget(self.REDIS_KEY, md5(text)) is None:
                self.compute_score_text_set_new.add(text)

        xlist = list(self.compute_score_text_set_new)
        xlen = len(xlist)
        n_batch = math.ceil(xlen / self.BATCH_SIZE_LTPO)
        for i in range(n_batch):
            texts = xlist[(i*self.BATCH_SIZE_LTPO):((i+1)*self.BATCH_SIZE_LTPO)]
            spaced_texts = self.remove_punc_core(texts)
            for j, st in enumerate(spaced_texts):
                self.rdb.hset(self.REDIS_KEY, md5(texts[j]), st.encode('utf8'))


    def compute_score(self, dataset, predictions):
        """Compute corpus-level exact match and F1 (both in percent).

        :param dataset: iterable of examples with an 'As' answers field.
        :param predictions: predicted answer strings, aligned with ``dataset``.
        :return: dict with keys 'exact_match' and 'f1'.
        """
        self.compute_score_text_set = set()
        self.compute_score_text_set_new = set()

        sep('compute_score')

        self.compute_score_pass001(dataset, predictions)
        self.compute_score_pass002(dataset, predictions)
        sep('compute_score_pass003')
        f1 = exact_match = total = 0
        for i, article in enumerate(dataset):
            total += 1

            ground_truths = [answer['text'] for answer in article['As']]
            prediction = predictions[i]
            exact_match += self.metric_max_over_ground_truths(self.exact_match_score, prediction, ground_truths)
            f1 += self.metric_max_over_ground_truths(self.f1_score, prediction, ground_truths)

        exact_match = 100.0 * exact_match / total
        f1 = 100.0 * f1 / total

        return {"exact_match": exact_match, "f1": f1}

    def compute_metrics_method(self, eval_pred):
        """Trainer ``compute_metrics`` hook: decode argmax spans and score them."""
        pred = eval_pred[0]
        starts = np.argmax(pred[0], axis=-1)
        ends = np.argmax(pred[1], axis=-1)

        references = self.ds_dict_['dev']

        predictions = []
        for i, ids in enumerate(self.ds_dev['input_ids']):
            pred_ids = ids[starts[i]:ends[i]+1]
            pred_text = self.tokenizer.decode(pred_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            # Strip the spaces decode() inserts around CJK characters.
            pred_text = self.regexp_spaces1.sub(r'\1', pred_text)
            pred_text = self.regexp_spaces2.sub(r'\1', pred_text)
            predictions.append(pred_text)

        score = self.compute_score(dataset=references, predictions=predictions)
        return score

    def check_preprocess(self):
        """Eyeball check: decode the labeled spans back to text and print them
        next to the original gold answers."""
        sep('Check the preprocessing')

        sep('Data before preprocess')
        print("self.ds_dict_['train'][:3]", self.ds_dict_['train'][:3])
        print("self.ds_dict_['dev'][:3]", self.ds_dict_['dev'][:3])

        sep('Data after preprocess')
        print("self.ds_train[:3]", self.ds_train[:3])
        print("self.ds_dev[:3]", self.ds_dev[:3])

        sep('Compare answers')

        def check_answer(examples):
            # Decode each (start, end) token span back into answer text.
            input_ids = examples['input_ids']
            starts = examples['start_positions']
            ends = examples['end_positions']
            id_list = []
            for i, ids in enumerate(input_ids):
                ans = ids[starts[i]:ends[i]+1]
                id_list.append(ans)
            answer_list = self.tokenizer.batch_decode(id_list, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            answer_list = [ self.regexp_spaces1.sub(r'\1', text) for text in answer_list ]
            answer_list = [ self.regexp_spaces2.sub(r'\1', text) for text in answer_list ]
            return {
                'answer_text': answer_list
            }

        self.ds_train_check = self.ds_train.map(check_answer, batched=True)
        print('ds_train_check', self.ds_train_check)

        self.ds_dev_check = self.ds_dev.map(check_answer, batched=True)
        print('ds_dev_check', self.ds_dev_check)

        sep('check training data')
        for i, txt in enumerate(self.ds_train_check['answer_text'][:10]):
            print(i, f'|{txt}|{[ans["text"] for ans in self.ds_dict_["train"]["As"][i]]}|')

        sep('check dev data')
        for i, txt in enumerate(self.ds_dev_check['answer_text'][:10]):
            print(i, f'|{txt}|{[ans["text"] for ans in self.ds_dict_["dev"]["As"][i]]}|')


if __name__ == '__main__':

    def main():
        """Configure logging and hyper-parameters, then run the QAZH pipeline."""
        sep('Start')
        TVTS_NAME = 'cmrc2018-bert-large-zh'
        print('TVTS_NAME:', TVTS_NAME)

        sep('Logger')
        # Log next to this script, under the experiment name.
        XDIR, _, _ = get_dir_name_ext(os.path.abspath(__file__))
        log_path = os.path.join(XDIR, f'{TVTS_NAME}.log')
        print('log_path', log_path)
        LOG_FORMAT = "%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(name)s: %(message)s"
        logging.basicConfig(
            level=logging.DEBUG,
            filename=log_path,
            format=LOG_FORMAT,
        )
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        print(logger)

        # Route the HF trainer's logger through the same handlers as the root logger.
        logger_name = "transformers.trainer"
        sep(f'logger: {logger_name}')
        trainer_logger = logging.getLogger(logger_name)
        for hd in logging.getLogger().handlers:
            trainer_logger.addHandler(hd)
        trainer_logger.setLevel(logging.DEBUG)

        # Flattened CMRC2018 dataset (New PC).
        path2cmrc_hf = '/home/yunpeng/code_github/cmrc2018/squad-style-data/hf'  # New PC,

        # QA model checkpoint (New PC).
        model_path = '/home/yunpeng/models/hf/bert-large-chinese/cde4a45'  # New PC

        # LTP model used by the metrics. Alternatives:
        #   "LTP/small" — downloaded from huggingface by default (may need a proxy)
        #   WSL: "/home/peiyp2004/.cache/huggingface/hub/models--LTP--small/snapshots/0b3e08649fe02688112fa21e69e3eec38101fcaa"
        #   New PC small: "/home/yunpeng/models/hf/ltp-small/0b3e08"
        model_path_ltp = "/home/yunpeng/models/hf/ltp-base2/70c5701"  # base2

        is_temp = 1
        MEMO = 'OOP code: batch 16, lr 2.5e-5 / 2, bf16'
        LR = 2.5e-5 / 2  # learning rate
        LR_TYPE = 'warm_up_constant'
        LR_MIN_RATE = 0.33
        GAMMAR = 0.99
        # Smoke-test runs warm up faster and evaluate less often per epoch.
        if is_temp:
            warm_up, M, N = 0.5, 2, 2
        else:
            warm_up, M, N = 0.1, 5, 2

        qazh = QAZH(
            TVTS_NAME=TVTS_NAME,
            path2cmrc_hf=path2cmrc_hf,
            model_path=model_path,
            model_path_ltp=model_path_ltp,
            LTPO_IDX=0,
            TEMP=is_temp,
            TEMP_LEN=300,
            TEMP_LEN_DEV=300,
            BATCH_SIZE=8,
            BATCH_SIZE_TGT=16,
            GRAD_ACC=1,
            BATCH_SIZE_EVAL=16,
            BATCH_SIZE_LTPO=16,
            LR=LR,
            LR_MIN_RATE=LR_MIN_RATE,
            GAMMAR=GAMMAR,
            LR_TYPE=LR_TYPE,
            WARM_UP=warm_up,
            N=N,
            M=M,
            logger=logger,
            MEMO=MEMO,
        )
        qazh.init()
        qazh.check_preprocess()
        print('LTPO DEV', qazh.LTPO_DEV_STR)
        qazh.is_interrupt()
        qazh.start_training()

    main()
    sep('All over')
