from PyCmpltrtok.common import sep
sep('import')
import logging
from PyCmpltrtok.common import sep, get_dir_name_ext
import os
from cmrc2018_bert_large_chinese_OOP_ckpt import QAZH

if __name__ == '__main__':

    def main():
        """Run an evaluation-only pass of a bert-large-zh QA model on CMRC2018.

        Sets up file logging next to this script, forwards the
        ``transformers.trainer`` logger into the same handlers, fixes the
        run configuration (paths, LR settings), then drives ``QAZH`` with
        ``only_eval=True`` so no training happens.
        """
        sep('Start')
        TVTS_NAME = 'cmrc2018-bert-large-zh'
        print('TVTS_NAME:', TVTS_NAME)

        sep('Logger')
        # The log file is written alongside this script.
        XDIR, _, _ = get_dir_name_ext(os.path.abspath(__file__))
        log_path = os.path.join(XDIR, f'{TVTS_NAME}.log')
        print('log_path', log_path)
        LOG_FORMAT = "%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(name)s: %(message)s"
        logging.basicConfig(
            level=logging.DEBUG,
            filename=log_path,
            format=LOG_FORMAT,
        )
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        print(logger)

        # Attach the root logger's handlers to the transformers trainer
        # logger so trainer/eval progress lands in the same log file.
        # (Plain for-loop: a comprehension used only for side effects is
        # an anti-pattern.)
        logger_name = "transformers.trainer"
        sep(f'logger: {logger_name}')
        for hd in logging.getLogger().handlers:
            logging.getLogger(logger_name).addHandler(hd)
        logging.getLogger(logger_name).setLevel(logging.DEBUG)

        # Dataset in SQuAD-style HF format.
        path2cmrc_hf = '/home/yunpeng/code_github/cmrc2018/squad-style-data/hf'  # New PC,

        # Checkpoint under evaluation.
        model_path = '/home/yunpeng/checkpoints/cmrc2018-bert-large-zh/2024_11_09_19_38_34_516629_temp0/checkpoint-1260'  # New PC

        # Reference result previously obtained for the checkpoint above:
        """
        {'eval_loss': 1.1583034992218018, 'eval_model_preparation_time': 0.005, 'eval_exact_match': 61.01273687480584, 'eval_f1': 77.44823305384921, 'eval_runtime': 35.9364, 'eval_samples_per_second': 89.575, 'eval_steps_per_second': 2.811}
        """

        # LTP tokenizer/model path options for different machines:
        # model_path_ltp = "LTP/small"  # downloaded from Hugging Face by default; may require a proxy
        # WSL
        # model_path_ltp = "/home/peiyp2004/.cache/huggingface/hub/models--LTP--small/snapshots/0b3e08649fe02688112fa21e69e3eec38101fcaa"
        # New PC
        # model_path_ltp = "/home/yunpeng/models/hf/ltp-small/0b3e08"  # small
        model_path_ltp = "/home/yunpeng/models/hf/ltp-base2/70c5701"  # base2

        MEMO = 'OOP code with bert-large-zh Evaluation'
        is_temp = 0  # <---------------------- 1 = short smoke-test run, 0 = full run

        LR = 2.5e-5 / 2  # learning rate
        # Named toggle instead of a bare `if 0:` — currently disabled, so
        # LR_TYPE stays None (constant learning rate).
        USE_LR_SCHEDULE = False
        if USE_LR_SCHEDULE:
            # LR_TYPE = 'warm_up_constant'
            LR_TYPE = 'warm_up_gammar'
        else:
            LR_TYPE = None

        LR_MIN_RATE = 0.33
        GAMMAR = 0.99

        # Smoke-test runs use a larger warm-up fraction and smaller M;
        # M/N presumably control epoch/round counts — TODO confirm against QAZH.
        if is_temp:
            warm_up = 0.5
            M = 2
            N = 2
        else:
            warm_up = 0.1
            M = 4
            N = 2

        qazh = QAZH(
            only_eval=True,  # <---------------------- evaluation only, no training
            force_continue=False,  # <----------------------
            TVTS_NAME=TVTS_NAME,
            path2cmrc_hf=path2cmrc_hf,
            model_path=model_path,
            model_path_ltp=model_path_ltp,
            TEMP=is_temp,
            TEMP_LEN=64,
            TEMP_LEN_DEV=64,
            BATCH_SIZE=4,
            BATCH_SIZE_TGT=8,
            GRAD_ACC=1,
            BATCH_SIZE_EVAL=32,
            BATCH_SIZE_LTPO=32,
            WARM_UP=warm_up,
            logger=logger,
            MEMO=MEMO,
            M=M,
            N=N,
            LR=LR,
            LR_TYPE=LR_TYPE,
            LR_MIN_RATE=LR_MIN_RATE,
            GAMMAR=GAMMAR,
        )
        qazh.init()
        qazh.check_preprocess()
        print('LTPO DEV', qazh.LTPO_DEV_STR)
        qazh.is_interrupt()
        qazh.start_eval()

    main()
    sep('All over')
