"""

['<|im_start|>!!!!!!!!<|im_end|>\n<|im_start|>!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!<|im_end|>\n<|im_start|>!!你应该尽早手术治疗了，首选微创腹腔镜胆囊切除术，你做过甲状腺手术，对本次手术没有影响。至于要我主刀，你可以与我的助手王维东主任联系，请与王维东医师联系：4006606120－62761。<|im_end|>\n<|im_start|>!!!!!!!!!!!<|im_end|>\n<|im_start|>!!欢迎！<|im_end|>\n<|im_start|>!!!!!!!!!!!!!!!!!!!!!!!!!<|im_end|>\n<|im_start|>!!总费用12000元.夹子或可吸收缝线均有,都是可行的.我们不用金属夹子.<|im_end|>\n!!!!!!!!!',
 '<|im_start|>!!!!!!!!<|im_end|>\n<|im_start|>!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!<|im_end|>\n<|im_start|>!!这就是浆细胞性乳腺炎，简称浆乳，必需手术治疗，但不是切开引流术啊。中药或消炎治疗，还会反复发作，几年不好，建议看看本网站浆乳的文章或。<|im_end|>\n<|im_start|>!!!!!!!!!!!!!!!!!!<|im_end|>\n<|im_start|>!!也叫导管周围炎，必需手术才能根治。<|im_end|>\n<|im_start|>!!!!!!!!!!!!!!!!!!!!!!!!!!!!!<|im_end|>\n<|im_start|>!!现在就有一位岳阳的病人住院，我们有长沙的多位患者，您说呢？像这类问题不要浪费咨询次数。<|im_end|>\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!']

"""

import sys
# sys.path.insert(0, r'C:\Users\peter\PycharmProfProj\VenvP38Tf23Pt\gitee')
sys.path.insert(0, '/home/peiyp2004/code')
from python_nlp.datasets.load_large_json import load_large_json
from PyCmpltrtok.common import sep, uuid
print('Importing transformers and Qwen ...')
from transformers import AutoConfig, GenerationConfig, AutoTokenizer
from Qwen.finetune import preprocess
print('Importing over.')


def load_tokenizer(model_name="/home/yunpeng/models/hf/Qwen-1_8B-Chat", precision="fp16"):
    """Load the Qwen tokenizer and sync its special-token ids with the model configs.

    Args:
        model_name: HuggingFace model id or local snapshot path of the Qwen
            checkpoint (default: the local Qwen-1_8B-Chat snapshot).
        precision: which single precision flag to enable on the model config;
            one of 'bf16', 'fp16', 'fp32'.

    Returns:
        The ``AutoTokenizer`` with ``eos_token_id``/``bos_token_id`` taken from
        the model config and ``pad_token_id`` taken from the generation config.

    Raises:
        ValueError: if ``precision`` is not one of the three supported options.
    """
    print('-------------------------------------------------------')
    print('正在加载模型……')

    def float_set(config, option):
        # Enable exactly one precision flag; the other two are forced False.
        if option not in ('bf16', 'fp16', 'fp32'):
            raise ValueError("Invalid option. Please choose one from 'bf16', 'fp16' and 'fp32'.")
        config.bf16 = option == 'bf16'
        config.fp16 = option == 'fp16'
        config.fp32 = option == 'fp32'

    config = AutoConfig.from_pretrained(
        model_name,
        trust_remote_code=True,
    )
    # NOTE: if you use the old version of model file, set config.use_flash_attn = False
    float_set(config, precision)
    generation_config = GenerationConfig.from_pretrained(
        model_name, trust_remote_code=True
    )

    tokenizer = AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=True
    )
    # The tokenizer does not carry these ids by default for this remote-code
    # checkpoint, so copy them from the model / generation configs explicitly.
    tokenizer.eos_token_id = config.eos_token_id
    tokenizer.bos_token_id = config.bos_token_id
    tokenizer.pad_token_id = generation_config.pad_token_id

    sep('tokenizer')
    print(tokenizer)
    sep()
    print('模型已经加载完毕。')
    return tokenizer


if __name__ == '__main__':

    from make_dataset_json import med_dialog_list2qwen_format

    def main(
        n_turn=1,
        num=4,
        xfile_path='/home/yunpeng/datasets/med_dialog/train_data.1turn-x4.tmp.json',
        max_len=512,
    ):
        """Sample dialogs and inspect the Qwen preprocessing round-trip.

        Scans up to 1000 records from ``xfile_path``, keeps the first ``num``
        dialogs that have exactly ``n_turn`` turns, runs them through
        ``preprocess`` with ``max_len``, then prints the decoded input ids and
        labels for manual inspection.

        Args:
            n_turn: number of dialog turns a record must have to be kept.
            num: how many matching records to collect.
            xfile_path: path to the large JSON dataset to stream from.
            max_len: maximum token length passed to ``preprocess``.
        """
        tokenizer = load_tokenizer()

        cnt = 0
        raw_data = []
        # Each record is a flat list of alternating utterances, so a dialog of
        # n_turn turns has exactly n_turn * 2 entries.
        for i, x in enumerate(load_large_json(xfile_path, limit=1000), start=1):
            if len(x) != n_turn * 2:
                continue
            sep()
            print(i, x)
            sep()
            print('\n'.join(x))
            sep()
            raw_data.append(med_dialog_list2qwen_format(x))
            cnt += 1
            if cnt >= num:
                break

        sources = [example["conversations"] for example in raw_data]
        print(sources)
        data_dict = preprocess(sources, tokenizer, max_len)
        print(data_dict)

        # -100 marks positions ignored by the loss; map them to token id 0 so
        # the label tensor can be decoded like a normal id sequence.
        data_dict['labels'][data_dict['labels'] == -100] = 0
        for idx, (ids, label, mask) in enumerate(
            zip(data_dict['input_ids'], data_dict['labels'], data_dict['attention_mask'])
        ):
            # Decode a few tokens past the attended span to eyeball the padding.
            n_show = mask.long().sum() + 5
            decoded = tokenizer.decode(ids[:n_show], skip_special_tokens=False)
            print(idx, decoded)
            decoded02 = tokenizer.decode(label[:n_show], skip_special_tokens=False)
            print(idx, decoded02)

    main()
