import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from gxl_ai_utils.utils import utils_file
import os
# os.environ['HF_ENDPOINT']="https://hf-mirror.com"  # just export this in the shell instead; no code changes needed
# export HF_ENDPOINT=https://hf-mirror.com
#export HF_HOME=/mnt/sfs/asr/ckpt
#export TRANSFORMERS_CACHE=/mnt/sfs/asr/ckpt

# Load the tokenizer (and, when uncommented, the model) from a local
# HF snapshot directory so nothing is downloaded at runtime.
# model_path = "/home/work_nfs15/asr_data/ckpt/Phi-3.5-mini-instruct/models--microsoft--Phi-3.5-mini-instruct/snapshots/af0dfb8029e8a74545d0736d30cb6b58d2f0f3f0"
# model_path ="/mnt/sfs/.cache/huggingface/hub/models--Qwen--Qwen2-7B/snapshots/453ed1575b739b5b03ce3758b23befdb0967f40e"
# model_path = "/mnt/sfs/asr/env/.cache/transformers/models--Qwen--Qwen2.5-7B-Instruct-1M/models--Qwen--Qwen2.5-7B-Instruct-1M/snapshots/e28526f7bb80e2a9c8af03b831a9af3812f18fba"
model_path = "/mnt/sfs/asr/env/.cache/transformers/models--Qwen--Qwen2.5-3B-Instruct/snapshots/aa8e72537993ba99e69dfaafa59ed015b17504d1"
# Recorded findings from earlier runs of this script:
# Qwen2.5-7B-Instruct: bos: 151643, eos: 151645, vocab size: 152064
# text: <|endoftext|>,id: tensor([[151643]], device='npu:6')
# id: 151643,text: <|endoftext|>
# id: 151645,text: <|im_end|>
# Qwen2.5-3B-Instruct: bos: 151643, eos: 151645, vocab size: 151936
# text: <|endoftext|>,id: tensor([[151643]], device='npu:6')
# id: 151643,text: <|endoftext|>
# id: 151645,text: <|im_end|>
# model_path = "Qwen/Qwen2.5-3B-Instruct"
# model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,torch_dtype=torch.bfloat16,)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True,)
# device = torch.device("npu:6")
# model
# print(model)

def chat(input_q_text, llm=None, max_new_tokens=512):
    """Run one chat turn through the Qwen chat template and return the reply.

    Args:
        input_q_text: The user's prompt text.
        llm: Object with a ``.generate`` method. Defaults to the module-level
            ``model`` — which is commented out above, so without passing
            ``llm`` (or re-enabling that line) the original code raised a
            bare ``NameError``; we now raise a descriptive ``RuntimeError``.
        max_new_tokens: Generation budget (was hard-coded to 512).

    Returns:
        The decoded assistant response, special tokens stripped.

    Raises:
        RuntimeError: If no model is available (see ``llm`` above).
    """
    if llm is None:
        try:
            llm = model  # module-level model, only exists if the load line above is uncommented
        except NameError:
            raise RuntimeError(
                "No model is loaded: pass `llm=` or uncomment the "
                "AutoModelForCausalLM.from_pretrained(...) line above."
            ) from None
    messages = [
        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
        {"role": "user", "content": input_q_text},
    ]
    # Render the conversation into the model's chat-template string
    # (includes the generation prompt for the assistant turn).
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    print(f'text repr: {repr(text)}')
    model_inputs = tokenizer([text], return_tensors="pt")
    print(f'model_inputs: {model_inputs.input_ids}')

    generated_ids = llm.generate(
        model_inputs.input_ids,
        max_new_tokens=max_new_tokens
    )
    # Strip the prompt tokens so only the newly generated ids remain.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    print(f'generated_ids: {generated_ids}')

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response

# --- Tokenizer probes: special strings and their ids ---------------------
# (Vocab-size probes via model.lm_head were here; they need the model object
# which is commented out above.)
# vocab_size = model.lm_head.weight.shape[1]
# print(f"词表大小: {vocab_size}")
# vocab_size = model.lm_head.weight.shape[0]
# print(f"词表大小: {vocab_size}")


def _print_token_ids(probe_text):
    """Tokenize *probe_text* with the module-level tokenizer and print its ids."""
    probe_ids = tokenizer([probe_text], return_tensors="pt").input_ids
    print(f"text: {probe_text},id: {probe_ids}")


# EOS id as configured on the tokenizer.
eos_token_id = tokenizer.eos_token_id
print(f"EOS的ID: {eos_token_id}")

# Probe a few raw strings (was: repeated tokenize/print triples that also
# shadowed the builtin `id`; folded into a loop over one helper).
for _probe in (
    "<|endoftext|>",
    "&",
    "sfs&erwe&哈哈&试点范围方式&《sfs &fse <&><&&>",
):
    _print_token_ids(_probe)

# Round-trip: decode the known special-token ids back to text.
for _token_id in (151643, 151645):
    print(f"id: {_token_id},text: {tokenizer.decode(_token_id)}")

# Probe candidate pseudo-tags (age/gender markers). Per the captured output
# below, none of these are single tokens — they split into `<`, pieces, `>`.
for _probe in (
    "<",
    ">",
    "<CHILD>",
    "<ADULT>",
    "<MALE>",
    "我是更重撒飞洒胜多负少胜多负少是的是的<FEMALE>",
    "我是更重撒飞洒胜多负少胜多负少是的是的<&&>",
    "我是更重撒飞洒胜多负少胜多负少是的是的<&>",
):
    _print_token_ids(_probe)


print('===================================================')


def _print_token_ids(probe_text):
    """Tokenize *probe_text* with the module-level tokenizer and print its ids."""
    probe_ids = tokenizer([probe_text], return_tensors="pt").input_ids
    print(f"text: {probe_text},id: {probe_ids}")


# Second probe pass: pseudo-tags embedded in Chinese/English text, the
# `<think ...>` markers, and full "thought" strings. Was ~25 copies of the
# same tokenize/print triple (shadowing the builtin `id`); now one ordered
# tuple, printed in the exact original order and format.
for _probe in (
    "<",
    ">",
    "<CHILD>",
    "<ADULT>",
    "<OLD>",
    "你好歹撒发生阿萨是否<CHILD>首发式发生",
    "后三段飞洒发啊水水<ADULT>发生发撒",
    "手动阀十分啊<OLD>撒法发撒",
    "hello world<CHILD>xxix",
    "hwfsdf<ADULT>sfdds",
    "hello<OLD>sf撒发生",
    ",年龄为:<CHILD>xxix",
    ",年龄为:<ADULT>sfdds",
    ",年龄为:<OLD>sf撒发生",
    ":<",
    ",年龄为:<think end>",
    "<think end>",
    "think end>",
    ",年龄为:<ADULT>,",
    ",年龄为:<ADULT>,",  # duplicated in the original; kept so output is identical
    # Four near-identical "thought" strings, differing only in ?/?./space
    # right after 吗 — comparing how punctuation shifts tokenization.
    "<think>用户说的话是:日本和中国之间发生过战争吗?,年龄为:<ADULT>,性别为:<MALE>,风格为:<日常口语>,情感为:<NEUTRAL>,声音事件为:<OTHER>,推测使用的回复情感为:<NEUTRAL>,我应该综合用户的语义和副语言信息给出专业且对应的回答<think end>",
    "<think>用户说的话是:日本和中国之间发生过战争吗? ,年龄为:<ADULT>,性别为:<MALE>,风格为:<日常口语>,情感为:<NEUTRAL>,声音事件为:<OTHER>,推测使用的回复情感为:<NEUTRAL>,我应该综合用户的语义和副语言信息给出专业且对应的回答<think end>",
    "<think>用户说的话是:日本和中国之间发生过战争吗?.,年龄为:<ADULT>,性别为:<MALE>,风格为:<日常口语>,情感为:<NEUTRAL>,声音事件为:<OTHER>,推测使用的回复情感为:<NEUTRAL>,我应该综合用户的语义和副语言信息给出专业且对应的回答<think end>",
    "<think>用户说的话是:日本和中国之间发生过战争吗?. ,年龄为:<ADULT>,性别为:<MALE>,风格为:<日常口语>,情感为:<NEUTRAL>,声音事件为:<OTHER>,推测使用的回复情感为:<NEUTRAL>,我应该综合用户的语义和副语言信息给出专业且对应的回答<think end>",
):
    _print_token_ids(_probe)
"""
text: <,id: tensor([[27]])
text: >,id: tensor([[29]])
text: <CHILD>,id: tensor([[   27, 99119,    29]])
text: <ADULT>,id: tensor([[  27, 1808, 3532,   29]])
text: <MALE>,id: tensor([[  27, 4835,  867,   29]])
text: 我是更重撒飞洒胜多负少胜多负少是的是的<FEMALE>,id: tensor([[104198,  33126,  29258, 104856,  99723, 102737,  99813,  42140,  99393,
          82647,  99813,  42140,  99393,  82647,  20412, 100146,   9370,  30499,
          88299,     29]])
text: 我是更重撒飞洒胜多负少胜多负少是的是的<&&>,id: tensor([[104198,  33126,  29258, 104856,  99723, 102737,  99813,  42140,  99393,
          82647,  99813,  42140,  99393,  82647,  20412, 100146,   9370,     27,
           7672,     29]])
text: 我是更重撒飞洒胜多负少胜多负少是的是的<&>,id: tensor([[104198,  33126,  29258, 104856,  99723, 102737,  99813,  42140,  99393,
          82647,  99813,  42140,  99393,  82647,  20412, 100146,   9370,  52244,
             29]])
===================================================
text: <,id: tensor([[27]])
text: >,id: tensor([[29]])
text: <CHILD>,id: tensor([[   27, 99119,    29]])
text: <ADULT>,id: tensor([[  27, 1808, 3532,   29]])
text: <OLD>,id: tensor([[  27, 7863,   29]])
text: 你好歹撒发生阿萨是否<CHILD>首发式发生,id: tensor([[108386, 119026, 104856,  99726,  99727, 100841,  64471,     27,  99119,
             29, 107221,  28330,  99726]])
text: 后三段飞洒发啊水水<ADULT>发生发撒,id: tensor([[ 33447,  44991,  37474,  99723, 102737,  28291, 103924,  52510,  52510,
             27,   1808,   3532,     29,  99726,  28291, 104856]])
text: 手动阀十分啊<OLD>撒法发撒,id: tensor([[110867, 102860, 101918, 103924,     27,   7863,     29, 104856,  24339,
          28291, 104856]])
text: hello world<CHILD>xxix,id: tensor([[14990,  1879,    27, 99119,    29,  4146,   941]])
text: hwfsdf<ADULT>sfdds,id: tensor([[27827,  3848,  2940,    27,  1808,  3532,    29, 17246, 33650]])
text: hello<OLD>sf撒发生,id: tensor([[ 14990,     27,   7863,     29,  17246, 104856,  99726]])
text: ,年龄为:<CHILD>xxix,id: tensor([[    11, 102185,  17714,  31252,  99119,     29,   4146,    941]])
text: ,年龄为:<ADULT>sfdds,id: tensor([[    11, 102185,  17714,  31252,   1808,   3532,     29,  17246,  33650]])
text: ,年龄为:<OLD>sf撒发生,id: tensor([[    11, 102185,  17714,  31252,   7863,     29,  17246, 104856,  99726]])
text: :<,id: tensor([[31252]])
text: ,年龄为:<think end>,id: tensor([[    11, 102185,  17714,  31252,  26865,    835,     29]])
text: <think end>,id: tensor([[13708,   766,   835,    29]])
text: think end>,id: tensor([[26865,   835,    29]])
"""
