'''

推理 基于transformers库
'''
import json
import os
import sys
import time
from threading import Thread

from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig, TextIteratorStreamer

import torch

def empt_fun(*args, **kwargs):
    """No-op stand-in for the builtin ``print``, used to mute all debug output.

    Accepts any positional/keyword arguments so every ``print(...)`` call
    signature keeps working — the original single-parameter version would
    raise TypeError on calls like ``print(a, b)`` or ``print(x, end='')``.
    """
    pass


# Shadow the builtin print for this module so the debug prints below are silenced.
print = empt_fun

# # 设置设备参数
# DEVICE = "cuda"  # 使用CUDA
# DEVICE_ID = "0"  # CUDA设备ID，如果未设置则为空
# CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE  # 组合CUDA设备信息
#
#
# # 清理GPU内存函数
# def torch_gc():
#     if torch.cuda.is_available():  # 检查是否可用CUDA
#         with torch.cuda.device(CUDA_DEVICE):  # 指定CUDA设备
#             torch.cuda.empty_cache()  # 清空CUDA缓存
#             torch.cuda.ipc_collect()  # 收集CUDA内存碎片


def get_database():
    """Create or load the local vector store used for RAG retrieval.

    On first run, reads the data files, splits them into chunks, embeds each
    chunk and persists the result; later runs load the persisted store directly.
    """
    sys.path.append(r'D:\code\other\LLMs\third\tiny-universe\content\TinyRAG')
    datafiles_dir = r'D:\code\other\LLMs\third\tiny-universe\content\TinyRAG\data\datafiles'
    storage_dir = r'D:\code\other\LLMs\third\tiny-universe\content\TinyRAG\data\storage'  # todo
    from RAG.VectorBase import VectorStore
    from RAG.utils import ReadFiles
    from RAG.Embeddings import JinaEmbedding, ZhipuEmbedding, UserEmbedding

    store = VectorStore(bd_model=UserEmbedding())  # embedding model: chunk text -> feature vector
    if os.path.exists(storage_dir):
        # A persisted database already exists — load it instead of re-embedding.
        store.load_vector(storage_dir)
        return store

    # First run: read files into chunk strings, embed them, and persist.
    chunks = ReadFiles(datafiles_dir).get_content(max_token_len=100, cover_content=20)  # [chunk_str]
    print(chunks)
    # embedding = ZhipuEmbedding()  # alternative EmbeddingModel
    store.document = chunks
    store.get_vector()  # chunks -> embedding vectors
    store.persist(path=storage_dir)  # save vectors + documents so the next run can just load them
    return store


# # 格式https://qwen.readthedocs.io/zh-cn/latest/framework/function_call.html
# # k1.k2.kn:value 例子 function的parameters的properties的query的description是 'search query to look up'
# tools_schema =\
# [{'type': 'function',
#   'function': {
#       'name': 'tavily_search_results_json',
#       'description': 'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events. Input should be a search query.',
#       'parameters': {
#           'properties': {
#               'query': {
#                   'description': 'search query to look up',
#                   'type': 'string'}
#           },
#           'required': ['query'],
#           'type': 'object'}
#   }
#   },
#  {'type': 'function',
#   'function': {
#       'name': 'human_assistance_tool3',
#       'description': 'Request assistance from a human.',
#       'parameters': {
#           'properties': {
#               'name': {
#                   'type': 'string'},
#               'birthday': {
#                   'type': 'string'}},
#           'required': [
#               'name',
#               'birthday'],
#           'type': 'object'}}},
#  {'type': 'function',
#   'function': {
#         'name': 'open_pycharm',
#         'description': '这是一个工具，可以打开pycharm编程软件,工具执行结果为True或false,表示打开成功或打开失败',
#         'parameters': {
#             'properties': {},
#             'type': 'object'}
#   }}]
#
# # 用于DeepSeek-R1-Distill-Qwen-1.5B
# system_prompt = "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>"
# for tool_schema in tools_schema:
#     system_prompt+= '\n'+ json.dumps(tool_schema)
# system_prompt += "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n"
def get_llm_infer(lora_path=None, is_rag=False, model_path=None, model_name=''):
    """Load a HF causal-LM and return an ``infer`` closure for chat inference.

    :param lora_path: optional path to LoRA adapter weights, merged via peft.
    :param is_rag: when True, build/load the local vector database and augment
        each user query with the top retrieved chunk before generation.
    :param model_path: local path or hub id of the pretrained model.
    :param model_name: currently unused; kept for interface compatibility.
    :return: ``infer(prompt_str, messages, is_streamer, tools_schema, max_new_tokens)``
        returning either a one-element list of generated text, or a
        TextIteratorStreamer when ``is_streamer`` is True.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", torch_dtype=torch.bfloat16)
    if lora_path:
        # Attach LoRA adapter weights on top of the base model.
        from peft import PeftModel
        model = PeftModel.from_pretrained(model, model_id=lora_path)

    rag_database = get_database() if is_rag else None

    def infer(prompt_str='', messages=None, is_streamer=False, tools_schema=None, max_new_tokens=512):
        """Run one chat completion.

        :param prompt_str: user prompt, used only when ``messages`` is None.
        :param messages: optional full chat history (list of role/content dicts);
            NOTE: when RAG is enabled the last message is modified in place.
        :param is_streamer: True -> return a streamer for non-blocking output.
        :param tools_schema: optional tool/function schemas serialized into the
            prompt by the tokenizer's chat template.
        :param max_new_tokens: generation length cap.
        """
        if messages is None:
            messages = [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt_str}
            ]
        if rag_database:
            # RAG: retrieve the best-matching chunk and append it to the user query.
            query = messages[-1]['content']
            aug_str = rag_database.query(query, k=1)[0]  # retrieval
            messages[-1]['content'] = query + ' 基于以下信息回答：' + aug_str  # augmentation
            print(messages)
        # Render the chat template to a prompt string, e.g.:
        # '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n
        #  <|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n'
        # The template also serializes tools_schema / tool calls when provided.
        prompt_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True,
                                                    tools=tools_schema, chat_template=None)
        print(prompt_text)
        # Tokenize onto the model's own device instead of a hard-coded 'cuda',
        # so CPU-only machines and device_map placements both work.
        model_inputs = tokenizer([prompt_text], return_tensors="pt").to(model.device)

        if not is_streamer:
            generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=max_new_tokens,
                                           synced_gpus=False)
            # Strip the prompt tokens from each output row before decoding.
            trimmed = [out_row[len(in_row):]
                       for in_row, out_row in zip(model_inputs.input_ids, generated_ids)]
            res = [tokenizer.batch_decode(trimmed, skip_special_tokens=True)[0]]  # token ids -> text
        else:
            # Non-blocking streaming: generate() runs in a background thread and
            # pushes decoded text pieces into the streamer's internal queue;
            # the caller iterates the streamer for real-time output.
            streamer = TextIteratorStreamer(tokenizer=tokenizer, skip_special_tokens=True,
                                            skip_prompt=True, timeout=10)  # seconds
            gen_kwargs = {'inputs': model_inputs.input_ids, 'streamer': streamer,
                          'max_new_tokens': max_new_tokens, 'synced_gpus': False}
            Thread(target=model.generate, kwargs=gen_kwargs).start()  # TODO thread lifecycle
            res = streamer
        # torch_gc()  # optional GPU memory cleanup
        return res

    return infer


def get_llm_infer_vLLM(model_path=r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct'):
    """Load a model with the vLLM engine and return an ``infer`` closure.

    :param model_path: local path or hub id of the model; the default keeps
        the previously hard-coded path so existing callers are unaffected.
    :return: ``infer(prompt_str, messages, is_streamer)`` returning the raw
        vLLM output list (``is_streamer`` is currently ignored).
    """
    from vllm import LLM, SamplingParams  # pip install vllm==0.6.3.post1  Linux only  todo
    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
    # Initialize the vLLM inference engine.
    llm = LLM(model=model_path, tokenizer=model_path, max_model_len=2048, trust_remote_code=True)

    def infer(prompt_str='', messages=None, is_streamer=False):
        """Run one chat completion through vLLM and return its output objects."""
        if messages is None:
            messages = [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt_str}
            ]
        # Render the chat template into the prompt string vLLM consumes, e.g.:
        # '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n
        #  <|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n'
        prompt_text = tokenizer.apply_chat_template(messages, tokenize=False,
                                                    add_generation_prompt=True)
        stop_token_ids = [151329, 151336, 151338]
        # temperature controls output diversity; top_p controls nucleus sampling.
        sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=512,
                                         stop_token_ids=stop_token_ids)
        outputs = llm.generate(prompt_text, sampling_params)
        print(outputs)
        return outputs

    return infer


import requests
def get_remote_llm_infer(url='http://192.168.6.14:6006/fastapi_llminfer', timeout=120):
    '''
    Remote LLM inference via a FastAPI server.
    Server side: python test_fastapi_server.py  --model_path='/home/ps/zhangxiancai/llm_deploy/bigfiles/models/DeepSeek-R1-Distill-Qwen-7B'
    :param url: endpoint of the remote /fastapi_llminfer service
    :param timeout: per-request timeout in seconds; without it requests.post
        can block forever on an unresponsive server
    :return: callable taking a prompt string and returning the decoded JSON dict
    '''

    def call_llminfer(prompt: str):
        input_dict = {"prompt": prompt}
        # response = requests.post(url='http://127.0.0.1:6006/fastapi_llminfer', json=input_dict)
        response = requests.post(url=url, json=input_dict, timeout=timeout)
        output_dict = response.json()
        return output_dict

    return call_llminfer


def test_get_llm_infer():
    """Smoke-test the transformers-based inference pipeline end to end."""
    # model_path = r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/DeepSeek-R1-Distill-Qwen-7B'
    model_path = r'D:\code\other\LLMs\models\DeepSeek-R1-Distill-Qwen-1.5B'
    # model_path = r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct'  # supports tools
    llm_infer = get_llm_infer(is_rag=False, model_path=model_path)

    start = time.time()
    # res = llm_infer("打开pycharm编程软件")
    res = llm_infer("Today's weather in Shanghai")
    # res = llm_infer("请问如何学习大模型",is_streamer=True)
    # res = llm_infer("为我用python写一个简单的猜拳小游戏，三局两胜", is_streamer=True)
    # res = llm_infer("特朗普哪一年出生的？", is_streamer=False)
    print(f'time {time.time() - start}s')
    print(res)
    # for piece in res:  # streaming case: consume the streamer piece by piece
    #     print(piece)


def test_get_llm_infer_vLLM():
    """Smoke-test the vLLM-based inference pipeline."""
    infer = get_llm_infer_vLLM()
    print(infer("打开pycharm编程软件"))


# Script entry point.
if __name__ == '__main__':
    test_get_llm_infer()
    # test_get_llm_infer_vLLM()
