from llama_index.llms.huggingface import HuggingFaceLLM
import torch
from pydantic import BaseModel


def qwen2_5_7B_Instruct(
    model_path: str = '/mnt/workspace/models/Qwen2.5-7B-Instruct',
):
    """Build a deterministic Qwen2.5-7B-Instruct LLM via llama-index.

    Parameters
    ----------
    model_path : str, optional
        Local directory containing both the model weights and the tokenizer.
        Defaults to the original hard-coded checkpoint path, so existing
        no-argument callers are unaffected.

    Returns
    -------
    HuggingFaceLLM
        Model loaded in fp16, spread across available devices
        (``device_map="auto"``), with greedy decoding (``do_sample=False``).
    """
    model = HuggingFaceLLM(
        context_window=4096,
        max_new_tokens=2048,
        # temperature is ignored by HF generate when do_sample=False;
        # kept explicit to document the deterministic intent.
        generate_kwargs={"temperature": 0.0, "do_sample": False},
        query_wrapper_prompt="你是一个厉害的智能助手",
        tokenizer_name=model_path,
        model_name=model_path,
        device_map="auto",
        model_kwargs={"torch_dtype": torch.float16},
    )

    # Custom __repr__ to avoid recursion in the default representation.
    def custom_repr(self):
        return f"HuggingFaceLLM(model_name='{self.model_name}', context_window={self.context_window})"

    # NOTE: assigning to model.__class__ patches HuggingFaceLLM *globally*
    # (every instance, every caller). Guard with a marker attribute so
    # repeated calls do not keep re-installing fresh closures.
    if not getattr(type(model).__repr__, "_qwen_repr_patch", False):
        custom_repr._qwen_repr_patch = True
        type(model).__repr__ = custom_repr
    return model

# if __name__ == "__main__":
#     model = qwen2_5_7B_Instruct()
#     print(model)  # now prints the simplified representation