# qwen.readthedocs.io/zh-cn/latest/framework/Langchain.html
from transformers import AutoModelForCausalLM, AutoTokenizer
from abc import ABC
from langchain.llms.base import LLM
from typing import Any,List,Mapping,Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun

# Hugging Face Hub repo id for the chat-tuned Qwen2.5 0.5B model.
# BUG FIX: the previous value "Qwen/qwen2.5:0.5b" is an Ollama-style tag
# (name:size), not a valid Hub repo id, so from_pretrained() would fail to
# resolve it. The instruct checkpoint on the Hub is "Qwen/Qwen2.5-0.5B-Instruct".
model_name = "Qwen/Qwen2.5-0.5B-Instruct"

# Load the weights once at import time. device_map="auto" lets accelerate
# place layers on the available device(s); torch_dtype="auto" keeps the
# checkpoint's native dtype instead of upcasting to float32.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)

# Tokenizer must come from the same repo so the chat template matches the model.
tokenizer = AutoTokenizer.from_pretrained(model_name)

class Qwen(LLM,ABC):
    """LangChain custom-LLM wrapper around a locally loaded Qwen chat model."""

    # Token budget attribute. NOTE(review): not referenced anywhere in the
    # visible code — confirm callers actually use it before relying on it.
    max_token: int = 10000
    # Sampling temperature; a very low value keeps generations
    # near-deterministic. NOTE(review): not consumed by the visible
    # generate() call — presumably intended for generation kwargs; verify.
    temperature: float = 0.01
    # Nucleus-sampling cutoff. NOTE(review): unlike the other fields this one
    # has no type annotation — if the LLM base is pydantic-backed it may be
    # treated as a plain class attribute rather than a model field; confirm.
    top_p = 0.9
    # Number of past exchanges to retain; adjustable via set_history_len().
    history_len: int = 3

    def __init__(self):
        """Initialize the wrapper by delegating to the LangChain ``LLM`` base.

        Bug fix: the original body called ``super.__init__()`` — missing the
        call parentheses — which invokes the unbound initializer of the
        ``super`` type itself and raises ``TypeError`` the moment the class
        is instantiated. ``super().__init__()`` correctly runs the base-class
        initializer.
        """
        super().__init__()
    
    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses to label this LLM implementation."""
        llm_type = "Qwen"
        return llm_type
    
    @property
    def _history_len(self) -> int:
        """Expose the configured history window as a read-only property."""
        window = self.history_len
        return window
    
    def set_history_len(self, history_len: int = 10) -> None:
        """Update how many past exchanges are retained (defaults to 10)."""
        self.history_len = history_len
    
    def call(self,
            prompt: str, 
            stop: Optional[List[str]] = None, 
            run_manager: Optional[CallbackManagerForLLMRun] = None
        ) -> str:
        messages =  [
            {"role":"system","content":"You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
            {"role":"user","content":prompt}
        ]
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
        generated_ids = model.generate(
            **model_inputs, 
            max_new_tokens=512
        )
        generated_ids = [
            output_ids[len(input_ids):]
        ]