import chatglm_cpp
from typing import Any, List, Mapping, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


class ChatglmCppAI(LLM):
    """LangChain LLM wrapper around a local chatglm.cpp pipeline.

    Loads a GGML-format ChatGLM model from disk and serves single-turn
    chat completions through LangChain's ``LLM`` interface.
    """

    # Generation settings forwarded to chatglm_cpp.Pipeline.chat().
    max_token: int = 8192          # generation max_length
    do_sample: bool = False        # greedy decoding when False
    temperature: float = 0.8
    top_p: float = 0.8             # annotated so pydantic registers it as a field
    tokenizer: object = None
    model: object = None
    history: List = []
    tool_names: List = []
    has_search: bool = False

    def __init__(self, model_path: str = "../../chatglm-ggml.bin"):
        """Load the GGML weights into a chatglm_cpp pipeline.

        Args:
            model_path: Path to the chatglm GGML weights file. Defaults
                to the original hard-coded relative path for backward
                compatibility.
        """
        super().__init__()
        self.model = chatglm_cpp.Pipeline(model_path)

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this LLM implementation."""
        return "ChatglmCpp"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Run a single-turn chat completion and return the reply text.

        Args:
            prompt: The user prompt to send to the model.
            stop: Unused; accepted for LLM interface compatibility.
            run_manager: Unused; accepted for LLM interface compatibility.
            **kwargs: Unused; accepted for LLM interface compatibility.

        Returns:
            The assistant message content produced by the model.
        """
        user_messages = [chatglm_cpp.ChatMessage(role='user', content=prompt)]
        chat_message = self.model.chat(
            user_messages,
            max_length=self.max_token,  # was hard-coded 2048; use the configured field
            max_context_length=2048,
            # BUG FIX: do_sample previously received 0.95 (a float meant for a
            # probability knob); chatglm_cpp treats it as a boolean flag, so the
            # declared ``do_sample`` field was silently ignored.
            do_sample=self.do_sample,
            top_k=0,
            top_p=self.top_p,
            temperature=self.temperature,
            repetition_penalty=1.0,
            num_threads=0,  # 0 lets chatglm_cpp choose the thread count
            stream=False,
        )
        return chat_message.content
