import json
import logging
import time
from typing import Any, Dict, List, Mapping, Optional

import requests
import torch

import langchain
from langchain.cache import InMemoryCache
from langchain.llms.base import LLM
logging.basicConfig(level=logging.INFO)
# 启动llm的缓存
langchain.llm_cache = InMemoryCache()

class ChatGLM(LLM):
    # 模型服务url，这里地址一定要与
    url = "http://127.0.0.1:7866/chat"	 	#本机地址
    #url = "http://192.168.3.20:7866/chat"  #内网其他机器上的
    history = []

    @property
    def _llm_type(self) -> str:
        return "chatglm"

    def _construct_query(self, prompt: str) -> Dict:
        """构造请求体
        """
        query = {            "human_input": prompt        }

        query = {"query": prompt, "history": self.history}
        import json
        query = json.dumps(query)  # 对请求参数进行JSON编码
        print("query"+query)
        return query

    @classmethod
    def _post(self, url: str,              query: Dict) -> Any:

        """POST请求"""
        response = requests.post(url, data=query).json()

        return response

    def _call(self, prompt: str,              stop: Optional[List[str]] = None) -> str:
        """_call"""
        # construct query
        query = self._construct_query(prompt=prompt)

        # post
        response = self._post(url=self.url,query=query)
        response_chat = response["response"];
        self.history = response["history"]
        return response_chat

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters.
        """
        _param_dict = {
            "url": self.url
        }
        return _param_dict
if __name__ == "__main__":
    llm = ChatGLM()
    while True:
        human_input = input("Human: ")
        begin_time = time.time() * 1000
        # 请求模型
        response = llm(human_input)
        end_time = time.time() * 1000
        used_time = round(end_time - begin_time, 3)
        logging.info(f"chatGLM process time: {used_time}ms")
        print(f"ChatGLM: {response}")
