# -*- coding: utf-8 -*-

import json
import logging
import time
from typing import Optional, List, Dict, Mapping, Any

import langchain
import requests
import torch
from langchain.llms.base import LLM
# from langchain.cache import InMemoryCache

logging.basicConfig(level=logging.INFO)
# 启动llm的缓存
# langchain.llm_cache = InMemoryCache()

class ChatGLM(LLM):
    """LangChain LLM wrapper that proxies prompts to a remote ChatGLM HTTP service.

    The service is expected to accept a JSON body ``{"query": ..., "history": ...}``
    and respond with JSON containing ``"response"`` (the reply text) and
    ``"history"`` (the updated conversation state).
    """

    # URL of the model service; must match the address the GLM service listens on
    # (e.g. another machine on the internal network).
    url: str = "http://127.0.0.1:7866/chat"
    # Conversation history echoed back by the service and replayed on every request.
    history: List = []

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this LLM implementation."""
        return "chatglm"

    def _construct_query(self, prompt: str) -> str:
        """Build the serialized JSON request body for the chat service.

        Args:
            prompt: The user's input text.

        Returns:
            A JSON string of the form ``{"query": ..., "history": ...}`` —
            the service expects a serialized JSON body, not a dict.
        """
        payload = {"query": prompt, "history": self.history}
        return json.dumps(payload)

    @classmethod
    def _post(cls, url: str, query: str) -> Any:
        """POST the serialized query to ``url`` and return the decoded JSON response.

        Args:
            url: Endpoint of the chat service.
            query: JSON string produced by ``_construct_query``.

        Raises:
            requests.HTTPError: if the service responds with an error status.
            requests.Timeout: if the service does not answer within the timeout.
        """
        response = requests.post(
            url,
            data=query,
            headers={"Content-Type": "application/json"},
            timeout=60,  # avoid hanging forever if the service is unreachable
        )
        response.raise_for_status()  # fail loudly instead of choking on a non-JSON error page
        return response.json()

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Send ``prompt`` to the remote ChatGLM service and return its reply.

        Side effect: replaces ``self.history`` with the conversation state
        returned by the service, so multi-turn context is preserved.
        ``stop`` is accepted for LangChain compatibility but not forwarded.
        """
        query = self._construct_query(prompt=prompt)
        response = self._post(url=self.url, query=query)
        self.history = response["history"]
        return response["response"]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"url": self.url}


if __name__ == "__main__":
    llm = ChatGLM()

    # Demo usage (disabled): read a prompt from stdin, time the round-trip
    # to the model service, and print the reply. Wrap in `while True:` for
    # an interactive multi-turn session.
    # human_input = input("Human: ")
    # begin_time = time.time() * 1000
    # response = llm(human_input)
    # end_time = time.time() * 1000
    # used_time = round(end_time - begin_time, 3)
    # logging.info(f"chatGLM process time: {used_time}ms")
    # print(f"ChatGLM: {response}")