from typing import Dict, List, Optional, Tuple, Union
import os
from dotenv import load_dotenv, find_dotenv

from zhipuai import ZhipuAI



class ZhipuChat():
    """Thin wrapper around the ZhipuAI chat-completions API (GLM models)."""

    def __init__(self) -> None:
        # Load ZHIPUAI_API_KEY from a .env file (if one exists) into the
        # environment, then build the client. Raises KeyError if the key
        # is still missing — fail fast rather than at first request.
        _ = load_dotenv(find_dotenv())
        self.client = ZhipuAI(api_key=os.environ["ZHIPUAI_API_KEY"])

    def gen_glm_params(self, prompt, history=None, meta_instruction=None):
        '''
        Build the `messages` request parameter for a GLM chat completion.

        Args:
            prompt: the current user prompt.
            history: optional list of prior message dicts; not mutated.
            meta_instruction: optional system prompt, used only when no
                history is supplied (history is assumed to already contain
                any system message).

        Returns:
            A list of {"role": ..., "content": ...} dicts ending with the
            user prompt.
        '''
        user_message = {"role": "user", "content": prompt}
        if history is not None:
            # BUG FIX: the original did `messages = history.extend(...)`.
            # list.extend mutates in place and returns None, so messages
            # was always None whenever history was passed (and the caller's
            # list was mutated as a side effect). Concatenation returns a
            # fresh list and leaves `history` untouched.
            return history + [user_message]
        if meta_instruction is not None:
            return [{"role": "system", "content": meta_instruction}, user_message]
        return [user_message]

    def get_completion(self, prompt, model="glm-4", temperature=0.95, history=None, meta_instruction=None):
        '''
        Call the GLM model once and return its answer text.

        Args:
            prompt: the user prompt.
            model: model name; defaults to glm-4 (glm-3-turbo etc. also work).
            temperature: sampling temperature in (0, 1.0]; must not be 0.
                Lower values give more deterministic output.
            history: optional prior conversation turns (message dicts).
            meta_instruction: optional system prompt (see gen_glm_params).

        Returns:
            The first choice's message content, or the fallback string
            "generate answer error" when the API returns no choices.
        '''
        messages = self.gen_glm_params(prompt, history, meta_instruction)
        response = self.client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature
        )
        if len(response.choices) > 0:
            return response.choices[0].message.content
        return "generate answer error"

    def chat(self, prompt: str, history: Optional[List[dict]] = None, meta_instruction=None):
        '''
        One conversational turn: query the model and return the updated history.

        Args:
            prompt: the user's message for this turn.
            history: prior conversation turns; a new list is created when None.
            meta_instruction: optional system prompt for the first turn.

        Returns:
            (response, history) — the assistant's reply and the history list,
            which now ends with this user turn and the assistant's reply.
        '''
        # Uses a low temperature (0.1) for near-deterministic chat replies.
        response = self.get_completion(prompt, history=history, temperature=0.1, meta_instruction=meta_instruction)
        if history is None:
            history = []
        # BUG FIX: the original never recorded the user turn, so the stored
        # history drifted out of sync with what the model actually saw.
        history.append({"role": "user", "content": prompt})
        history.append({"role": "assistant", "content": response})
        return response, history
    
# if __name__ == '__main__':
#     model = ZhipuChat()
#     print(model.chat('Hello', []))
