from abc import abstractmethod, ABCMeta
import sys
sys.path.append('/datas/projects/python_backends/src')
from utils.db import client
import json
from openai import OpenAI


# Shared project DB client: quota checks, API-key lookup, role settings,
# chat history and call accounting all go through this singleton.
C = client()

class chatModel:
    """Base class for chat model backends.

    Provides the shared plumbing on top of the module-level DB client ``C``:
    quota checking, API-key lookup, message-context assembly and call
    accounting. Subclasses implement ``get_response``.
    """

    def __init__(self):
        # All DB access goes through the module-level client ``C``.
        pass

    def check(self, params):
        """Return True when the user still has calls left for this model.

        ``params`` is the request dict; the exact keys consumed are defined
        by ``C.check_calls``.
        """
        calls = C.check_calls(params)
        return calls >= 1

    def get_key(self, params):
        """Fetch the API key for the requested model (first row, first column
        of the DB result)."""
        data = C.get_apikey_for_model(params)
        return data[0][0]

    def get_context(self, params):
        """Assemble the OpenAI-style message list.

        Starts with the role's system profile; when the client sends
        ``use_context == 'true'`` (string flag), appends the chat history.
        History rows arrive newest-first and are reversed to chronological
        order; rows with type 'prompt' map to the 'user' role, everything
        else to 'assistant'.
        """
        profile = C.get_role_setting(params)
        messages = [{
            'role': 'system',
            'content': profile[0][0]
        }]

        if params.get('use_context') == 'true':
            history = C.get_history_for_context(params)
            messages += [
                {'role': 'user' if item[0] == 'prompt' else 'assistant',
                 'content': item[1]}
                for item in reversed(history)
            ]

        return messages

    def reset_calls(self, params):
        # Billing: decrement the user's remaining call count.
        C.reset_calls(params)

    def get_response(self, query, stream=True):
        # Subclasses must override. Raising is safer than the original
        # ``return NotImplemented``: that sentinel is truthy and could be
        # silently iterated/compared by callers instead of failing fast.
        raise NotImplementedError


class GLModel(chatModel):
    """ChatGLM backend using the ZhipuAI SDK."""

    def __init__(self, name):
        super().__init__()
        # User-facing display name; the API model id is passed to
        # ``_get_response`` (defaults to 'glm-4').
        self.name = name

    def _get_response(self, params, messages, stream=True, model='glm-4'):
        """Call the ZhipuAI chat-completion endpoint.

        :param params: request dict (used for API-key lookup and billing).
        :param messages: OpenAI-style message list.
        :param stream: stream the completion (default True).
        :param model: ZhipuAI model id; previously hard-coded, now a
            backward-compatible parameter defaulting to 'glm-4'.
        :return: the SDK response object (a stream when ``stream=True``).
        """
        # Imported lazily so the zhipuai package is only required when the
        # GLM backend is actually used.
        from zhipuai import ZhipuAI
        client = ZhipuAI(api_key=self.get_key(params))
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            stream=stream,
        )
        # On success, charge the user one call.
        # NOTE(review): this relies on the SDK exposing the raw HTTP response
        # as ``response.response`` — confirm against the installed zhipuai
        # version, especially for streaming responses.
        if response.response.status_code == 200:
            self.reset_calls(params)
        return response

    def get_response(self, params):
        """Quota-check, build the message context, append the new prompt and
        dispatch to the API. Returns an error string when out of calls."""
        if not self.check(params):
            return '调用次数不足，请充值'

        messages = self.get_context(params)
        messages.append(
            {'role': 'user', 'content': params.get('prompt')}
        )
        return self._get_response(params, messages)


class QwenModel(chatModel):
    """Tongyi Qianwen (Qwen) backend via DashScope's OpenAI-compatible API."""

    def __init__(self, name):
        super().__init__()
        # Maps the user-facing display name to the DashScope model id.
        self.qwen_model_map = {
            '通义千问-Turbo': 'qwen-turbo',
            '通义千问-Plus': 'qwen-plus',
            '通义千问-Max': 'qwen-max'
        }
        self.name = name

    def _get_response(self, params, messages, stream=True):
        """Call the DashScope chat-completion endpoint.

        :param params: request dict (used for API-key lookup).
        :param messages: OpenAI-style message list.
        :param stream: stream the completion (default True).
        :return: the SDK response object (a stream when ``stream=True``).
        """
        # SECURITY: a hard-coded API key previously leaked here in a
        # commented-out line; it has been removed. Rotate that key.
        model_name = self.qwen_model_map.get(self.name)
        client = OpenAI(
            api_key=self.get_key(params),
            base_url='https://dashscope.aliyuncs.com/compatible-mode/v1'
        )

        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            stream=stream,
        )

        # TODO(review): unlike GLModel, this path never calls
        # ``self.reset_calls(params)``, so Qwen usage is not billed —
        # confirm whether that is intentional.
        return response

    def get_response(self, params):
        """Quota-check, build the message context, append the new prompt and
        dispatch to the API. Returns an error string when out of calls."""
        if not self.check(params):
            return '调用次数不足，请充值'

        messages = self.get_context(params)
        messages.append(
            {'role': 'user', 'content': params.get('prompt')}
        )
        return self._get_response(params, messages)


class ErnieModel(chatModel):
    # Placeholder: Baidu ERNIE backend not implemented yet; inherits the
    # base get_response.
    pass

class SparkModel(chatModel):
    # Placeholder: iFlytek Spark backend not implemented yet; inherits the
    # base get_response.
    pass


if __name__ == '__main__':
    # Manual smoke test: a captured request payload replayed against a live
    # backend (requires DB access and a valid API key — not a unit test).
    params = {'session_id': '73nf1wdbpmcyc1wr5uf1kxpskj4f9ny4ko2abkms1q', 'role_id': '1', 'chat_id': '010d4518-ab81-46ea-80de-1629d6f40538', 'model_id': '17', 'type': 'prompt', 'prompt': '你是个机器人吗？', 'user_id': '115', 'icon': 'http://static.nilco2.com/headimgs/00000-2342545241.png', 'use_context': 'true', 'model': 'chatglm-pro'}

    # m = GLModel('chatglm-pro')
    m = QwenModel('通义千问-Turbo')
    # Streaming response: iterate the chunks and print each one as a dict.
    for i in m.get_response(params):
        print(dict(i))