import importlib
import json
from .base import GLModel, QwenModel
from utils.db import client
from copy import deepcopy
import datetime

# Module-level DB client; shared by all requests (side effect at import time).
C = client()

# Registry mapping the model-name string stored in the DB to its adapter
# instance. Keys must match exactly what C.get_model_by_id() returns.
# NOTE(review): adapters are constructed eagerly at import — confirm they
# do no network I/O in __init__.
modules = {
    'chatglm-pro': GLModel('chatglm-pro'),
    "glm-4": GLModel('glm-4'),
    '通义千问-Turbo': QwenModel('通义千问-Turbo'),
    '通义千问-Plus': QwenModel('通义千问-Plus'),
    '通义千问-Max': QwenModel('通义千问-Max')
}

def get_response(params):
    """Stream a model reply as Server-Sent Events and persist the exchange.

    Looks up the adapter for ``params['model_id']``, streams its chunks to
    the caller as ``event: message`` SSE frames, and — once the stream
    finishes — saves the prompt, the reply and the chat item via the DB
    client ``C``.

    Args:
        params: request dict; reads ``model_id``, ``prompt``, ``stream``,
            ``use_context``, ``role_id``. Mutated in place (``model``,
            ``created_at`` etc. are added) — callers should not reuse it.

    Yields:
        SSE-formatted strings (``event: message\\ndata: ...``).
    """
    model_name = C.get_model_by_id(params.get('model_id', 1))
    params['model'] = model_name

    # Unknown model name: report a 'failed' event instead of raising a
    # KeyError in the middle of an SSE response.
    module = modules.get(model_name)
    if module is None:
        yield f"event: message\ndata: {json.dumps({'type':'failed', 'msg':f'unknown model: {model_name}'}, ensure_ascii=False)} \n\n"
        return

    ## TODO: validate params (prompt present, user/session ids, etc.)
    response = module.get_response(params)
    chcontent = ''      # accumulated reply text
    token_info = None   # provider usage stats, if any chunk carries them

    ## Model call failed: adapters signal errors by returning a string.
    if isinstance(response, str):
        yield f"event: message\ndata: {json.dumps({'type':'failed', 'msg':response}, ensure_ascii=False)} \n\n"
        return

    if params.get('stream', True):
        yield f"event: message\ndata: {json.dumps({'type':'start'}, ensure_ascii=False)} \n\n"
        for item in response:
            if hasattr(item, 'choices') and item.choices:
                delta = item.choices[0].delta
                # delta.content can be present but None on the final chunk.
                if getattr(delta, 'content', None):
                    chcontent += delta.content
            # Guard the attribute too (only some chunks/providers carry usage).
            if getattr(item, 'usage', None):
                token_info = dict(item.usage)

            yield f"event: message\ndata: {json.dumps(item.dict(), ensure_ascii=False)} \n\n"

        # Usage may never arrive; fall back to an empty dict so the
        # .get() calls below don't blow up on None.
        token_info = token_info or {}

        # Save the prompt only after the reply finished, so an aborted
        # stream leaves no half-recorded exchange.
        now = datetime.datetime.now()
        params['created_at'] = now
        params['updated_at'] = now
        params['content'] = params['prompt']
        params['tokens'] = token_info.get('prompt_tokens')
        params['use_context'] = 1 if params.get('use_context') == 'true' else 0
        C.save_chat_history(params)

        ## Save the reply row.
        reply = deepcopy(params)
        reply['type'] = 'reply'
        reply['content'] = chcontent
        reply['created_at'] = datetime.datetime.now()
        reply['updated_at'] = reply['created_at']
        reply['tokens'] = token_info.get('completion_tokens')
        reply['icon'] = C.get_role_icon_by_id(reply.get('role_id'))
        C.save_chat_history(reply)

        ## Save the chat item (conversation entry titled by the prompt).
        chat_item = deepcopy(params)
        chat_item['title'] = chat_item['prompt']
        C.save_chat_item(chat_item)
    # NOTE(review): stream=False is silently ignored (nothing is yielded or
    # saved) — TODO confirm whether non-streaming responses should be
    # forwarded and persisted here.



