import json

from plugins.common import settings, success_print, is_json
from tools.tools import (
    search_water_account,
    search_water_bill,
    search_water_notice,
    query_fee_standards,
)  # every callable tool in tools.tools must be listed here

def load_model():
    """Load the ChatGLM tokenizer and model into the module globals
    ``tokenizer`` and ``model``.

    Reads the model path (and optional LoRA adapter path) from
    ``settings.model.chatglm``. The model is placed on CUDA and switched
    to eval mode. If a LoRA adapter is configured, the base model is
    wrapped with it via peft before being published.
    """
    global model, tokenizer
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        settings.model.chatglm.path, local_files_only=True, trust_remote_code=True)
    _model = AutoModel.from_pretrained(
        settings.model.chatglm.path, local_files_only=True, trust_remote_code=True,
        device='cuda')

    # If a LoRA fine-tuned adapter is configured, wrap the base model with it.
    # ('' and None both mean "no adapter".)
    if settings.model.chatglm.lora:
        from peft import PeftModel
        # BUG FIX: the original passed the global `model` here, which is not
        # assigned until the end of this function — on the first call with a
        # LoRA configured this raised NameError. Use the freshly loaded _model.
        _model = PeftModel.from_pretrained(
            _model, settings.model.chatglm.lora,
            adapter_name=settings.model.chatglm.lora, device='cuda')

    # Publish only after the (possibly wrapped) model is fully constructed.
    model = _model.eval()

def chat_one(prompt, history_formatted, max_length, top_p, temperature):
    """Run one chat turn against the global ChatGLM model.

    If the model replies with a JSON tool call of the form
    ``{"name": <tool name>, "parameters": <args>}``, dispatch to the matching
    tool function, then feed the tool's result back to the model as an
    ``observation`` turn so it can produce a natural-language answer.

    Args:
        prompt: the user's message for this turn.
        history_formatted: prior conversation history in the format
            expected by ``model.chat``.
        max_length / top_p / temperature: generation parameters.

    Returns:
        dict with keys ``response`` (final model text) and ``history``
        (updated conversation history).
    """
    # Tools the model is allowed to invoke, keyed by function name.
    # BUG FIX: the original called response_obj['name'](...) directly —
    # a string is not callable; the name must be resolved to a function.
    tool_dispatch = {
        f.__name__: f
        for f in (search_water_account, search_water_bill,
                  search_water_notice, query_fee_standards)
    }

    response, history = model.chat(tokenizer, prompt, history=history_formatted,
        max_length=max_length, top_p=top_p, temperature=temperature)

    # If the response is JSON containing "name" and "parameters", treat it as
    # a tool call: run the tool, then let ChatGLM continue with the result.
    if is_json(response):
        try:
            response_obj = json.loads(response)
            if 'name' in response_obj and 'parameters' in response_obj:
                tool = tool_dispatch.get(response_obj['name'])
                if tool is not None:  # ignore tool names we don't provide
                    result = tool(response_obj['parameters'])
                    # Feed the tool output back as an observation turn.
                    response, history = model.chat(
                        tokenizer, result, history=history, role="observation")
        except json.JSONDecodeError:
            # is_json() said it was JSON but parsing failed; fall through and
            # return the raw model response unchanged (best-effort behavior).
            pass

    return {"response": response, "history": history}
 

