#!/usr/bin/python3
# -*- coding: utf-8 -*-

'''
# __license__ = "MIT"
__author__    = "Z"
__version__   = "0.0.1"
__copyright__ = "Copyright (C) 2025 z"

@ Modified:    2025-02-20
@ Description: client wrapper for the AnythingLLM chat API
'''

import json
import requests
from config.system import ANYTHINGLLM_CHAT_WEBHOOK, ANYTHINGLLM_APP_KEY

class AnythingLLM:
    """Thin client for the AnythingLLM workspace chat API.

    The endpoint URL and API key are taken from ``config.system``
    (``ANYTHINGLLM_CHAT_WEBHOOK`` / ``ANYTHINGLLM_APP_KEY``).
    """

    def __init__(self):
        pass

    @staticmethod
    def _strip_think(answer):
        """Return *answer* with a leading ``<think>...</think>`` section removed.

        Some models prepend their chain-of-thought wrapped in think tags;
        callers only want the text after the closing tag.  If no closing
        tag is present, the answer is returned unchanged.
        """
        _, sep, tail = answer.partition('</think>')
        # lstrip() instead of skipping exactly one character: robust even
        # when the model emits zero or several newlines after the tag.
        return tail.lstrip() if sep else answer

    def getcode_ai(self, taskName="你好"):
        """Send *taskName* as a chat message and return the model's answer.

        Parameters
        ----------
        taskName : str
            The message to send (defaults to a greeting).

        Returns
        -------
        str or None
            The answer text with any leading think-block stripped, or
            ``None`` if the request failed (errors are printed, not raised).
        """
        try:
            print('Send message >>>')
            json_data = {
                'message': taskName,
                'mode': 'chat',
            }
            headers = {
                'Content-Type': 'application/json',
                'Authorization': f'Bearer {ANYTHINGLLM_APP_KEY}',
                'accept': 'application/json',
            }
            # timeout prevents this call from hanging forever if the
            # AnythingLLM server is unreachable or stalled.
            response = requests.post(
                ANYTHINGLLM_CHAT_WEBHOOK,
                headers=headers,
                json=json_data,
                timeout=60,
            )
            # Fail fast on HTTP errors instead of trying to parse an
            # error page as JSON further down.
            response.raise_for_status()
            answer = str(response.json()['textResponse'])
            final_answer = self._strip_think(answer)
            print("AI的回答:", final_answer)
            return str(final_answer)
        except Exception as e:
            # Best-effort contract: log and fall through (implicitly
            # returning None) rather than propagating to the caller.
            print(f"运行出错: {e}")
        finally:
            print("运行结束")
