import requests
import json
import os

def ask_glm(content, timeout=60):
    """Send a prompt to a local Ollama-compatible generate endpoint.

    Args:
        content: The user's prompt text, forwarded verbatim to the model.
        timeout: Seconds to wait for the HTTP response before giving up.
            Prevents the program from hanging forever on a stalled server.

    Returns:
        The parsed JSON response dict on HTTP 200, otherwise None
        (failure details are printed to stdout).
    """
    # Endpoint is overridable via env var; defaults to a local Ollama server.
    url = os.getenv("GLM_API_URL", "http://127.0.0.1:11434/api/generate")
    data = {
        "model": "qwen:1.8b",
        "prompt": content,  # forward the user's question to the model
        "stream": False,    # request a single complete response, not a stream
    }

    try:
        # timeout guards against a hung connection (original had none).
        response = requests.post(url, json=data, timeout=timeout)
        if response.status_code == 200:
            return response.json()
        print(f"请求失败，状态码：{response.status_code}, 响应内容：{response.text}")
    except requests.RequestException as e:
        # Narrowed from bare Exception: covers connection errors, timeouts,
        # and invalid responses raised by requests.
        print(f"请求大模型API时发生错误：{e}")
    return None

def main():
    """Interactive REPL: read a question, query the model, print the answer.

    Loops until the user enters 'q' (case-insensitive).
    """
    while True:
        user_question = input("请输入你的问题（输入'q'退出）：")

        # Guard clause: bail out of the loop on the quit command.
        if user_question.lower() == 'q':
            print("退出程序")
            break

        answer = ask_glm(user_question)

        # Derive the text to display, falling back to error messages
        # when the call failed or the model returned an empty answer.
        if not (answer and 'response' in answer):
            message_content = '无法获取答案，请稍后重试。'
        else:
            message_content = answer.get('response', '') or '无法回答问题'

        print(f"【用户提问】\n{user_question}")
        print(f"【模型回答】\n{message_content}")

if __name__ == "__main__":
    main()