import os
import requests
import json
from dotenv import load_dotenv

load_dotenv()  # Pull GLM_* settings from a local .env file into os.environ.

# Base URL of the GLM chat-completions endpoint; may be None here —
# call_glm() validates it before every request.
API_URL = os.getenv("GLM_API_BASE_URL")
# Headers shared by every request; the bearer token comes from GLM_API_KEY.
# NOTE(review): if GLM_API_KEY is unset this renders as "Bearer None" —
# there is no validation for the key, only for the URL. Confirm intended.
HEADERS = {
    'Content-Type': 'application/json',
    'Authorization': f'Bearer {os.getenv("GLM_API_KEY")}'
}

def _strip_code_fence(content: str) -> str:
    """Remove a surrounding Markdown code fence (``` or ```python) if present.

    The previous slice-based cleanup (`strip()[9:-3]`) assumed the reply both
    started with ```python and ended with ```, silently chopping three real
    characters off replies without a closing fence. This version only removes
    what is actually there, and also handles a bare ``` opening fence.
    """
    text = content.strip()
    if not text.startswith("```"):
        return content
    newline = text.find("\n")
    if newline == -1:
        # Degenerate single-line fence; nothing useful to strip.
        return text
    text = text[newline + 1:]  # Drop the opening fence line (e.g. ```python).
    if text.endswith("```"):
        text = text[:-3]
    return text.strip()


def call_glm(prompt: str) -> str:
    """Call the local GLM-4 chat-completions endpoint and return its reply.

    Args:
        prompt: User prompt forwarded to the model (wrapped in /nothink tags).

    Returns:
        The model's reply with any surrounding Markdown code fence removed,
        or a human-readable error string if the request or parsing fails.

    Raises:
        ValueError: If GLM_API_BASE_URL is not configured in the environment.
    """
    if not API_URL:
        raise ValueError("GLM_API_BASE_URL not found in .env file")

    payload = {
        # Model name is now overridable via .env (GLM_MODEL), as the original
        # TODO requested; the old hard-coded value remains the default.
        "model": os.getenv("GLM_MODEL", "GLM-LATEST-AWQ"),
        "messages": [
            {"role": "user", "content": f"/nothink {prompt} /nothink"}
        ],
        "stream": False  # Demo project uses non-streaming responses.
    }

    try:
        # `json=` lets requests serialize the payload itself; the explicit
        # Content-Type in HEADERS is preserved.
        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=180)
        response.raise_for_status()  # Raise on HTTP 4xx/5xx.

        result = response.json()
        content = result['choices'][0]['message']['content']
        return _strip_code_fence(content)
    except requests.exceptions.RequestException as e:
        return f"Error calling GLM API: {e}"
    except (KeyError, IndexError) as e:
        # `response` is always bound here: these keys can only fail after
        # a successful post() above.
        return f"Error parsing GLM response: {e}\nResponse: {response.text}"