# qwen_client.py
import requests
from .config_manager import config_manager

def call_qwen(messages, model=None, temperature=None, stream=None):
    """Call the configured Qwen-compatible chat API and return the reply text.

    Understands both OpenAI-style payloads (``choices[0].message.content``,
    used by DeepSeek etc.) and Ollama-style payloads (``message.content``).

    Args:
        messages: Chat history — a list of ``{"role": ..., "content": ...}`` dicts.
        model: Optional model override; falls back to the provider's configured
            model, then to ``'qwen7b:latest'``.
        temperature: Optional sampling temperature; falls back to the global
            config value (default 0.1).
        stream: Accepted for signature compatibility but ignored — requests are
            always sent with ``stream=False`` because streamed chunked
            responses are not parsed here.

    Returns:
        The assistant reply content as a ``str``, or ``None`` on any failure
        (missing config, HTTP error, empty body, unexpected payload shape).
    """
    # Hoist the global settings dict once instead of re-reading the private
    # attribute on every access (same access pattern as sibling modules).
    cfg = config_manager._config

    # Resolve the active provider's configuration.
    current_provider = config_manager.get_current_provider()
    provider_config = cfg.get('providers', {}).get(current_provider, {})
    if not provider_config:
        print(f"❌ 未找到提供商配置: {current_provider}")
        return None

    # Base connection settings.
    api_url = provider_config.get('api_url')
    final_model = model or provider_config.get('model', 'qwen7b:latest')
    api_key = provider_config.get('api_key', '')

    if not api_url or not final_model:
        print("❌ AI配置不完整")
        return None

    # Request headers; bearer token only when a key is configured.
    headers = {'Content-Type': 'application/json'}
    if api_key:
        headers['Authorization'] = f'Bearer {api_key}'

    # Request payload. Streaming is forced off to avoid having to parse
    # chunked streaming responses.
    data = {
        'model': final_model,
        'messages': messages,
        'temperature': temperature if temperature is not None else cfg.get('temperature', 0.1),
        'max_tokens': cfg.get('max_tokens', 2048),
        'stream': False,
    }

    # Forward optional sampling knobs only when explicitly configured.
    for knob in ('top_p', 'frequency_penalty', 'presence_penalty'):
        value = cfg.get(knob)
        if value is not None:
            data[knob] = value

    try:
        # Build the endpoint manually, consistent with sibling modules.
        # rstrip('/') fixes a doubled slash when api_url has a trailing '/'.
        base_url = api_url.rstrip('/')
        if provider_config.get('type') == 'openai':
            full_url = f"{base_url}/v1/chat/completions"
        else:
            # Ollama format, also the default for unknown provider types.
            full_url = f"{base_url}/api/chat"

        print(f"🔗 调用API: {full_url}")
        print(f"🤖 使用模型: {final_model}")

        response = requests.post(
            full_url,
            headers=headers,
            json=data,
            timeout=cfg.get('timeout', 60),
        )

        # Guard clauses flatten the original nested if/else pyramid.
        if response.status_code != 200:
            print(f"❌ API返回错误状态码: {response.status_code}")
            return None

        if not response.text.strip():
            print("❌ API返回空响应")
            return None

        try:
            result = response.json()
        except ValueError as json_err:
            print(f"❌ JSON解析失败: {json_err}")
            return None

        # OpenAI-style payload (DeepSeek etc.): non-empty choices list.
        if result.get('choices'):
            content = result['choices'][0]['message']['content']
            print(f"✅ AI响应成功，内容长度: {len(content)}")
            return content

        # Ollama-style payload: top-level message object.
        if 'message' in result and 'content' in result['message']:
            content = result['message']['content']
            print(f"✅ AI响应成功，内容长度: {len(content)}")
            return content

        print(f"❌ 响应格式不符合预期")
        return None

    except requests.exceptions.RequestException as e:
        print(f"❌  API请求失败: {e}")
        return None
    except Exception as e:
        # Catch-all boundary: unexpected payload shapes (e.g. KeyError on
        # malformed choices) are reported rather than propagated to callers.
        print(f"❌ API调用异常: {e}")
        return None