import requests
import json
import time
from backend.knowledge_service import knowledge_service


def call_openai_api(conversation_history):
    """
    Call the OpenAI chat-completions API and return the assistant's reply.

    Args:
        conversation_history: List of ``{'role': ..., 'content': ...}`` dicts
            in OpenAI chat format; sent verbatim as the ``messages`` payload.

    Returns:
        str: The reply text on success, or an error-description string on any
        failure — callers always receive a plain string, never an exception.
    """
    try:
        import os
        openai_api_key = os.getenv('OPENAI_API_KEY')
        if not openai_api_key:
            raise ValueError("未配置OpenAI API密钥")

        response = requests.post(
            'https://api.openai.com/v1/chat/completions',
            headers={
                'Content-Type': 'application/json',
                'Authorization': f'Bearer {openai_api_key}'
            },
            json={
                'model': 'gpt-3.5-turbo',
                'messages': conversation_history,
                'max_tokens': 500,
                'temperature': 0.7
            },
            # Without a timeout, requests waits forever on a stalled
            # connection and this call can hang the whole request thread.
            timeout=60
        )

        if response.ok:
            data = response.json()
            return data['choices'][0]['message']['content'].strip()
        else:
            raise Exception(f"OpenAI API调用失败: {response.text}")

    except Exception as e:
        # Degrade gracefully: log and return the error as the reply text so
        # the chat flow keeps working instead of crashing.
        print(f"调用OpenAI API异常: {str(e)}")
        return f"OpenAI API调用失败: {str(e)}"


def call_ollama_api(conversation_history):
    """
    Call a local Ollama server and return the generated reply.

    Args:
        conversation_history: List of ``{'role': ..., 'content': ...}`` dicts;
            flattened into a single ``role: content`` prompt, one message per
            line, since ``/api/generate`` takes a plain prompt string.

    Returns:
        str: The generated text on success, or an error-description string on
        any failure — callers always receive a plain string, never an exception.
    """
    try:
        # Ollama's /api/generate endpoint expects one prompt string, not a
        # chat-message list, so serialize the history line by line.
        prompt = "\n".join([f"{msg['role']}: {msg['content']}" for msg in conversation_history])

        response = requests.post(
            'http://localhost:11434/api/generate',
            headers={'Content-Type': 'application/json'},
            json={
                'model': 'deepseek-r1:1.5b',
                'prompt': prompt,
                'stream': False
            },
            # Local generation can be slow, but the call must not hang
            # indefinitely if the server stalls; allow a generous window.
            timeout=120
        )

        if response.ok:
            data = response.json()
            return data.get('response', '').strip()
        else:
            raise Exception(f"Ollama API调用失败: {response.text}")

    except Exception as e:
        # Degrade gracefully: log and return the error as the reply text so
        # the chat flow keeps working instead of crashing.
        print(f"调用Ollama API异常: {str(e)}")
        return f"Ollama API调用失败: {str(e)}"


def simulate_llm_response(user_message, use_knowledge=True, simulate_delay=True):
    """
    Simulate an LLM reply, optionally grounded in knowledge-base search results.

    Args:
        user_message: The user's latest message text.
        use_knowledge: When True, search the knowledge base and weave matching
            documents into the reply; when False (or nothing relevant is
            found), fall back to canned keyword-based replies.
        simulate_delay: When True (the default, preserving prior behavior),
            sleep 1-2 seconds to fake "thinking" latency. Pass False in tests
            or latency-sensitive callers.

    Returns:
        str: The simulated reply text.
    """
    if simulate_delay:
        # Fake thinking time: 1 second plus a pseudo-random fraction.
        time.sleep(1 + (time.time() % 1))

    # Collect knowledge-base passages relevant to the question.
    knowledge_responses = []

    if use_knowledge:
        try:
            search_result = knowledge_service.search_knowledge(user_message, k=3)
            if search_result['success'] and search_result['results']:
                knowledge_responses = [
                    result['content']
                    for result in search_result['results']
                    # Only keep documents with a high similarity score.
                    if result['similarity_score'] > 50
                ]
        except Exception as e:
            # Best-effort: a knowledge-base failure just means we fall back
            # to the canned replies below.
            print(f"搜索知识库时出错: {str(e)}")

    # With relevant knowledge in hand, build a knowledge-grounded reply.
    if use_knowledge and knowledge_responses:
        combined_knowledge = " ".join(knowledge_responses)
        lower_message = user_message.lower()

        # Product, feature, and after-sales questions all share one reply
        # template, so their keyword lists are checked together.
        if any(keyword in lower_message for keyword in [
            '产品', '提供', '哪些', '有什么',
            '功能', '怎么用', '如何', '支持',
            '售后', '保修', '退换', '服务',
        ]):
            return f"根据我们的知识库，{combined_knowledge} 您对哪款产品更感兴趣？"
        elif any(keyword in lower_message for keyword in ['价格', '多少钱', '费用']):
            return f"关于价格方面，{combined_knowledge} 您需要了解具体价格详情吗？"
        else:
            return f"根据我们的信息，{combined_knowledge} 您还有其他问题吗？"

    # No knowledge available (or knowledge disabled): canned keyword replies.
    lower_message = user_message.lower()

    if '你好' in lower_message or '您好' in lower_message or '嗨' in lower_message:
        return '您好！我是智能客服助手，很高兴为您提供帮助。请问有什么可以帮您的吗？'
    elif any(keyword in lower_message for keyword in ['谢谢', '感谢']):
        return '不客气！如果您有任何其他问题，随时可以向我咨询。'
    elif '再见' in lower_message or '拜拜' in lower_message:
        return '祝您有愉快的一天！如有需要，请随时联系我们。'
    else:
        # Generic acknowledgement echoing the user's question.
        return f'感谢您的咨询。关于"{user_message}"，我可以为您提供更详细的信息。您还有什么具体想了解的吗？'


def call_llm_api(conversation_history, use_ollama=False, use_knowledge=True):
    """
    Unified LLM dispatch: route the conversation to Ollama or OpenAI.

    Args:
        conversation_history: List of ``{'role': ..., 'content': ...}`` dicts
            passed through unchanged to the chosen backend.
        use_ollama: When True, use the local Ollama server; otherwise OpenAI.
        use_knowledge: Accepted for interface compatibility but currently
            unused — neither backend consults the knowledge base here.
            TODO: wire it up or drop it once all callers are updated.

    Returns:
        str: The backend's reply text (or its error-description string).
    """
    # NOTE(review): the original also extracted conversation_history[-1]
    # into an unused local, which crashed on an empty history; removed.
    if use_ollama:
        return call_ollama_api(conversation_history)
    else:
        return call_openai_api(conversation_history)