from flask import request, Response, stream_with_context, current_app
from app.api import bp
from app.models import SystemPrompt
from app.models.knowledge_base import KnowledgeBase, Document
from app.services.chat_service import ChatService
from app.services.embedding_service import EmbeddingService
from app.core.vector_store import VectorStore
from app.utils.response import APIResponse
from app.utils.errors import ValidationError
import json
import requests
import logging

logger = logging.getLogger(__name__)

# Fallback system prompt used when the client supplies no prompt id,
# the id does not resolve, or the lookup raises.
_DEFAULT_SYSTEM_MESSAGE = (
    "你是一个智能助手，请基于给定的上下文信息回答用户问题。"
    "如果无法从上下文中找到答案，请明确告知。"
)


def _resolve_system_message(system_prompt_id):
    """Return the system prompt content for ``system_prompt_id``.

    Falls back to ``_DEFAULT_SYSTEM_MESSAGE`` when the id is falsy, no such
    prompt exists, or the database lookup fails (best-effort: a bad prompt id
    must not break the chat).
    """
    if not system_prompt_id:
        return _DEFAULT_SYSTEM_MESSAGE
    try:
        system_prompt = SystemPrompt.query.get(system_prompt_id)
        if system_prompt:
            return system_prompt.content
    except Exception as e:
        logger.error(f"Error getting system prompt: {e}")
    return _DEFAULT_SYSTEM_MESSAGE


@bp.route('/chat/<int:kb_id>', methods=['POST'])
def chat(kb_id):
    """Stream a RAG chat completion for knowledge base ``kb_id``.

    Request JSON:
        query (str, required): the user question.
        model (str, optional): Ollama model name, default ``"openchat"``.
        system_prompt (int, optional): ``SystemPrompt`` id to use.

    Returns:
        A ``text/event-stream`` response whose ``data:`` events carry either
        ``{"text": ...}`` chunks or a terminal ``{"error": ...}`` payload.
        On pre-stream failure, an ``APIResponse.error`` JSON body instead.
    """
    try:
        # 404 early if the knowledge base does not exist (side effect only).
        kb = KnowledgeBase.query.get_or_404(kb_id)

        data = request.get_json()
        if not data or 'query' not in data:
            raise ValidationError('Query is required')

        query = data['query']
        model = data.get('model', 'openchat')  # default Ollama model

        # Embed the query so we can search the vector store.
        embedding_service = EmbeddingService()
        try:
            query_vector = embedding_service.generate_embedding(query)
            logger.info(f"Generated embedding for query: {query[:50]}...")
        except Exception as e:
            logger.error(f"Error generating embedding: {str(e)}")
            raise ValidationError(f"Error generating embedding: {str(e)}")

        # Retrieve the most relevant document chunks for grounding context.
        try:
            docs = VectorStore.search(
                query_vector=query_vector,
                kb_id=kb_id,
                max_results=current_app.config['VECTOR_SEARCH'].get('top_k_results', 5),
                similarity_threshold=current_app.config['VECTOR_SEARCH'].get('similarity_threshold', 0.5)
            )
            logger.info(f"Found {len(docs)} relevant documents")
        except Exception as e:
            logger.error(f"Error searching documents: {str(e)}")
            raise ValidationError(f"Error searching documents: {str(e)}")

        context = "\n\n".join(doc.content for doc in docs) if docs else ""

        system_message = _resolve_system_message(data.get('system_prompt'))

        messages = [
            {"role": "system", "content": system_message},
            {"role": "system", "content": f"以下是<Reference></Reference>涉及的上下文信息：\n\n{context}"},
            {"role": "user", "content": query}
        ]

        # Resolve config before entering the generator so the value is fixed
        # when streaming starts.
        ollama_url = current_app.config['OLLAMA'].get('base_url', 'http://localhost:11434')

        def generate():
            """Proxy Ollama's streaming chat response as SSE data events."""
            try:
                # timeout=(connect, per-chunk read): a dead or wedged backend
                # cannot hang this worker forever.  `with` ensures the
                # streaming connection is released on every exit path.
                with requests.post(
                    f"{ollama_url}/api/chat",
                    json={
                        "model": model,
                        "messages": messages,
                        "stream": True
                    },
                    stream=True,
                    timeout=(10, 300)
                ) as response:
                    if response.status_code != 200:
                        error_msg = json.dumps({"error": "Chat service unavailable"})
                        yield f"data: {error_msg}\n\n"
                        return

                    for line in response.iter_lines():
                        if not line:
                            continue
                        try:
                            # Each line is one JSON object from Ollama.
                            chunk = json.loads(line)
                        except json.JSONDecodeError as e:
                            logger.error(f"Error parsing Ollama response: {e}")
                            continue

                        if "error" in chunk:
                            error_msg = json.dumps({"error": chunk["error"]})
                            yield f"data: {error_msg}\n\n"
                            return

                        # Forward the generated text fragment to the client.
                        if "message" in chunk:
                            text = chunk["message"]["content"]
                            yield f"data: {json.dumps({'text': text})}\n\n"

            except Exception as e:
                logger.error(f"Error in chat stream: {e}")
                error_msg = json.dumps({"error": str(e)})
                yield f"data: {error_msg}\n\n"

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream'
        )

    except Exception as e:
        logger.error(f"Chat processing error: {str(e)}")
        return APIResponse.error(str(e))

@bp.route('/chat/stop', methods=['POST'])
def stop_chat():
    """Stop the currently running chat generation.

    Returns:
        ``APIResponse.success`` on success, ``APIResponse.error`` with the
        exception message otherwise.
    """
    try:
        ChatService.stop_current_chat()
        return APIResponse.success(message="Chat stopped")
    except Exception as e:
        # Log before responding, consistent with the other handlers here.
        logger.error(f"Error stopping chat: {str(e)}")
        return APIResponse.error(message=str(e))

@bp.route('/chat/reset', methods=['POST'])
def reset_chat():
    """Reset the conversation.

    Currently a no-op acknowledgement; session-cleanup logic (e.g. clearing
    conversation state) can be added here later.
    """
    try:
        return APIResponse.success(message="Chat reset successfully")
    except Exception as e:
        logger.error(f"Error resetting chat: {str(e)}")
        return APIResponse.error(str(e))