# app/api/rag/utils.py

import os
import json
import requests
import numpy as np
from typing import List, Tuple, Dict, Any
from pymilvus import connections, Collection, utility

from config.settings import settings
from config.model_config import ModelConfig

def connect_milvus():
    """Establish the default connection to the Milvus database."""
    connections.connect(
        "default",
        host=settings.MILVUS_HOST,
        port=settings.MILVUS_PORT,
    )

def search_text_vectors(query_vector, top_k: int = 10, knowledge_base: str = "default") -> Tuple[List[str], List[float], List[str], List[str], List[Dict[str, Any]]]:
    """Search the text vector collection for chunks similar to the query vector.

    Args:
        query_vector: Query embedding (list or numpy array).
        top_k: Maximum number of raw hits to retrieve from Milvus.
        knowledge_base: Knowledge-base name; non-"default" values are treated
            as partition names within the text collection.

    Returns:
        Tuple of (texts, scores, sources, file_types, position_info); all five
        are empty lists when the collection is missing or holds no data.
    """
    from app.core.logging import get_logger
    from app.core.knowledge_base_manager import get_kb_manager

    logger = get_logger(__name__)

    logger.info(f"开始文本向量搜索，top_k={top_k}, knowledge_base={knowledge_base}")
    logger.debug(f"查询向量维度: {len(query_vector)}")

    # Resolve the collection name for this knowledge base.
    kb_manager = get_kb_manager()
    collection_names = kb_manager.get_collection_names(knowledge_base)
    text_collection_name = collection_names["text_collection"]

    if not utility.has_collection(text_collection_name):
        logger.warning(f"文本集合 {text_collection_name} 不存在")
        return [], [], [], [], []

    collection = Collection(text_collection_name)
    logger.info(f"加载文本集合: {text_collection_name}")
    collection.load()

    # Bail out early when the collection is empty.
    entity_count = collection.num_entities
    logger.info(f"文本集合中的实体数量: {entity_count}")

    if entity_count == 0:
        logger.warning("文本集合中没有数据")
        return [], [], [], [], []

    # Milvus expects a plain list; convert numpy arrays.
    if hasattr(query_vector, 'tolist'):
        query_vector = query_vector.tolist()
        logger.debug("将numpy数组转换为列表格式")

    search_params = {
        "metric_type": "IP",
        "params": {"nprobe": 10},
    }

    logger.info(f"执行向量搜索，参数: {search_params}")

    # Shared arguments for every search variant below.
    search_kwargs = dict(
        data=[query_vector],
        anns_field="vector",
        param=search_params,
        limit=top_k,
        output_fields=["text", "source", "file_type", "page_num", "slide_num", "paragraph_num"],
    )

    # BUG FIX: the original mis-indented code executed the partition-scoped
    # search unconditionally after the if/else, so a missing partition caused
    # a double search whose second call raised out of this function.
    if knowledge_base != "default":
        partition_name = knowledge_base
        if not collection.has_partition(partition_name):
            logger.warning(f"文本集合中不存在分区 {partition_name}，使用默认分区搜索")
            results = collection.search(**search_kwargs)
        else:
            logger.info(f"搜索文本分区: {partition_name}")
            results = collection.search(partition_names=[partition_name], **search_kwargs)
    else:
        # The "default" knowledge base searches the whole collection.
        results = collection.search(**search_kwargs)

    hits = results[0]
    logger.info(f"搜索完成，找到 {len(hits)} 个结果")

    texts = [hit.entity.get('text') for hit in hits]
    scores = [hit.distance for hit in hits]
    sources = [hit.entity.get('source') for hit in hits]
    file_types = [hit.entity.get('file_type') for hit in hits]

    # Per-hit location metadata (page for PDFs, slide for PPTs, paragraph for docs).
    position_info = [
        {
            'page_num': hit.entity.get('page_num', 0),
            'slide_num': hit.entity.get('slide_num', 0),
            'paragraph_num': hit.entity.get('paragraph_num', 0),
        }
        for hit in hits
    ]

    # Filter pass: drop short chunks, low-similarity hits and near-duplicates.
    filtered_results = []
    for i, (text, score, source, file_type, pos_info) in enumerate(zip(texts, scores, sources, file_types, position_info)):
        if len(text.strip()) < 10:
            logger.info(f"过滤文本结果 {i+1}: 内容太短 ({len(text)} 字符)")
            continue

        if score < 0.5:
            logger.info(f"过滤文本结果 {i+1}: 相似度过低 ({score:.4f})")
            continue

        # Character-level Jaccard similarity against already-kept chunks.
        is_duplicate = False
        for existing_result in filtered_results:
            existing_text = existing_result[0]
            if _calculate_text_similarity(text, existing_text) > 0.8:
                is_duplicate = True
                logger.info(f"过滤文本结果 {i+1}: 重复内容")
                break

        if not is_duplicate:
            filtered_results.append((text, score, source, file_type, pos_info))
            logger.info(f"保留文本结果 {i+1}: 相似度={score:.4f}, 来源={source}, 类型={file_type}, 位置={pos_info}")
        logger.debug(f"文本内容: {text[:100]}...")

    # BUG FIX: the re-organization/fallback below was mis-indented INSIDE the
    # loop above, reassigning texts/scores on every iteration; it now runs
    # exactly once after filtering is complete.
    if filtered_results:
        texts, scores, sources, file_types, position_info = (
            list(column) for column in zip(*filtered_results)
        )
    else:
        # Nothing survived the strict filters: retry with relaxed thresholds.
        logger.warning("没有满足条件的搜索结果，降低相似度阈值")
        fallback = [
            (text, score, source, file_type, pos_info)
            for text, score, source, file_type, pos_info in zip(texts, scores, sources, file_types, position_info)
            if score >= 0.4 and len(text.strip()) >= 5
        ]
        if fallback:
            texts, scores, sources, file_types, position_info = (
                list(column) for column in zip(*fallback)
            )
        else:
            texts, scores, sources, file_types, position_info = [], [], [], [], []

    logger.info(f"文本向量搜索完成，返回 {len(texts)} 个过滤后的结果")
    return texts, scores, sources, file_types, position_info

def _calculate_text_similarity(text1: str, text2: str) -> float:
    """计算两个文本的相似度（基于字符重叠）"""
    if not text1 or not text2:
        return 0.0
    
    # 简单的字符重叠相似度计算
    set1 = set(text1.lower())
    set2 = set(text2.lower())
    
    if not set1 or not set2:
        return 0.0
    
    intersection = set1.intersection(set2)
    union = set1.union(set2)
    
    return len(intersection) / len(union) if union else 0.0

def search_image_vectors(query_vector, top_k: int = 10, knowledge_base: str = "default") -> Tuple[List[str], List[float], List[str], List[str], List[Dict[str, Any]]]:
    """Search the image vector collection for images similar to the query vector.

    Args:
        query_vector: Query embedding (list or numpy array); truncated or
            zero-padded to ``settings.IMAGE_VECTOR_DIMENSION`` on mismatch.
        top_k: Maximum number of raw hits to retrieve from Milvus.
        knowledge_base: Knowledge-base name; non-"default" values are treated
            as partition names within the image collection.

    Returns:
        Tuple of (image_paths, scores, sources, file_types, timestamp_info);
        all five are empty lists when the collection is missing or the search
        fails.
    """
    from app.core.knowledge_base_manager import get_kb_manager
    from app.core.logging import get_logger
    from config.settings import settings

    logger = get_logger(__name__)

    # Resolve the collection name for this knowledge base.
    kb_manager = get_kb_manager()
    collection_names = kb_manager.get_collection_names(knowledge_base)
    image_collection_name = collection_names["image_collection"]

    if not utility.has_collection(image_collection_name):
        logger.warning(f"图片集合 {image_collection_name} 不存在")
        return [], [], [], [], []

    collection = Collection(image_collection_name)
    collection.load()

    # Milvus expects a plain list; convert numpy arrays.
    if hasattr(query_vector, 'tolist'):
        query_vector = query_vector.tolist()

    query_dim = len(query_vector)
    expected_dim = settings.IMAGE_VECTOR_DIMENSION

    logger.info(f"图片搜索 - 查询向量维度: {query_dim}, 期望维度: {expected_dim}")

    # Reconcile dimension mismatches: truncate or zero-pad the query vector.
    if query_dim != expected_dim:
        logger.warning(f"向量维度不匹配，尝试调整: {query_dim} -> {expected_dim}")

        if query_dim > expected_dim:
            query_vector = query_vector[:expected_dim]
            logger.info(f"截取向量到 {expected_dim} 维")
        elif query_dim < expected_dim:
            query_vector = query_vector + [0.0] * (expected_dim - query_dim)
            logger.info(f"填充向量到 {expected_dim} 维")

    search_params = {
        "metric_type": "IP",
        "params": {"nprobe": 10},
    }

    # Shared arguments for every search variant below.
    search_kwargs = dict(
        data=[query_vector],
        anns_field="vector",
        param=search_params,
        limit=top_k,
        output_fields=["image_path", "source", "file_type", "timestamp", "frame_index"],
    )

    try:
        # BUG FIX: the original mis-indented code executed the partition-scoped
        # search unconditionally after the if/else, so a missing partition
        # raised and the whole search degraded to empty results.
        if knowledge_base != "default":
            partition_name = knowledge_base
            if not collection.has_partition(partition_name):
                logger.warning(f"图片集合中不存在分区 {partition_name}，使用默认分区搜索")
                results = collection.search(**search_kwargs)
            else:
                logger.info(f"搜索图片分区: {partition_name}")
                results = collection.search(partition_names=[partition_name], **search_kwargs)
        else:
            # The "default" knowledge base searches the whole collection.
            results = collection.search(**search_kwargs)

        hits = results[0]
        image_paths = [hit.entity.get('image_path') for hit in hits]
        scores = [hit.distance for hit in hits]
        sources = [hit.entity.get('source') for hit in hits]
        file_types = [hit.entity.get('file_type') for hit in hits]

        # Per-hit video-frame metadata (timestamp/frame index for video sources).
        timestamp_info = [
            {
                'timestamp': hit.entity.get('timestamp', 0.0),
                'frame_index': hit.entity.get('frame_index', 0),
            }
            for hit in hits
        ]

        # Filter pass: drop low-similarity hits and duplicate image paths.
        filtered_results = []
        for i, (path, score, source, file_type, ts_info) in enumerate(zip(image_paths, scores, sources, file_types, timestamp_info)):
            if score < 0.5:
                logger.info(f"过滤图片结果 {i+1}: 相似度过低 ({score:.4f})")
                continue

            if any(existing[0] == path for existing in filtered_results):
                logger.info(f"过滤图片结果 {i+1}: 重复来源")
                continue

            filtered_results.append((path, score, source, file_type, ts_info))
            logger.info(f"保留图片结果 {i+1}: 相似度={score:.4f}, 来源={source}, 时间戳={ts_info}")

        if filtered_results:
            image_paths, scores, sources, file_types, timestamp_info = (
                list(column) for column in zip(*filtered_results)
            )
        else:
            # Nothing survived the strict filter: retry with a relaxed threshold.
            logger.warning("没有满足条件的图片搜索结果，降低相似度阈值")
            fallback = [
                (path, score, source, file_type, ts_info)
                for path, score, source, file_type, ts_info in zip(image_paths, scores, sources, file_types, timestamp_info)
                if score >= 0.4
            ]
            if fallback:
                image_paths, scores, sources, file_types, timestamp_info = (
                    list(column) for column in zip(*fallback)
                )
            else:
                image_paths, scores, sources, file_types, timestamp_info = [], [], [], [], []

        logger.info(f"图片搜索完成，返回 {len(image_paths)} 个过滤后的结果")
        return image_paths, scores, sources, file_types, timestamp_info

    except Exception as e:
        # Image search is best-effort: any failure degrades to empty results.
        logger.error(f"图片向量搜索失败: {e}")
        return [], [], [], [], []

def call_aliyun_embedding_api(text: str) -> np.ndarray:
    """Embed *text* via the Aliyun DashScope OpenAI-compatible embeddings API.

    Returns a float32 vector of ``settings.VECTOR_DIMENSION``. For empty input
    or any API/parsing failure, a random vector of the same dimension is
    returned as a degradation fallback — this function never raises.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)
    
    # Skip the API call entirely for empty or whitespace-only text.
    if not text or not text.strip():
        logger.warning(f"文本为空或只包含空白字符，跳过API调用")
        # Degrade to a random vector so the caller always gets a usable shape.
        fallback_vector = np.random.rand(settings.VECTOR_DIMENSION).astype(np.float32)
        logger.warning(f"使用随机向量作为降级方案，维度: {fallback_vector.shape}")
        return fallback_vector
    
    # Strip leading/trailing whitespace.
    text = text.strip()
    
    # Truncate over-long text to the Aliyun API input limit.
    if len(text) > 6000:  # Aliyun API limit
        text = text[:6000]
        logger.warning(f"文本过长，截取前6000字符进行向量化")
    
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {settings.BAILIAN_API_KEY}"
    }
    
    # OpenAI-compatible request body for the Aliyun Bailian endpoint.
    payload = {
        "model": "text-embedding-v4",
        "input": text,
        "dimensions": settings.VECTOR_DIMENSION,
        "encoding_format": "float"
    }
    
    logger.info(f"调用阿里云嵌入API，文本长度: {len(text)}")
    logger.debug(f"API请求URL: https://dashscope.aliyuncs.com/compatible-mode/v1/embeddings")
    logger.debug(f"API请求Payload: {payload}")
    
    try:
        response = requests.post("https://dashscope.aliyuncs.com/compatible-mode/v1/embeddings", headers=headers, json=payload, timeout=30)
        
        logger.info(f"API响应状态码: {response.status_code}")
        
        if response.status_code != 200:
            logger.error(f"API调用失败，状态码: {response.status_code}")
            logger.error(f"错误响应: {response.text}")
            raise Exception(f"API调用失败: {response.status_code} - {response.text}")
        
        result = response.json()
        logger.debug(f"API响应: {result}")
        
        # Parse the OpenAI-compatible response shape: data[0].embedding.
        embedding_list = None
        if "data" in result and len(result["data"]) > 0:
            embedding_list = result["data"][0]["embedding"]
            logger.info("使用OpenAI兼容格式解析响应")
        else:
            logger.error(f"无法解析API响应格式: {result}")
            raise ValueError("无法解析API响应格式")
        
        if not embedding_list or len(embedding_list) != settings.VECTOR_DIMENSION:
            logger.error(f"向量维度不匹配，期望: {settings.VECTOR_DIMENSION}，实际: {len(embedding_list) if embedding_list else 0}")
            raise ValueError(f"向量维度不匹配: {len(embedding_list) if embedding_list else 0}")
        
        vector = np.array(embedding_list, dtype=np.float32)
        logger.info(f"成功生成向量，维度: {vector.shape}")
        return vector
        
    except Exception as e:
        # Broad catch is deliberate: embedding failures must not break callers.
        logger.error(f"调用阿里云嵌入API时发生错误: {e}")
        logger.error(f"错误类型: {type(e).__name__}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")
        
        # Degrade to a random vector so the caller always gets a usable shape.
        fallback_vector = np.random.rand(settings.VECTOR_DIMENSION).astype(np.float32)
        logger.warning(f"使用随机向量作为降级方案，维度: {fallback_vector.shape}")
        return fallback_vector

def call_aliyun_image_embedding_api(image_path: str) -> np.ndarray:
    """Embed the image at *image_path* via the Aliyun multimodal embedding API.

    Returns a float32 vector of ``settings.IMAGE_VECTOR_DIMENSION``. Raises if
    the image file cannot be read; on API or parsing failure, a random vector
    of the same dimension is returned as a degradation fallback.
    """
    from app.core.logging import get_logger
    import base64
    import os
    logger = get_logger(__name__)
    
    logger.info(f"调用阿里云图片嵌入API，图片路径: {image_path}")
    
    # Read the image and base64-encode it for the request body.
    try:
        with open(image_path, 'rb') as f:
            image_data = f.read()
            image_base64 = base64.b64encode(image_data).decode('utf-8')
    except Exception as e:
        # Unreadable file is a caller error — propagate instead of degrading.
        logger.error(f"读取图片文件失败: {e}")
        raise
    
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {settings.BAILIAN_API_KEY}"
    }
    
    # Aliyun multimodal embedding request: image supplied as a data URL.
    payload = {
        "model": "multimodal-embedding-v1",
        "input": {
            "contents": [
                {"image": f"data:image/jpeg;base64,{image_base64}"}
            ]
        }
    }
    
    logger.debug(f"图片API请求URL: https://dashscope.aliyuncs.com/api/v1/services/embeddings/multimodal-embedding/multimodal-embedding")
    logger.debug(f"图片API请求Payload: {payload}")
    
    try:
        # Dedicated multimodal endpoint (not the OpenAI-compatible one).
        response = requests.post(
            "https://dashscope.aliyuncs.com/api/v1/services/embeddings/multimodal-embedding/multimodal-embedding",
            headers=headers,
            json=payload,
            timeout=30
        )
        
        logger.info(f"图片API响应状态码: {response.status_code}")
        
        if response.status_code != 200:
            logger.error(f"图片API调用失败，状态码: {response.status_code}")
            logger.error(f"错误响应: {response.text}")
            raise Exception(f"图片API调用失败: {response.status_code} - {response.text}")
        
        result = response.json()
        logger.debug(f"图片API响应: {result}")
        
        # Parse the multimodal response shape: output.embeddings[0].embedding.
        embedding_list = None
        if "output" in result and "embeddings" in result["output"] and len(result["output"]["embeddings"]) > 0:
            embedding_list = result["output"]["embeddings"][0]["embedding"]
            logger.info("解析图片嵌入API响应")
        else:
            logger.error(f"无法解析图片API响应格式: {result}")
            raise ValueError("无法解析图片API响应格式")
        
        if not embedding_list or len(embedding_list) != settings.IMAGE_VECTOR_DIMENSION:
            logger.error(f"图片向量维度不匹配，期望: {settings.IMAGE_VECTOR_DIMENSION}，实际: {len(embedding_list) if embedding_list else 0}")
            raise ValueError(f"图片向量维度不匹配: {len(embedding_list) if embedding_list else 0}")
        
        vector = np.array(embedding_list, dtype=np.float32)
        logger.info(f"成功生成图片向量，维度: {vector.shape}")
        return vector
        
    except Exception as e:
        # Broad catch is deliberate: embedding failures must not break callers.
        logger.error(f"调用阿里云图片嵌入API时发生错误: {e}")
        logger.error(f"错误类型: {type(e).__name__}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")
        
        # Degrade to a random vector so the caller always gets a usable shape.
        fallback_vector = np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)
        logger.warning(f"使用随机图片向量作为降级方案，维度: {fallback_vector.shape}")
        return fallback_vector

def call_aliyun_embedding_api_batch(texts: List[str]) -> List[np.ndarray]:
    """Embed a batch of texts via the Aliyun DashScope OpenAI-compatible API.

    Over-long inputs are truncated to 6000 characters before the call. On a
    whole-call failure, one random vector per input text is returned; on a
    per-item dimension mismatch, only that item is replaced with a random
    vector. This function never raises.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)

    logger.info(f"批量调用阿里云嵌入API，文本数量: {len(texts)}")

    # Truncate texts that exceed the Aliyun API input limit.
    processed_texts = []
    for text in texts:
        if len(text) > 6000:
            text = text[:6000]
            logger.warning(f"文本过长，截取前6000字符进行向量化")
        processed_texts.append(text)

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {settings.BAILIAN_API_KEY}"
    }

    # OpenAI-compatible request body for the Aliyun Bailian endpoint.
    payload = {
        "model": "text-embedding-v4",
        "input": processed_texts,
        "dimensions": settings.VECTOR_DIMENSION,
        "encoding_format": "float"
    }

    logger.debug(f"批量API请求URL: https://dashscope.aliyuncs.com/compatible-mode/v1/embeddings")
    logger.debug(f"批量API请求Payload: {payload}")

    try:
        response = requests.post("https://dashscope.aliyuncs.com/compatible-mode/v1/embeddings", headers=headers, json=payload, timeout=60)

        logger.info(f"批量API响应状态码: {response.status_code}")

        if response.status_code != 200:
            logger.error(f"批量API调用失败，状态码: {response.status_code}")
            logger.error(f"错误响应: {response.text}")
            raise Exception(f"批量API调用失败: {response.status_code} - {response.text}")

        result = response.json()
        logger.debug(f"批量API响应: {result}")

        # Guard clause: an empty/missing "data" array is an unparseable response.
        if "data" not in result or len(result["data"]) == 0:
            logger.error(f"无法解析批量API响应格式: {result}")
            raise ValueError("无法解析批量API响应格式")

        vectors = []
        for item in result["data"]:
            embedding_list = item["embedding"]
            if not embedding_list or len(embedding_list) != settings.VECTOR_DIMENSION:
                logger.error(f"向量维度不匹配，期望: {settings.VECTOR_DIMENSION}，实际: {len(embedding_list) if embedding_list else 0}")
                # Per-item degradation: substitute a random vector for this item.
                vectors.append(np.random.rand(settings.VECTOR_DIMENSION).astype(np.float32))
            else:
                vectors.append(np.array(embedding_list, dtype=np.float32))

        logger.info(f"批量成功生成 {len(vectors)} 个向量")
        return vectors

    except Exception as e:
        logger.error(f"批量调用阿里云嵌入API时发生错误: {e}")
        logger.error(f"错误类型: {type(e).__name__}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")

        # Whole-call degradation: one random vector per input text.
        fallback_vectors = [np.random.rand(settings.VECTOR_DIMENSION).astype(np.float32) for _ in texts]
        logger.warning(f"使用随机向量作为降级方案，生成 {len(fallback_vectors)} 个向量")
        return fallback_vectors

def call_llm_api(prompt: str) -> str:
    """Call the LLM chat-completion API (Aliyun Bailian, OpenAI-compatible).

    Args:
        prompt: User prompt, typically built with knowledge-base context.

    Returns:
        The model's reply text, or a Chinese error description on failure —
        this function never raises.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)
    
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {ModelConfig.LLM_API_KEY}"
    }
    
    # OpenAI-compatible chat payload for Aliyun Bailian; model parameters
    # (temperature etc.) come from the shared ModelConfig.
    payload = {
        "model": ModelConfig.LLM_MODEL,
        "messages": [
            {"role": "system", "content": "你是一个智能助手。请根据提供的知识库内容来回答用户的问题。"},
            {"role": "user", "content": prompt}
        ],
        **ModelConfig.MODEL_PARAMS['llm']
    }
    
    logger.info(f"开始调用LLM，模型: {ModelConfig.LLM_MODEL}")
    logger.debug(f"LLM请求Payload: {payload}")
    
    try:
        response = requests.post(ModelConfig.LLM_API_URL, headers=headers, json=payload, timeout=60)
        logger.info(f"LLM响应状态码: {response.status_code}")
        
        if response.status_code != 200:
            logger.error(f"LLM调用失败，状态码: {response.status_code}")
            logger.error(f"错误响应: {response.text}")
            return f"调用大语言模型失败: {response.status_code} - {response.text}"
        
        # NOTE: a redundant response.raise_for_status() was removed here —
        # the explicit status-code check above already handles non-200 codes,
        # so it could never raise at this point.
        result = response.json()
        logger.debug(f"LLM响应: {result}")
        
        content = result.get("choices", [{}])[0].get("message", {}).get("content", "")
        logger.info(f"LLM调用成功，返回内容长度: {len(content)}")
        return content
        
    except Exception as e:
        # Return an error string rather than raising — callers treat the
        # result as display text.
        logger.error(f"LLM调用失败: {e}")
        return f"调用大语言模型时发生错误: {str(e)}"

async def call_llm_stream_async(prompt: str):
    """Asynchronously stream LLM tokens from Aliyun Bailian via LangChain.

    Yields response tokens as they arrive; on any failure yields a single
    Chinese error message instead of raising.
    """
    from app.core.logging import get_logger
    import asyncio
    from langchain.callbacks import AsyncIteratorCallbackHandler
    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage, SystemMessage
    
    logger = get_logger(__name__)
    
    try:
        # Callback handler that exposes streamed tokens as an async iterator.
        callback = AsyncIteratorCallbackHandler()
        
        # ChatOpenAI pointed at the Aliyun Bailian OpenAI-compatible endpoint.
        llm = ChatOpenAI(
            model_name=ModelConfig.LLM_MODEL,
            openai_api_base=ModelConfig.LLM_API_URL,
            openai_api_key=ModelConfig.LLM_API_KEY,
            streaming=True,
            callbacks=[callback],
            **ModelConfig.MODEL_PARAMS['llm']
        )
        
        logger.info(f"开始异步流式调用LLM，模型: {ModelConfig.LLM_MODEL}")
        
        # System instruction plus the user's prompt.
        messages = [
            SystemMessage(content="你是一个智能助手。请根据提供的知识库内容来回答用户的问题。"),
            HumanMessage(content=prompt)
        ]
        
        # Run generation concurrently so tokens can be consumed below.
        task = asyncio.create_task(
            llm.agenerate([messages])
        )
        
        # Relay tokens to the caller as the callback produces them.
        async for token in callback.aiter():
            yield token
            
        # Await the generation task so its errors surface in this scope.
        await task
        
    except Exception as e:
        logger.error(f"异步LLM流式调用失败: {e}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")
        yield f"\nLLM调用失败: {e}\n"

def call_llm_stream(prompt: str):
    """Stream LLM tokens from Aliyun Bailian over the OpenAI-compatible SSE API.

    Yields content fragments as they arrive; on HTTP or transport failure
    yields a single Chinese error message instead of raising.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)
    
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {ModelConfig.LLM_API_KEY}"
    }
    
    # OpenAI-compatible chat payload with streaming enabled.
    payload = {
        "model": ModelConfig.LLM_MODEL,
        "messages": [
            {"role": "system", "content": "你是一个智能助手。请根据提供的知识库内容来回答用户的问题。"},
            {"role": "user", "content": prompt}
        ],
        "stream": True,
        **ModelConfig.MODEL_PARAMS['llm']
    }
    
    logger.info(f"开始流式调用LLM，模型: {ModelConfig.LLM_MODEL}")
    
    try:
        with requests.post(ModelConfig.LLM_API_URL, headers=headers, json=payload, stream=True, timeout=120) as response:
            logger.info(f"LLM响应状态码: {response.status_code}")
            
            if response.status_code != 200:
                error_text = response.text
                logger.error(f"LLM API调用失败，状态码: {response.status_code}")
                logger.error(f"错误响应: {error_text}")
                yield f"\nLLM API调用失败: {response.status_code} - {error_text}\n"
                return
            
            response.raise_for_status()
            
            logger.info("开始处理LLM流式响应...")
            for line in response.iter_lines(decode_unicode=True):
                # Skip SSE keep-alive / blank lines.
                if not line or line.strip() == "":
                    continue
                    
                # Strip the SSE "data:" framing prefix.
                if line.startswith("data:"):
                    line = line[5:].strip()
                    
                # "[DONE]" terminates the stream.
                if line == "[DONE]":
                    logger.info("LLM流式调用完成")
                    break
                    
                try:
                    data = json.loads(line)
                    # Accept delta (streaming), full message (non-streaming)
                    # or a raw "data" payload, in that order of preference.
                    content = data.get("choices", [{}])[0].get("delta", {}).get("content") \
                        or data.get("choices", [{}])[0].get("message", {}).get("content") \
                        or data.get("data", "")
                    if content:
                        yield content
                except Exception as e:
                    # Malformed chunks are skipped rather than aborting the stream.
                    logger.warning(f"解析响应数据失败: {e}")
                    continue
                    
    except Exception as e:
        logger.error(f"LLM流式调用失败: {e}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")
        yield f"\n发生错误: {e}\n"

def build_prompt_with_sources(question: str, text_sources: List[str], image_sources: List[str]) -> str:
    """Build an LLM prompt that embeds knowledge-base text and image sources.

    Args:
        question: The user's question.
        text_sources: Retrieved text chunks, included verbatim and numbered.
        image_sources: Paths of related image files; only the base filename
            is shown to the model (no image content is embedded).

    Returns:
        The assembled prompt string.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)
    
    logger.info(f"开始构建prompt，文本源数量: {len(text_sources)}, 图片源数量: {len(image_sources)}")
    
    prompt_parts = []
    
    # Numbered text chunks.
    if text_sources:
        prompt_parts.append("文本知识库内容:")
        for i, text in enumerate(text_sources, 1):
            prompt_parts.append(f"{i}. {text}")
            logger.debug(f"添加文本源 {i}: {text[:100]}...")
    
    # Related image files listed by filename only (the model gets no pixels).
    if image_sources:
        prompt_parts.append("\n相关图片文件:")
        for i, img_path in enumerate(image_sources, 1):
            filename = os.path.basename(img_path)
            # BUG FIX: `filename` was computed but a literal "(unknown)"
            # placeholder was appended instead of it.
            prompt_parts.append(f"{i}. {filename}")
            logger.debug(f"添加图片源 {i}: {filename}")
    
    # The user question plus answering instructions.
    prompt_parts.append(f"\n用户问题: {question}")
    prompt_parts.append("\n请根据上述知识库内容回答用户问题。如果知识库内容与问题无关，请说明情况并根据自己的知识回答。")
    
    final_prompt = "\n".join(prompt_parts)
    
    logger.info(f"Prompt构建完成，总长度: {len(final_prompt)} 字符")
    logger.debug(f"构建的Prompt内容:\n{final_prompt}")
    
    return final_prompt

def filter_sources_by_threshold(sources: List[str], scores: List[float], threshold: float = None) -> Tuple[List[str], List[float]]:
    """Keep only the (source, score) pairs whose score meets the threshold.

    Falls back to ``settings.SIMILARITY_THRESHOLD`` when *threshold* is None.
    Returns the surviving sources and their scores as two parallel lists.
    """
    if threshold is None:
        threshold = settings.SIMILARITY_THRESHOLD

    kept = [(src, sc) for src, sc in zip(sources, scores) if sc >= threshold]
    if not kept:
        return [], []

    kept_sources, kept_scores = zip(*kept)
    return list(kept_sources), list(kept_scores)

def build_prompt_with_sources_and_history(question: str, text_chunks: List[str], image_chunks: List[str] = None, conversation_history: List[dict] = None) -> str:
    """Assemble a multi-turn chat prompt from knowledge-base chunks and history.

    Args:
        question: The current user question.
        text_chunks: Retrieved text-knowledge chunks.
        image_chunks: Optional retrieved image-knowledge chunks.
        conversation_history: Optional prior messages, each a dict with
            "role" ("user" or assistant) and "content" keys.

    Returns:
        The complete Chinese prompt string.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)

    logger.info(f"开始构建多轮对话prompt，文本源数量: {len(text_chunks)}, 图片源数量: {len(image_chunks) if image_chunks else 0}, 历史对话数量: {len(conversation_history) if conversation_history else 0}")

    # Knowledge-base context: numbered text chunks, then numbered image chunks.
    sections = []
    if text_chunks:
        sections.append("【文本知识库内容】")
        sections.extend(f"{idx}. {chunk}" for idx, chunk in enumerate(text_chunks, 1))
    if image_chunks:
        sections.append("【图片知识库内容】")
        sections.extend(f"{idx}. {chunk}" for idx, chunk in enumerate(image_chunks, 1))

    context = "\n\n".join(sections) if sections else "无相关知识库内容"

    # Conversation history rendered as "用户/助手: content" lines.
    history_text = ""
    if conversation_history:
        history_lines = ["【对话历史】"]
        for message in conversation_history:
            speaker = "用户" if message["role"] == "user" else "助手"
            history_lines.append(f"{speaker}: {message['content']}")
        history_text = "\n".join(history_lines) + "\n\n"

    # Final prompt: instructions + context + history + current question.
    prompt = f"""你是一个智能助手，请根据以下知识库内容和对话历史来回答用户的问题。

知识库内容：
{context}

{history_text}当前用户问题：{question}

请用中文回答，注意：
1. 优先使用知识库内容回答问题
2. 如果知识库内容无法回答，请说明无法从知识库中找到相关信息
3. 保持对话的连贯性，参考之前的对话历史
4. 回答要简洁、准确、有用"""

    logger.info(f"多轮对话Prompt构建完成，总长度: {len(prompt)} 字符")
    return prompt

def generate_image_query_vector(text_query: str) -> np.ndarray:
    """Generate a query vector for image retrieval from a text query.

    Uses the Aliyun multimodal embedding API with text input so the result
    lives in the same vector space as the stored image embeddings. Returns a
    float32 vector of ``settings.IMAGE_VECTOR_DIMENSION``; a random vector is
    returned as a degradation fallback on any failure.
    """
    from app.core.logging import get_logger
    from config.settings import settings
    import numpy as np
    
    logger = get_logger(__name__)
    
    try:
        # Map the text into the image vector space via the multimodal API.
        logger.info(f"为图片检索生成查询向量，文本: {text_query}")
        
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {settings.BAILIAN_API_KEY}"
        }
        
        # Same multimodal model as image ingestion, but with a text payload.
        payload = {
            "model": "multimodal-embedding-v1",
            "input": {
                "contents": [
                    {"text": text_query}
                ]
            }
        }
        
        response = requests.post(
            "https://dashscope.aliyuncs.com/api/v1/services/embeddings/multimodal-embedding/multimodal-embedding",
            headers=headers,
            json=payload,
            timeout=30
        )
        
        if response.status_code != 200:
            logger.error(f"图片查询向量生成失败: {response.status_code}")
            logger.error(f"错误响应: {response.text}")
            # Degrade to a random vector so retrieval can still proceed.
            return np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)
        
        result = response.json()
        embedding_list = result["output"]["embeddings"][0]["embedding"]
        
        # Reconcile dimension mismatches: truncate or zero-pad as needed.
        if len(embedding_list) != settings.IMAGE_VECTOR_DIMENSION:
            logger.warning(f"图片查询向量维度不匹配: {len(embedding_list)} != {settings.IMAGE_VECTOR_DIMENSION}")
            if len(embedding_list) > settings.IMAGE_VECTOR_DIMENSION:
                embedding_list = embedding_list[:settings.IMAGE_VECTOR_DIMENSION]
            else:
                embedding_list = embedding_list + [0.0] * (settings.IMAGE_VECTOR_DIMENSION - len(embedding_list))
        
        vector = np.array(embedding_list, dtype=np.float32)
        logger.info(f"成功生成图片查询向量，维度: {vector.shape}")
        return vector
        
    except Exception as e:
        logger.error(f"图片查询向量生成失败: {e}")
        # Degrade to a random vector so retrieval can still proceed.
        return np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)