import os
import sys
import json
import time
import logging
import hashlib
import threading
import shutil
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple, Union, Set
from datetime import datetime
from functools import lru_cache

# Configure module-level logging; every class in this file logs through `logger`.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("vector_store")

# Document class defined at module scope so other modules can import it.
class Document:
    """Lightweight document container: raw text plus arbitrary metadata."""

    def __init__(self, page_content: str, metadata: Optional[Dict[str, Any]] = None):
        """
        Args:
            page_content: The document text.
            metadata: Optional metadata mapping; defaults to a fresh empty dict.
        """
        self.page_content = page_content
        # `or {}` gives every instance its own dict, avoiding the
        # shared-mutable-default pitfall.
        self.metadata = metadata or {}

    def to_langchain_document(self) -> 'LangchainDocument':
        """Convert to the LangChain Document format."""
        # Imported lazily so this module still loads without langchain installed.
        from langchain_core.documents import Document as LangchainDocument
        return LangchainDocument(page_content=self.page_content, metadata=self.metadata)

# Standalone Embeddings interface so this module does not depend on langchain_core.
class Embeddings:
    """Minimal embedding interface; concrete subclasses supply the vectors."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents. Must be overridden by subclasses."""
        raise NotImplementedError

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string. Must be overridden by subclasses."""
        raise NotImplementedError

# A simple, dependency-free embedding implementation.
class SimpleEmbeddings(Embeddings):
    """Deterministic hash-based embeddings with a fixed dimension.

    The MD5 digest of each text is reinterpreted as IEEE-754 floats and the
    resulting vector is L2-normalized, so identical texts always map to
    identical embeddings. Not semantically meaningful -- a lightweight
    stand-in for a real embedding model.
    """

    def __init__(self, size: int = 384):
        # size: output embedding dimension.
        self.size = size

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return one ``size``-dimensional unit vector per input text."""
        import hashlib
        import math
        import struct

        result = []
        for text in texts:
            # Hash the text so that equal texts get equal embeddings.
            hash_value = hashlib.md5(text.encode()).digest()
            # Reinterpret the digest bytes as big-endian floats, cycling
            # through the 16-byte digest (idx stays 4-byte aligned).
            floats = []
            for i in range(0, self.size * 4, 4):
                idx = i % len(hash_value)
                value = struct.unpack('!f', hash_value[idx:idx + 4])[0]
                # Bug fix: arbitrary bytes can decode to NaN/inf, which would
                # poison the normalization below -- replace them with 0.0.
                if not math.isfinite(value):
                    value = 0.0
                floats.append(value)

            # Defensive padding in case the loop above ever under-fills.
            while len(floats) < self.size:
                floats.append(0.0)

            # L2-normalize (skipped when the vector is all zeros).
            magnitude = math.sqrt(sum(x * x for x in floats))
            if magnitude > 0:
                floats = [x / magnitude for x in floats]

            result.append(floats[:self.size])
        return result

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query by delegating to :meth:`embed_documents`."""
        result = self.embed_documents([text])
        return result[0] if result else [0.0] * self.size

# Verify required third-party dependencies at import time.
# DEPS_AVAILABLE / CHROMA_AVAILABLE are module-level flags consulted by
# VectorStoreClient and ProductMonitor before any vector-store operation.
try:
    import yaml
    from langchain_core.documents import Document as LangchainDocument
    
    # Probe for ChromaDB and the LangChain Chroma integration.
    try:
        import chromadb
        from chromadb.config import Settings
        from langchain_community.vectorstores import Chroma
        CHROMA_AVAILABLE = True
    except ImportError as e:
        logger.warning(f"ChromaDB导入失败: {str(e)}")
        CHROMA_AVAILABLE = False
    
    # Currently ChromaDB is the only supported backend, so overall
    # availability tracks CHROMA_AVAILABLE directly.
    DEPS_AVAILABLE = CHROMA_AVAILABLE
except ImportError as e:
    logger.error(f"缺少必要的依赖: {str(e)}")
    DEPS_AVAILABLE = False

class ProductMonitor:
    """Watches the product data directory and keeps vector stores in sync.

    A daemon thread periodically scans ``products_dir``; when a product's
    info/analysis files appear or change (detected via MD5 hashes), its
    vector store is rebuilt, and when a product directory disappears the
    corresponding store is deleted.
    """

    def __init__(self, products_dir: str, vector_store_client: 'VectorStoreClient'):
        self.products_dir = products_dir
        self.vector_store_client = vector_store_client
        # product_id -> {'info_hash', 'analysis_hash', 'timestamp'}
        self.known_products: Dict[str, Dict] = {}
        self.is_running = False
        self.monitor_thread = None
        # Event used to interrupt the sleep in the monitor loop so stop()
        # takes effect promptly instead of after a full interval.
        self._stop_event = threading.Event()

    def start(self, interval: int = 60):
        """Start the background monitor thread (no-op if already running)."""
        if self.is_running:
            return

        self.is_running = True
        self._stop_event.clear()
        self.monitor_thread = threading.Thread(target=self._monitor_loop, args=(interval,), daemon=True)
        self.monitor_thread.start()
        logger.info(f"产品监控已启动，目录: {self.products_dir}, 间隔: {interval}秒")

    def stop(self):
        """Signal the monitor thread to stop and wait briefly for it to exit."""
        self.is_running = False
        self._stop_event.set()
        if self.monitor_thread:
            self.monitor_thread.join(timeout=5)
        logger.info("产品监控已停止")

    def _monitor_loop(self, interval: int):
        """Thread body: initial scan, then rescan every ``interval`` seconds."""
        self._scan_and_sync_products()  # initial scan

        while self.is_running:
            try:
                # Bug fix: previously time.sleep(interval) made stop() block
                # for up to a full interval; the event wait returns early.
                self._stop_event.wait(interval)
                if not self.is_running:
                    break
                self._scan_and_sync_products()
            except Exception as e:
                logger.error(f"产品监控异常: {str(e)}")

    def _scan_and_sync_products(self):
        """Scan the products directory and sync changed/removed products."""
        if not os.path.exists(self.products_dir):
            logger.warning(f"产品目录不存在: {self.products_dir}")
            return

        # IDs of every product currently present on disk
        current_products: Set[str] = set()

        # Walk the product directory
        for product_dir in os.listdir(self.products_dir):
            product_path = os.path.join(self.products_dir, product_dir)

            # Skip anything that is not a directory
            if not os.path.isdir(product_path):
                continue

            # The directory name doubles as the product ID
            product_id = product_dir

            # Look for a product info file under several possible names
            product_info_path = None
            possible_info_files = [
                os.path.join(product_path, "product_info.yaml"),
                os.path.join(product_path, "info.yaml"),
                os.path.join(product_path, "product.yaml")
            ]

            for path in possible_info_files:
                if os.path.exists(path):
                    product_info_path = path
                    break

            if not product_info_path:
                logger.warning(f"产品 {product_id} 缺少产品信息文件，使用目录名作为产品ID")

            # Look for an analysis result file under several possible names
            analysis_path = None
            possible_analysis_files = [
                os.path.join(product_path, "analysis_result.json"),
                os.path.join(product_path, "analysis_result.yaml"),
                os.path.join(product_path, "analysis.json"),
                os.path.join(product_path, "analysis.yaml")
            ]

            for path in possible_analysis_files:
                if os.path.exists(path):
                    analysis_path = path
                    break

            current_products.add(product_id)

            # Proceed when at least one of the two files exists
            if analysis_path or product_info_path:
                try:
                    # Hash the files so content changes can be detected
                    info_hash = self._get_file_hash(product_info_path) if product_info_path else None
                    analysis_hash = self._get_file_hash(analysis_path) if analysis_path else None

                    current_time = time.time()

                    # Rebuild when the product is new or a hash changed
                    if (product_id not in self.known_products or 
                        self.known_products[product_id].get('info_hash') != info_hash or
                        self.known_products[product_id].get('analysis_hash') != analysis_hash):

                        # Check whether the analysis file has usable content
                        has_analysis = False
                        if analysis_path:
                            if analysis_path.endswith('.json'):
                                analysis_data = self._read_json(analysis_path)
                                has_analysis = bool(analysis_data)
                            elif analysis_path.endswith('.yaml'):
                                analysis_data = self._read_yaml(analysis_path)
                                has_analysis = bool(analysis_data)

                        # Add or refresh the vector store
                        if has_analysis or product_info_path:
                            logger.info(f"更新产品向量存储: {product_id}")
                            self._update_product_vectors(
                                product_id, 
                                product_info_path or "", 
                                analysis_path or ""
                            )

                        # Record the latest hashes for change detection
                        self.known_products[product_id] = {
                            'info_hash': info_hash,
                            'analysis_hash': analysis_hash,
                            'timestamp': current_time
                        }
                except Exception as e:
                    logger.error(f"处理产品 {product_id} 时出错: {str(e)}")
            else:
                logger.warning(f"产品 {product_id} 没有找到分析结果文件")

        # Drop vector stores for products that disappeared from disk
        deleted_products = set(self.known_products.keys()) - current_products
        for product_id in deleted_products:
            logger.info(f"删除产品向量存储: {product_id}")
            self._delete_product_vectors(product_id)
            del self.known_products[product_id]

    def _update_product_vectors(self, product_id: str, info_path: str, analysis_path: str):
        """Rebuild the vector store for one product from its info/analysis files."""
        try:
            # Read the product info (YAML); `or {}` guards against a parse
            # failure returning None (bug fix: .get on None crashed below).
            product_info = (self._read_yaml(info_path) if os.path.exists(info_path) else {}) or {}

            # The analysis result may be JSON or YAML
            analysis_result = {}

            # Try JSON first (falls back internally for .yaml extensions)
            if os.path.exists(analysis_path):
                analysis_result = self._read_json(analysis_path)

            # If empty, fall back to a sibling analysis_result.yaml
            if not analysis_result:
                yaml_path = os.path.join(os.path.dirname(analysis_path), "analysis_result.yaml")
                if os.path.exists(yaml_path):
                    yaml_data = self._read_yaml(yaml_path)
                    # The payload may be nested under an "analysis_result" key
                    if isinstance(yaml_data, dict):
                        analysis_result = yaml_data.get("analysis_result", {})
                        if not analysis_result and yaml_data:
                            # No "analysis_result" key -- use the whole document
                            analysis_result = yaml_data

            if not analysis_result:
                logger.warning(f"产品 {product_id} 无分析结果，跳过更新向量存储")
                return

            # Build one LangChain document per analysis section
            documents = []

            product_name = product_info.get('name', f'产品-{product_id}')

            if isinstance(analysis_result, dict):
                for key, value in analysis_result.items():
                    if isinstance(value, str) and value.strip():
                        doc = LangchainDocument(
                            page_content=f"{key}: {value}",
                            metadata={
                                "product_id": product_id,
                                "product_name": product_name,
                                "section": key
                            }
                        )
                        documents.append(doc)
            elif isinstance(analysis_result, str) and analysis_result.strip():
                # A bare string becomes a single document
                doc = LangchainDocument(
                    page_content=analysis_result,
                    metadata={
                        "product_id": product_id,
                        "product_name": product_name
                    }
                )
                documents.append(doc)

            if not documents:
                logger.warning(f"产品 {product_id} 无有效分析内容，跳过更新向量存储")
                return

            # Store path relative to the vector store root
            store_path = os.path.join("products", product_id)

            # Rebuild the store synchronously -- this runs on the monitor
            # thread, which has no event loop for the async client methods.
            try:
                # Make sure the parent directory exists
                full_path = self.vector_store_client._get_full_store_path(store_path)
                os.makedirs(os.path.dirname(full_path), exist_ok=True)

                # Drop any previously loaded store for this path
                if store_path in self.vector_store_client.active_stores:
                    del self.vector_store_client.active_stores[store_path]

                # Wipe and recreate the persist directory
                self.vector_store_client._clean_store_directory(full_path)

                if CHROMA_AVAILABLE:
                    from chromadb.config import Settings
                    client_settings = Settings(
                        anonymized_telemetry=False,
                        allow_reset=True,
                        is_persistent=True
                    )

                    # Create the store with the synchronous API
                    from langchain_community.vectorstores import Chroma
                    vector_store = Chroma.from_documents(
                        documents=documents,
                        embedding=self.vector_store_client.embeddings,
                        persist_directory=full_path,
                        client_settings=client_settings
                    )

                    # Older langchain versions need an explicit persist()
                    if hasattr(vector_store, "persist"):
                        vector_store.persist()

                    # Register in the active-store cache
                    self.vector_store_client.active_stores[store_path] = vector_store

                    logger.info(f"成功更新产品 {product_id} 的向量存储")
                else:
                    logger.error(f"ChromaDB不可用，无法更新产品 {product_id} 向量存储")
            except Exception as e:
                logger.error(f"更新产品 {product_id} 向量存储过程中出错: {str(e)}")

        except Exception as e:
            logger.error(f"更新产品 {product_id} 向量时出错: {str(e)}")

    def _delete_product_vectors(self, product_id: str):
        """Remove a product's vector store from memory and disk."""
        try:
            store_path = os.path.join("products", product_id)
            # Bug fix: VectorStoreClient.delete_vectorstore is a coroutine;
            # the previous un-awaited call created a coroutine object that
            # never ran, so nothing was ever deleted. Perform the equivalent
            # deletion synchronously here instead.
            if store_path in self.vector_store_client.active_stores:
                del self.vector_store_client.active_stores[store_path]
            full_path = self.vector_store_client._get_full_store_path(store_path)
            if os.path.exists(full_path):
                logger.info(f"删除向量存储: {full_path}")
                shutil.rmtree(full_path, ignore_errors=True)
        except Exception as e:
            logger.error(f"删除产品 {product_id} 向量时出错: {str(e)}")

    @staticmethod
    def _get_file_hash(file_path: str) -> str:
        """Return the MD5 hex digest of a file, or "" if unreadable/missing."""
        if not os.path.exists(file_path):
            return ""

        try:
            with open(file_path, 'rb') as f:
                file_hash = hashlib.md5(f.read()).hexdigest()
            return file_hash
        except Exception as e:
            logger.error(f"计算文件 {file_path} 哈希值出错: {str(e)}")
            return ""

    def _read_yaml(self, file_path: str) -> Union[Dict, List, None]:
        """Read a YAML file; returns None when missing or unparsable."""
        try:
            if not os.path.exists(file_path):
                return None

            with open(file_path, 'r', encoding='utf-8') as f:
                return yaml.safe_load(f)
        except Exception as e:
            logger.error(f"读取YAML文件失败 {file_path}: {str(e)}")
            return None

    def _read_json(self, file_path: str) -> Union[Dict, List, None]:
        """Read a JSON file; delegates .yaml/.yml paths to :meth:`_read_yaml`.

        Returns None when the file is missing or unparsable.
        """
        try:
            if not os.path.exists(file_path):
                return None

            # YAML extensions are handled by the YAML reader
            if file_path.lower().endswith('.yaml') or file_path.lower().endswith('.yml'):
                return self._read_yaml(file_path)

            with open(file_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except json.JSONDecodeError as e:
            # On JSON parse failure, retry as YAML for yaml-like extensions
            if file_path.endswith('.yaml') or file_path.endswith('.yml'):
                return self._read_yaml(file_path)
            logger.error(f"读取JSON文件失败 {file_path}: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"读取JSON文件失败 {file_path}: {str(e)}")
            return None


class VectorStoreClient:
    """Singleton vector store client backed by ChromaDB.

    Owns the embedding model, a cache of loaded Chroma stores keyed by
    relative store path, and (when the products directory exists) a
    ProductMonitor that keeps per-product stores in sync.
    """

    _instance = None
    _init_error = None

    # Root directory under which all vector stores are persisted.
    VECTOR_STORE_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "static", "vector_store")

    @classmethod
    def get_instance(cls) -> 'VectorStoreClient':
        """Return the shared singleton instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def get_init_error(cls) -> Optional[str]:
        """Return the initialization error message, if initialization failed."""
        return cls._init_error

    def __init__(self):
        """Initialize embeddings and, if possible, start product monitoring."""
        # Basic attributes
        self.embeddings = None
        self.active_stores: Dict[str, Any] = {}  # store_path -> vector store
        self.is_initialized = False
        # Bug fix: always define the attribute so callers can check it even
        # when the products directory is missing or init fails early.
        self.product_monitor = None

        # Make sure the storage root exists
        os.makedirs(self.VECTOR_STORE_ROOT, exist_ok=True)

        # Bail out early when required dependencies are missing
        if not DEPS_AVAILABLE:
            VectorStoreClient._init_error = "缺少必要的依赖项，请安装: pip install chromadb langchain-chroma pyyaml"
            return

        # Initialize the embedding model
        try:
            logger.info("初始化自定义简单嵌入模型...")
            # Deterministic hash-based embeddings; no external model required
            self.embeddings = SimpleEmbeddings(size=384)
            self.is_initialized = True
            logger.info("成功初始化简单嵌入模型")

            # Start product monitoring against the uploads directory
            app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            products_dir = os.path.join(app_dir, "static", "uploads", "products")

            if os.path.exists(products_dir):
                logger.info(f"找到产品目录: {products_dir}")
                self.product_monitor = ProductMonitor(products_dir, self)
                self.product_monitor.start(interval=300)  # check every 5 minutes
            else:
                logger.warning(f"产品目录不存在: {products_dir}")
        except Exception as e:
            logger.error(f"初始化嵌入模型失败: {str(e)}")
            VectorStoreClient._init_error = f"初始化嵌入模型失败: {str(e)}"

    def _get_full_store_path(self, store_path: str) -> str:
        """Resolve a store path: absolute paths pass through, relative
        paths are anchored under VECTOR_STORE_ROOT."""
        if os.path.isabs(store_path):
            return store_path

        return os.path.join(self.VECTOR_STORE_ROOT, store_path)

    def _clean_store_directory(self, directory: str) -> None:
        """Delete and recreate a store directory so it starts out empty."""
        try:
            if os.path.exists(directory):
                logger.info(f"清理向量存储目录: {directory}")
                shutil.rmtree(directory, ignore_errors=True)
            os.makedirs(directory, exist_ok=True)
        except Exception as e:
            logger.error(f"清理目录 {directory} 失败: {str(e)}")
            # Still try to create the directory
            os.makedirs(directory, exist_ok=True)

    @staticmethod
    def _coerce_documents(documents: 'List[Union[Document, LangchainDocument]]') -> 'List[LangchainDocument]':
        """Normalize a mixed list to LangChain documents (shared helper)."""
        langchain_documents = []
        for doc in documents:
            if isinstance(doc, Document):
                langchain_documents.append(doc.to_langchain_document())
            else:
                langchain_documents.append(doc)
        return langchain_documents

    @staticmethod
    def _chroma_client_settings() -> 'Settings':
        """ChromaDB client settings shared by all store-creation paths."""
        return Settings(
            anonymized_telemetry=False,  # disable telemetry
            allow_reset=True,            # allow collection reset
            is_persistent=True           # enable persistence
        )

    def _build_store(self, langchain_documents: 'List[LangchainDocument]', full_path: str) -> Any:
        """Create and persist a Chroma store from documents (shared helper)."""
        vector_store = Chroma.from_documents(
            documents=langchain_documents,
            embedding=self.embeddings,
            persist_directory=full_path,
            client_settings=self._chroma_client_settings()
        )
        # Older langchain versions need an explicit persist() call
        if hasattr(vector_store, "persist"):
            vector_store.persist()
        return vector_store

    async def create_or_load_vectorstore(self, 
                                        documents: List[Union[Document, LangchainDocument]],
                                        store_path: str,
                                        store_type: str = "chroma") -> Tuple[bool, Optional[str]]:
        """
        Create (or rebuild) a vector store from documents.

        NOTE(review): despite the name, any existing store at ``store_path``
        is wiped and recreated -- nothing is loaded.

        Args:
            documents: Document list; custom Document or LangchainDocument.
            store_path: Store path (relative to VECTOR_STORE_ROOT or absolute).
            store_type: Store type; only "chroma" is supported.

        Returns:
            tuple: (success, error message or None)
        """
        # Client must be initialized first
        if not self.is_initialized:
            error_msg = "向量存储客户端未初始化"
            logger.error(error_msg)
            return False, error_msg

        # Reject empty document lists
        if not documents:
            error_msg = "文档列表为空，无法创建向量存储"
            logger.warning(error_msg)
            return False, error_msg

        # Only Chroma is supported at the moment
        if store_type.lower() != "chroma":
            logger.info(f"指定的存储类型 {store_type} 不支持，将使用ChromaDB")
            store_type = "chroma"

        if not CHROMA_AVAILABLE:
            error_msg = "ChromaDB不可用，请安装依赖"
            logger.error(error_msg)
            return False, error_msg

        # Resolve the full on-disk path
        full_path = self._get_full_store_path(store_path)

        try:
            # Convert document types where necessary
            langchain_documents = self._coerce_documents(documents)

            # Ensure the parent directory exists, then wipe and recreate
            store_dir = os.path.dirname(full_path)
            os.makedirs(store_dir, exist_ok=True)
            self._clean_store_directory(full_path)

            # Build the ChromaDB store
            try:
                logger.info(f"创建新的向量存储: {full_path}")
                vector_store = self._build_store(langchain_documents, full_path)

                # Register in the active-store cache
                self.active_stores[store_path] = vector_store

                logger.info(f"成功创建向量存储: {full_path}")
                return True, None
            except Exception as e:
                error_msg = f"创建向量存储失败: {str(e)}"
                logger.error(error_msg)
                return False, error_msg
        except Exception as e:
            error_msg = f"处理向量存储时发生未预期错误: {str(e)}"
            logger.error(error_msg)
            return False, error_msg

    async def update_product_vectorstore(self, 
                                  product_id: str, 
                                  documents: List[Union[Document, LangchainDocument]], 
                                  store_path: str) -> Tuple[bool, Optional[str]]:
        """
        Rebuild a product's vector store from documents.

        Args:
            product_id: Product ID (used for logging only).
            documents: Document list; custom Document or LangchainDocument.
            store_path: Store path (relative to VECTOR_STORE_ROOT or absolute).

        Returns:
            tuple: (success, error message or None)
        """
        if not self.is_initialized:
            error_msg = "向量存储客户端未初始化"
            logger.error(error_msg)
            return False, error_msg

        if not documents:
            error_msg = "文档列表为空，无法更新向量存储"
            logger.warning(error_msg)
            return False, error_msg

        # Consistency fix: guard like create_or_load_vectorstore does instead
        # of failing later with a NameError on Chroma.
        if not CHROMA_AVAILABLE:
            error_msg = "ChromaDB不可用，请安装依赖"
            logger.error(error_msg)
            return False, error_msg

        # Resolve the full on-disk path
        full_path = self._get_full_store_path(store_path)

        try:
            # Convert document types where necessary
            langchain_documents = self._coerce_documents(documents)

            # Ensure the parent directory exists
            os.makedirs(os.path.dirname(full_path), exist_ok=True)

            # Drop any previously loaded store for this path
            if store_path in self.active_stores:
                del self.active_stores[store_path]

            # Wipe and recreate the persist directory
            self._clean_store_directory(full_path)

            # Build the new ChromaDB store
            try:
                logger.info(f"更新产品向量存储: {full_path}")
                vector_store = self._build_store(langchain_documents, full_path)

                # Register in the active-store cache
                self.active_stores[store_path] = vector_store

                logger.info(f"成功更新产品向量存储: {full_path}")
                return True, None
            except Exception as e:
                error_msg = f"更新产品向量存储失败: {str(e)}"
                logger.error(error_msg)
                return False, error_msg
        except Exception as e:
            error_msg = f"处理向量存储时发生未预期错误: {str(e)}"
            logger.error(error_msg)
            return False, error_msg

    async def delete_vectorstore(self, store_path: str) -> bool:
        """
        Delete a vector store from the cache and from disk.

        Args:
            store_path: Store path (relative to VECTOR_STORE_ROOT or absolute).

        Returns:
            True on success (including when the store did not exist).
        """
        if not self.is_initialized:
            logger.error("向量存储客户端未初始化")
            return False

        full_path = self._get_full_store_path(store_path)

        try:
            # Evict from the in-memory cache first
            if store_path in self.active_stores:
                del self.active_stores[store_path]

            # Remove the persisted directory
            if os.path.exists(full_path):
                logger.info(f"删除向量存储: {full_path}")
                shutil.rmtree(full_path, ignore_errors=True)

            return True
        except Exception as e:
            logger.error(f"删除向量存储 {store_path} 失败: {str(e)}")
            return False

    async def similarity_search(self, 
                              query: str, 
                              k: int = 4,
                              store_path: Optional[str] = None) -> List[LangchainDocument]:
        """
        Similarity search over one store or all product stores.

        Args:
            query: Query text.
            k: Number of results to return.
            store_path: Optional store path; when None, all product stores
                are searched.

        Returns:
            List of similar documents (empty on error).
        """
        if not self.is_initialized:
            logger.error("向量存储客户端未初始化")
            return []

        if not query.strip():
            logger.warning("查询文本为空")
            return []

        try:
            # Targeted search when a store path is given
            if store_path:
                return await self._search_single_store(query, k, store_path)

            # Otherwise fan out across every product store
            return await self._search_all_products(query, k)
        except Exception as e:
            logger.error(f"相似度搜索失败: {str(e)}")
            return []

    async def _search_single_store(self, query: str, k: int, store_path: str) -> List[LangchainDocument]:
        """Search one store, lazily loading it from disk if not cached."""
        if store_path in self.active_stores:
            vector_store = self.active_stores[store_path]
        else:
            # Try to load the store from its persist directory
            full_path = self._get_full_store_path(store_path)
            if not os.path.exists(full_path):
                logger.warning(f"向量存储不存在: {full_path}")
                return []

            try:
                logger.info(f"加载向量存储: {full_path}")

                # Read-only load: no allow_reset needed here
                client_settings = Settings(
                    anonymized_telemetry=False,
                    is_persistent=True
                )

                vector_store = Chroma(
                    persist_directory=full_path,
                    embedding_function=self.embeddings,
                    client_settings=client_settings
                )

                self.active_stores[store_path] = vector_store
            except Exception as e:
                logger.error(f"加载向量存储 {full_path} 失败: {str(e)}")
                return []

        # Run the search (Chroma's similarity_search is synchronous)
        try:
            results = vector_store.similarity_search(query, k=k)
            return results
        except Exception as e:
            logger.error(f"在向量存储 {store_path} 中搜索失败: {str(e)}")
            return []

    async def _search_all_products(self, query: str, k: int) -> List[LangchainDocument]:
        """Search every product store and return up to k combined results.

        NOTE(review): results are concatenated in directory order and then
        truncated to k -- they are not re-ranked by score across stores.
        """
        all_results = []

        # Product stores live under <root>/products/<product_id>
        products_vector_dir = os.path.join(self.VECTOR_STORE_ROOT, "products")
        if not os.path.exists(products_vector_dir):
            logger.warning(f"产品向量存储目录不存在: {products_vector_dir}")
            return []

        # Query each product store in turn
        for product_id in os.listdir(products_vector_dir):
            product_store_path = os.path.join("products", product_id)
            product_results = await self._search_single_store(query, k, product_store_path)
            all_results.extend(product_results)

        # Keep at most k results overall
        if len(all_results) > k:
            all_results = all_results[:k]

        return all_results