import logging
import torch
import numpy as np
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams, PointStruct
import hashlib
import os
import traceback
import time

# Configure logging: INFO level process-wide, plus a module-level logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def normalize_path(path):
    """Canonicalize *path* so different spellings of one location compare equal.

    ``None`` passes through unchanged.  The result is absolute, normalized,
    and uses forward slashes exclusively as separators.
    """
    if path is None:
        return None
    canonical = os.path.normpath(path)
    return os.path.abspath(canonical).replace("\\", "/")

class VectorDB:
    """Thin wrapper around a local-file Qdrant instance.

    Stores ResNet50 image feature vectors (2048-dim, cosine distance) in two
    collections: ``image_features`` (the dataset) and ``history_features``
    (user history).  Already-ingested paths are tracked in memory so an
    interrupted bulk load can resume without re-inserting vectors.
    """

    # Feature dimensionality produced by the ResNet50 backbone.
    FEATURE_DIM = 2048

    def __init__(self):
        # Created lazily in initialize(); None until then.
        self.qdrant_client = None
        # Bulk-load progress bookkeeping (reset by set_total_vectors()).
        self.total_vectors = 0
        self.processed_vectors = 0
        self.last_progress = 0  # last progress value shown; kept for external callers
        # Normalized paths already present in "image_features"; enables
        # resume-after-interrupt in store_vector().
        self.processed_paths = set()

    def initialize(self):
        """Open/create the local Qdrant database and ensure both collections exist.

        Raises:
            Exception: re-raised after logging when the client or either
                collection cannot be set up.
        """
        try:
            logger.info("使用Qdrant向量数据库本地文件模式...")

            # Local-file mode keeps everything under ./qdrant_db; no server needed.
            qdrant_path = os.path.abspath("qdrant_db")
            os.makedirs(qdrant_path, exist_ok=True)

            self.qdrant_client = QdrantClient(path=qdrant_path)
            logger.info("Qdrant向量数据库本地文件模式初始化成功")

            # The two collection-bootstrap paths were duplicated inline;
            # factored into one helper (identical log output).
            self._ensure_collection("image_features", "图片特征集合", load_paths=True)
            self._ensure_collection("history_features", "历史记录集合")

            logger.info("向量数据库初始化成功")

        except Exception as e:
            logger.error(f"初始化向量数据库失败: {str(e)}")
            logger.error(f"错误详情: {traceback.format_exc()}")
            raise

    def _ensure_collection(self, collection_name, label, load_paths=False):
        """Open *collection_name*, creating it (FEATURE_DIM, cosine) when the
        lookup fails.

        Args:
            collection_name: Qdrant collection to open or create.
            label: human-readable collection name used in log messages.
            load_paths: when True, existing payload paths are loaded into
                ``self.processed_paths`` for resume support.
        """
        try:
            self.qdrant_client.get_collection(collection_name)
            logger.info(f"{label}已存在")
            # Fetch again for the status line (mirrors the original flow).
            collection_info = self.qdrant_client.get_collection(collection_name)
            logger.info(f"{label}状态: 共有向量 {collection_info.vectors_count} 个")

            if load_paths:
                # Load already-processed paths for checkpoint/resume.
                self._load_processed_paths(collection_name)

        except Exception as e:
            # get_collection raising is how "collection does not exist" is detected.
            logger.info(f"创建新的{label}: {str(e)}")
            self.qdrant_client.recreate_collection(
                collection_name=collection_name,
                vectors_config=VectorParams(
                    size=self.FEATURE_DIM,  # ResNet50 feature dimension
                    distance=Distance.COSINE,
                ),
            )
            logger.info(f"创建新的{label}完成")

    def _load_processed_paths(self, collection_name):
        """Populate ``self.processed_paths`` with every stored payload 'path'.

        Scrolls through *collection_name* in batches (resume support).  On any
        error the set is reset to empty so nothing is wrongly skipped later.
        """
        try:
            batch_size = 100
            offset = None
            count = 0

            while True:
                points, next_offset = self.qdrant_client.scroll(
                    collection_name=collection_name,
                    limit=batch_size,
                    offset=offset,
                    with_payload=True,   # the payload carries the file path
                    with_vectors=False,  # vector data is not needed here
                )

                for point in points:
                    if 'path' in point.payload:
                        self.processed_paths.add(normalize_path(point.payload['path']))
                        count += 1

                if next_offset is None:
                    break

                offset = next_offset

            logger.info(f"已加载 {count} 个已处理的路径记录")

        except Exception as e:
            logger.error(f"加载已处理路径出错: {str(e)}")
            # Reset so a partially-loaded set cannot cause false skips.
            self.processed_paths = set()

    def store_vector(self, image_path, features, metadata=None):
        """Upsert one image feature vector into ``image_features``.

        Paths already recorded in ``self.processed_paths`` are skipped so a
        restarted bulk load does not duplicate work.  Errors are logged and
        swallowed deliberately: one bad image must not abort the whole load.

        Args:
            image_path: source image path; normalized and used to derive a
                deterministic point id.
            features: torch.Tensor, numpy array, or plain list of floats.
            metadata: optional payload dict; its 'path' entry is normalized
                (or filled in) before storage.
        """
        try:
            if metadata is None:
                metadata = {}

            normalized_path = normalize_path(image_path)

            # Resume support: silently skip anything already ingested.
            if normalized_path in self.processed_paths:
                self.processed_vectors += 1
                return

            # Keep the stored payload path in canonical form.
            if 'path' in metadata:
                metadata['path'] = normalize_path(metadata['path'])
            else:
                metadata['path'] = normalized_path

            if isinstance(features, torch.Tensor):
                features = features.cpu().numpy()

            # NOTE(review): an md5 hexdigest (32 hex chars) is used as the
            # point id — presumably parsed as a UUID by the client; confirm
            # against the installed qdrant-client version.
            self.qdrant_client.upsert(
                collection_name="image_features",
                points=[
                    PointStruct(
                        id=hashlib.md5(normalized_path.encode()).hexdigest(),
                        vector=features.tolist() if isinstance(features, np.ndarray) else features,
                        payload=metadata,
                    )
                ],
            )

            self.processed_paths.add(normalized_path)
            self.processed_vectors += 1
            # Progress output is handled by the caller; nothing logged here.

        except Exception as e:
            logger.error(f"存储向量失败: {str(e)}")

    def set_total_vectors(self, total):
        """Reset progress counters for a new bulk load of *total* vectors."""
        self.total_vectors = total
        self.processed_vectors = 0
        self.last_progress = 0  # restart the progress tracker

    def store_history_vector(self, image_path, features, metadata):
        """Upsert one feature vector into ``history_features``.

        NOTE(review): unlike store_vector(), the path is NOT normalized
        before hashing, so the same file can get different ids here —
        confirm whether that asymmetry is intentional.
        """
        try:
            if isinstance(features, torch.Tensor):
                features = features.cpu().numpy()

            self.qdrant_client.upsert(
                collection_name="history_features",
                points=[
                    PointStruct(
                        id=hashlib.md5(image_path.encode()).hexdigest(),
                        vector=features.tolist() if isinstance(features, np.ndarray) else features,
                        payload=metadata,
                    )
                ],
            )
            logger.info(f"成功存储历史记录向量: {image_path}")
        except Exception as e:
            logger.error(f"存储历史记录向量失败 {image_path}: {str(e)}")

    def find_similar_in_history(self, features, top_k=8, current_image_url=None):
        """Search ``history_features`` for images similar to *features*.

        Only hits with similarity above 65% are kept; the query image itself
        and near-duplicates (>99.9%) are filtered out, and at most 4 results
        are returned.

        Args:
            features: query vector (torch.Tensor, numpy array, or list).
            top_k: raw hit count requested from Qdrant (one extra is fetched
                to compensate for filtering).
            current_image_url: URL of the query image, excluded when given.

        Returns:
            list[dict]: up to 4 entries (image_url, class labels, similarity
            in percent, timestamp); empty list on error.
        """
        try:
            if isinstance(features, torch.Tensor):
                features = features.cpu().numpy()

            query_vector = features.tolist() if isinstance(features, np.ndarray) else features

            # Over-fetch by one so filtering below can still fill the quota.
            search_result = self.qdrant_client.search(
                collection_name="history_features",
                query_vector=query_vector,
                limit=top_k + 1,
            )

            results = []
            for hit in search_result:
                metadata = hit.payload
                similarity = float(hit.score * 100)

                # Guard clauses: payload must carry a URL and clear the 65% bar.
                if 'image_url' not in metadata or similarity <= 65.0:
                    continue
                # Skip the query image itself when its URL is known.
                if current_image_url and metadata.get('image_url') == current_image_url:
                    logger.info(f"排除当前图片: {current_image_url}, 相似度: {similarity:.2f}%")
                    continue
                # Near-100% hits are almost certainly the same picture stored
                # twice (e.g. a re-encoded copy).
                if similarity > 99.9:
                    logger.info(f"排除高度相似图片: {metadata.get('image_url')}, 相似度: {similarity:.2f}%")
                    continue

                results.append({
                    'image_url': metadata['image_url'],
                    'main_class': metadata.get('main_class', '未知'),
                    'detailed_class': metadata.get('detailed_class', '未知'),
                    'similarity': similarity,
                    'timestamp': metadata.get('timestamp', ''),
                })

            # Hard cap of four results regardless of top_k.
            return results[:4]
        except Exception as e:
            logger.error(f"搜索历史记录中的相似图片失败: {str(e)}")
            return []

    def find_similar_in_dataset(self, features, top_k=4):
        """Search ``image_features`` for the *top_k* most similar dataset images.

        Waits up to 5 seconds for Qdrant indexing to catch up before querying.

        Returns:
            list[dict]: entries with path, similarity (percent), class labels
            and a display text; empty list when the collection is empty or on
            error.
        """
        try:
            if isinstance(features, torch.Tensor):
                features = features.cpu().numpy()

            query_vector = features.tolist() if isinstance(features, np.ndarray) else features
            logger.info(f"查询向量维度: {len(query_vector)}")

            collection_info = self.qdrant_client.get_collection("image_features")
            logger.info(f"向量数据库集合信息: {collection_info}")

            # NOTE(review): newer qdrant-client versions may report
            # vectors_count as None; verify before upgrading the dependency.
            if collection_info.vectors_count == 0:
                logger.error("向量数据库中没有向量数据，请确保已经执行了预加载步骤")
                return []

            # Give the index up to 5 seconds to catch up with recent inserts.
            # (Fix: reuse the module-level `time` import instead of
            # re-importing it locally.)
            if collection_info.indexed_vectors_count < collection_info.vectors_count:
                logger.info(f"正在等待索引完成: {collection_info.indexed_vectors_count}/{collection_info.vectors_count}")
                deadline = time.time() + 5  # wait at most 5 seconds
                while time.time() < deadline:
                    collection_info = self.qdrant_client.get_collection("image_features")
                    if collection_info.indexed_vectors_count >= collection_info.vectors_count:
                        logger.info("索引已完成")
                        break
                    time.sleep(0.5)

            search_result = self.qdrant_client.search(
                collection_name="image_features",
                query_vector=query_vector,
                limit=top_k,
            )
            logger.info(f"搜索到 {len(search_result)} 个相似结果")

            results = []
            for hit in search_result:
                metadata = hit.payload
                if metadata and 'path' in metadata:
                    # Canonicalize the stored path for display/dedup purposes.
                    path = normalize_path(metadata.get('path', ''))
                    main_class = metadata.get('main_category', '未知')
                    detailed_class = metadata.get('class', '未知')
                    result_item = {
                        'path': path,
                        'similarity': float(hit.score * 100),
                        'main_class': main_class,
                        'detailed_class': detailed_class,
                        'text': f"{main_class} - {detailed_class}",
                    }
                    results.append(result_item)
                    logger.info(f"相似结果: {result_item['similarity']:.2f}% - {os.path.basename(path)}")

            return results
        except Exception as e:
            logger.error(f"搜索数据集中的相似图片失败: {str(e)}")
            logger.error(f"错误详情: {traceback.format_exc()}")
            return []