#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
队列模式文件搜索工具
专为亿级文件数量优化，实现生产者-消费者模式
"""

import os
import json
import time
import threading
import queue
from datetime import datetime
from typing import List, Dict, Optional, Callable
from pathlib import Path
from dataclasses import dataclass
from collections import defaultdict
from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType, utility

from config import TARGET_DIRECTORY
from .SentenceTransformerTools import VectorTool


@dataclass
class FileInfo:
    """Record describing one scanned filesystem entry, as queued for indexing."""
    file_path: str      # path as produced by the scanner, truncated to 2000 chars max
    file_name: str      # base name, truncated to 500 chars max
    file_type: str      # lowercase extension, "no_extension", or "directory"
    file_size: int      # size in bytes; always 0 for directories
    modified_time: str  # mtime as an ISO-8601 string

@dataclass
class Progress:
    """Progress counters shared across pipeline threads; guard access with a lock."""
    scanned_files: int = 0
    indexed_files: int = 0
    failed_files: int = 0
    skipped_files: int = 0  # files skipped by the dedup check
    start_time: float = 0
    last_update: float = 0
    scan_speed: float = 0   # scanned files per second since start_time
    index_speed: float = 0  # indexed files per second since start_time
    scan_completed: bool = False  # producer thread finished walking the tree
    processing_completed: bool = False  # scan done AND all workers exited


class QueuedFileSearchTool:
    """Queue-based (producer/consumer) file search tool.

    One scanner thread walks a directory tree and pushes FileInfo records
    into a bounded queue; `num_workers` consumer threads pull items, batch
    them, deduplicate against Milvus, embed the file names with VectorTool,
    and insert the vectors. An optional monitor thread reports progress.
    """
    
    def __init__(self, 
                 collection_name: str = "queued_file_search",
                 milvus_host: str = "c-f88be548ab3aa84d.milvus.aliyuncs.com",
                 milvus_port: int = 19530,
                 username: str = "root",
                 password: str = "Demo123456@",
                 token: Optional[str] = None,
                 queue_size: int = 10000,
                 batch_size: int = 100,
                 num_workers: int = 3):
        """
        Initialize the queued file search tool and connect to Milvus.
        
        Args:
            collection_name: Milvus collection name
            milvus_host: Milvus server address (any http/https prefix is stripped)
            milvus_port: Milvus server port
            username: user name (used together with password)
            password: password
            token: access token, used only when username/password are not both set
            queue_size: maximum number of FileInfo items buffered in the queue
            batch_size: number of files per Milvus insert batch
            num_workers: number of consumer (indexing) threads

        Raises:
            Exception: propagated from the Milvus connection/initialization.

        SECURITY NOTE(review): the default host/username/password above are
        real-looking credentials hardcoded in source — they should be moved
        to configuration or environment variables.
        """
        self.collection_name = collection_name
        self.milvus_host = milvus_host
        self.milvus_port = milvus_port
        self.username = username
        self.password = password
        self.token = token
        self.queue_size = queue_size
        self.batch_size = batch_size
        self.num_workers = num_workers
        
        # Queue and thread control
        self.file_queue = queue.Queue(maxsize=queue_size)
        self.workers = []
        self.stop_event = threading.Event()
        self.scan_thread = None
        self.monitor_thread = None
        
        # Progress counters, guarded by progress_lock
        self.progress = Progress()
        self.progress_lock = threading.Lock()
        
        # Embedding tool and database handle
        self.vector_tool = VectorTool()
        self.collection = None
        self.target_directory = None  # base dir used to relativize paths for dedup
        
        # Establish the Milvus connection (raises on failure)
        self._init_milvus()
    
    def _init_milvus(self) -> None:
        """Connect to Milvus and load (or create) the target collection.

        Raises:
            Exception: re-raises any connection/initialization error after
            printing a diagnostic.
        """
        try:
            # Strip any URL scheme from the host parameter
            clean_host = self.milvus_host
            if clean_host.startswith("http://"):
                clean_host = clean_host[7:]
            elif clean_host.startswith("https://"):
                clean_host = clean_host[8:]
            
            # Connection parameters
            connect_params = {
                "alias": "default",
                "host": clean_host,
                "port": self.milvus_port
            }
            
            # Authentication: username/password takes precedence over token
            if self.username and self.password:
                connect_params["user"] = self.username
                connect_params["password"] = self.password
            elif self.token:
                connect_params["token"] = self.token
            
            # Connect to Milvus
            connections.connect(**connect_params)
            print(f"✅ Milvus连接成功: {clean_host}:{self.milvus_port}")
            
            # Load the collection if it exists, otherwise create it
            if utility.has_collection(self.collection_name):
                self.collection = Collection(self.collection_name)
                self.collection.load()
                print(f"✅ 集合 '{self.collection_name}' 已加载")
            else:
                self._create_collection()
                
        except Exception as e:
            print(f"❌ Milvus初始化失败: {str(e)}")
            raise e
    
    def _create_collection(self) -> None:
        """Create the Milvus collection, build its vector index, and load it."""
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="file_path", dtype=DataType.VARCHAR, max_length=2000),  # generous path length
            FieldSchema(name="file_name", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="file_type", dtype=DataType.VARCHAR, max_length=100),   # generous type length
            FieldSchema(name="file_size", dtype=DataType.INT64),
            FieldSchema(name="modified_time", dtype=DataType.VARCHAR, max_length=50),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=1024)  # presumably matches VectorTool's output dim — TODO confirm
        ]
        
        schema = CollectionSchema(fields, description="队列模式文件搜索集合")
        self.collection = Collection(self.collection_name, schema)
        
        # Build an approximate-NN index on the embedding field
        index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "COSINE",
            "params": {"nlist": 1024}
        }
        
        self.collection.create_index("embedding", index_params)
        self.collection.load()
        
        print(f"✅ 成功创建集合 '{self.collection_name}'")
    
    def _validate_data_length(self, file_path: str, file_name: str, file_type: str) -> tuple:
        """Truncate the three string fields to their VARCHAR limits (2000/500/100).

        Returns:
            Tuple of (safe_file_path, safe_file_name, safe_file_type), each
            guaranteed to fit the corresponding collection field.
        """
        safe_file_path = file_path[:2000] if len(file_path) > 2000 else file_path
        safe_file_name = file_name[:500] if len(file_name) > 500 else file_name
        safe_file_type = file_type[:100] if len(file_type) > 100 else file_type
        
        # Warn whenever anything had to be truncated
        if len(file_path) > 2000:
            print(f"⚠️ 文件路径过长被截断: {file_path[:50]}...")
        if len(file_name) > 500:
            print(f"⚠️ 文件名过长被截断: {file_name[:50]}...")
        if len(file_type) > 100:
            print(f"⚠️ 文件类型过长被截断: {file_type}")
        
        return safe_file_path, safe_file_name, safe_file_type
    
    def _file_scanner_producer(self, root_path: str, max_files: int = -1) -> None:
        """Producer thread: walk root_path and enqueue one FileInfo per entry.

        Directories are enqueued too (file_type="directory", size 0). A
        negative max_files means unlimited. On exit — normal, limited, or
        stopped — one None sentinel per worker is pushed so consumers can
        terminate cleanly.
        """
        file_count = 0
        unlimited = max_files < 0
        
        try:
            print(f"🔍 开始扫描: {root_path}")
            
            for root, dirs, files in os.walk(root_path):
                if self.stop_event.is_set():
                    break
                
                # Enqueue directories
                for dir_name in dirs:
                    if self.stop_event.is_set():
                        break
                    if not unlimited and file_count >= max_files:
                        break
                    
                    dir_path = os.path.join(root, dir_name)
                    try:
                        stat = os.stat(dir_path)
                        
                        # Clamp field lengths to the collection's VARCHAR limits
                        safe_dir_path, safe_dir_name, _ = self._validate_data_length(
                            dir_path, dir_name, "directory"
                        )
                        
                        file_info = FileInfo(
                            file_path=safe_dir_path,
                            file_name=safe_dir_name,
                            file_type="directory",
                            file_size=0,
                            modified_time=datetime.fromtimestamp(stat.st_mtime).isoformat()
                        )
                        
                        # Block up to 30s for queue space (backpressure on scan)
                        self.file_queue.put(file_info, timeout=30)
                        file_count += 1
                        
                        # Update progress
                        with self.progress_lock:
                            self.progress.scanned_files += 1
                            
                    except (OSError, PermissionError, queue.Full) as e:
                        # NOTE(review): a queue.Full timeout silently drops this entry
                        print(f"⚠️ 跳过目录 {dir_path}: {e}")
                        continue
                
                # Enqueue regular files
                for file_name in files:
                    if self.stop_event.is_set():
                        break
                    if not unlimited and file_count >= max_files:
                        break
                    
                    file_path = os.path.join(root, file_name)
                    try:
                        stat = os.stat(file_path)
                        file_ext = Path(file_name).suffix.lower() or "no_extension"
                        
                        # Clamp field lengths to the collection's VARCHAR limits
                        safe_file_path, safe_file_name, safe_file_type = self._validate_data_length(
                            file_path, file_name, file_ext
                        )
                        
                        file_info = FileInfo(
                            file_path=safe_file_path,
                            file_name=safe_file_name,
                            file_type=safe_file_type,
                            file_size=stat.st_size,
                            modified_time=datetime.fromtimestamp(stat.st_mtime).isoformat()
                        )
                        
                        # Block up to 30s for queue space (backpressure on scan)
                        self.file_queue.put(file_info, timeout=30)
                        file_count += 1
                        
                        # Update progress
                        with self.progress_lock:
                            self.progress.scanned_files += 1
                            
                    except (OSError, PermissionError, queue.Full) as e:
                        # NOTE(review): a queue.Full timeout silently drops this entry
                        print(f"⚠️ 跳过文件 {file_path}: {e}")
                        continue
                
                if not unlimited and file_count >= max_files:
                    break
                    
        except Exception as e:
            print(f"❌ 扫描过程出错: {e}")
        finally:
            # Mark the scan phase as finished
            with self.progress_lock:
                self.progress.scan_completed = True
            
            # Send one end-of-stream sentinel per consumer
            for _ in range(self.num_workers):
                try:
                    self.file_queue.put(None, timeout=10)
                except queue.Full:
                    # NOTE(review): a lost sentinel leaves that worker relying
                    # on stop_event / its 5s get-timeout loop to exit
                    pass
            
            print(f"✅ 文件扫描完成，共扫描 {file_count} 个文件")
            print("🔄 等待索引处理完成...")
    
    def _indexing_consumer(self, worker_id: int) -> None:
        """Consumer thread: drain the queue and index files in batches.

        Exits on a None sentinel or when stop_event is set. A partially
        filled batch is flushed whenever the queue stays empty for 5s, and
        once more before the thread exits.
        """
        batch = []
        
        while not self.stop_event.is_set():
            try:
                # Wait up to 5s for the next scanned entry
                file_info = self.file_queue.get(timeout=5)
                
                # End-of-stream sentinel from the producer
                if file_info is None:
                    break
                
                batch.append(file_info)
                
                # Flush a full batch
                if len(batch) >= self.batch_size:
                    self._process_batch(batch, worker_id)
                    batch = []
                
            except queue.Empty:
                # Queue idle: flush whatever has accumulated so far
                if batch:
                    self._process_batch(batch, worker_id)
                    batch = []
                continue
            except Exception as e:
                print(f"❌ Worker {worker_id} 处理出错: {e}")
                continue
        
        # Flush the final partial batch
        if batch:
            self._process_batch(batch, worker_id)
        
        print(f"🔄 Worker {worker_id} 已完成所有任务")
    
    def _get_relative_path(self, file_path: str, base_dir: str) -> str:
        """Return file_path relative to base_dir (forward-slash normalized).

        Falls back to the normalized file_path unchanged when it does not
        start with base_dir.
        """
        # Normalize path separators to forward slashes
        file_path = file_path.replace('\\', '/')
        base_dir = base_dir.replace('\\', '/')
        
        if file_path.startswith(base_dir):
            relative_path = file_path[len(base_dir):].lstrip('/')
            return relative_path
        
        # Not under base_dir: return the (normalized) path as-is
        return file_path
    
    def _check_existing_files(self, batch: List[FileInfo], base_dir: str = None) -> List[FileInfo]:
        """Return the subset of batch not already present in the collection.

        A file counts as existing when both its (relativized) path and its
        modified_time match a stored row. On any query failure the whole
        batch is returned so nothing is lost (at the cost of duplicates).
        """
        if not batch:
            return []
        
        try:
            # Build per-file match conditions on path + modified time
            search_conditions = []
            for file_info in batch:
                # Relativize the path when a base directory is known
                if base_dir:
                    relative_path = self._get_relative_path(file_info.file_path, base_dir)
                else:
                    relative_path = file_info.file_path
                
                # Escape double quotes for the query expression.
                # NOTE(review): only '"' is escaped — backslashes (e.g. raw
                # Windows paths when base_dir is None) could still break or
                # alter the expression; verify against pymilvus expr grammar.
                escaped_path = relative_path.replace('"', '\\"')
                escaped_time = file_info.modified_time.replace('"', '\\"')
                
                search_conditions.append(f'file_path == "{escaped_path}" && modified_time == "{escaped_time}"')
            
            # Milvus query expressions have a length limit: split big batches
            if len(search_conditions) > 50:
                return self._check_existing_files_in_batches(batch, base_dir)
            
            # OR all per-file conditions into one expression
            combined_expr = ' || '.join(f'({condition})' for condition in search_conditions)
            
            # Query for rows that already exist
            existing_results = self.collection.query(
                expr=combined_expr,
                output_fields=["file_path", "modified_time"],
                limit=len(batch) * 2  # headroom in case of duplicate rows
            )
            
            # Build a set of "path|mtime" keys for the existing rows
            existing_files = set()
            for result in existing_results:
                key = f"{result['file_path']}|{result['modified_time']}"
                existing_files.add(key)
            
            # Keep only files whose key is not already stored
            new_files = []
            for file_info in batch:
                if base_dir:
                    relative_path = self._get_relative_path(file_info.file_path, base_dir)
                else:
                    relative_path = file_info.file_path
                
                key = f"{relative_path}|{file_info.modified_time}"
                if key not in existing_files:
                    new_files.append(file_info)
            
            return new_files
            
        except Exception as e:
            print(f"⚠️ 去重检查失败，将处理所有文件: {e}")
            return batch  # dedup is best-effort: on failure process everything
    
    def _check_existing_files_in_batches(self, batch: List[FileInfo], base_dir: str = None) -> List[FileInfo]:
        """Run the existence check in sub-batches of 50 (for large batches)."""
        new_files = []
        batch_size = 50  # files per dedup query, to respect expr length limits
        
        for i in range(0, len(batch), batch_size):
            sub_batch = batch[i:i + batch_size]
            sub_new_files = self._check_existing_files(sub_batch, base_dir)
            new_files.extend(sub_new_files)
        
        return new_files
    
    def _process_batch(self, batch: List[FileInfo], worker_id: int) -> None:
        """Deduplicate, embed, and insert one batch of FileInfo records.

        Progress accounting: indexed_files is advanced by the FULL batch
        size (duplicates included); skipped_files counts only duplicates;
        failed_files is advanced by the full batch size on any error.
        """
        try:
            # Dedup check: only embed/insert files not already stored
            new_files = self._check_existing_files(batch, self.target_directory)
            
            if not new_files:
                print(f"🔄 Worker {worker_id}: 批次中所有 {len(batch)} 个文件已存在，跳过")
                with self.progress_lock:
                    self.progress.indexed_files += len(batch)  # skipped files still count as processed
                    self.progress.skipped_files += len(batch)
                return
            
            skipped_count = len(batch) - len(new_files)
            if skipped_count > 0:
                print(f"🔄 Worker {worker_id}: 跳过 {skipped_count} 个重复文件，处理 {len(new_files)} 个新文件")
                with self.progress_lock:
                    self.progress.skipped_files += skipped_count
            
            # Build the text to embed: "<name> <type>" per file
            texts = []
            for file_info in new_files:
                text = f"{file_info.file_name} {file_info.file_type}"
                texts.append(text)
            
            # Generate embeddings for the whole batch at once
            embeddings = self.vector_tool.generate_vector(texts)
            
            # Column-ordered insert data; paths stored relative to target_directory
            data = [
                [self._get_relative_path(f.file_path, self.target_directory) for f in new_files],
                [f.file_name for f in new_files],
                [f.file_type for f in new_files],
                [f.file_size for f in new_files],
                [f.modified_time for f in new_files],
                embeddings
            ]
            
            # Insert into Milvus
            self.collection.insert(data)
            
            # Advance progress by the whole batch (new + skipped)
            with self.progress_lock:
                self.progress.indexed_files += len(batch)
            
            print(f"✅ Worker {worker_id}: 新增 {len(new_files)} 个文件索引")
            
        except Exception as e:
            print(f"❌ Worker {worker_id} 批次处理失败: {e}")
            with self.progress_lock:
                self.progress.failed_files += len(batch)
    
    def start_queued_indexing(self, root_path: str, max_files: int = -1, 
                             progress_callback: Optional[Callable] = None):
        """Start the indexing pipeline: workers, scanner, optional monitor.

        Args:
            root_path: directory tree to index; also becomes the dedup base dir
            max_files: stop after this many entries; negative means unlimited
            progress_callback: if given, called with a Progress snapshot every
                ~2s by a dedicated monitor thread

        Returns immediately; use wait_completion() to block until done.
        NOTE(review): repeated calls append to self.workers without clearing
        previous (dead) threads unless stop_indexing() was called in between.
        """
        print(f"🚀 启动队列模式索引")
        print(f"   目标目录: {root_path}")
        print(f"   文件限制: {'无限制' if max_files < 0 else max_files}")
        print(f"   队列大小: {self.queue_size}")
        print(f"   批处理大小: {self.batch_size}")
        print(f"   工作线程: {self.num_workers}")
        print(f"   去重检查: 已启用")
        
        # Remember the target directory for dedup/relativization
        self.target_directory = root_path
        
        # Reset progress counters and the stop flag
        self.progress = Progress()
        self.progress.start_time = time.time()
        self.stop_event.clear()
        
        # Start the consumer (indexing) threads
        for i in range(self.num_workers):
            worker = threading.Thread(
                target=self._indexing_consumer,
                args=(i,),
                name=f"IndexWorker-{i}"
            )
            worker.start()
            self.workers.append(worker)
        
        # Start the producer (scanner) thread
        self.scan_thread = threading.Thread(
            target=self._file_scanner_producer,
            args=(root_path, max_files),
            name="FileScanner"
        )
        self.scan_thread.start()
        
        # Start progress monitoring if a callback was supplied
        if progress_callback:
            self.monitor_thread = threading.Thread(
                target=self._progress_monitor,
                args=(progress_callback,),
                name="ProgressMonitor"
            )
            self.monitor_thread.start()
        
        print("✅ 队列模式索引已启动")
    
    def _progress_monitor(self, callback: Callable):
        """Monitor thread: refresh speed stats and invoke callback every ~2s.

        Exits when stop_event is set or when the scan is complete and all
        workers have finished.
        NOTE(review): callback runs while progress_lock is held (a slow
        callback stalls the pipeline's counter updates), and it is NOT
        invoked on the final iteration — the loop breaks first.
        """
        while not self.stop_event.is_set():
            time.sleep(2)  # refresh interval
            
            with self.progress_lock:
                current_time = time.time()
                elapsed = current_time - self.progress.start_time
                
                if elapsed > 0:
                    self.progress.scan_speed = self.progress.scanned_files / elapsed
                    self.progress.index_speed = self.progress.indexed_files / elapsed
                
                self.progress.last_update = current_time
                
                # Stop monitoring once everything is done
                if self.progress.scan_completed and self._all_workers_finished():
                    self.progress.processing_completed = True
                    print("\n✅ 所有处理任务已完成，停止进度监控")
                    break
                
                # Report progress to the caller
                callback(self.progress)
    
    def wait_completion(self, timeout: Optional[float] = None):
        """Block until scanner, workers, and monitor finish, then flush.

        Args:
            timeout: per-join timeout in seconds (None waits indefinitely).
                NOTE(review): join(timeout) does not guarantee the thread
                actually finished; flush may run while workers still insert.
        """
        try:
            # Wait for the scanner thread
            if self.scan_thread:
                self.scan_thread.join(timeout)
            
            # Wait for every worker thread
            for worker in self.workers:
                worker.join(timeout)
            
            # Wait briefly for the monitor thread
            if hasattr(self, 'monitor_thread') and self.monitor_thread:
                self.monitor_thread.join(timeout=10)  # give the monitor 10s to wind down
            
            # Persist pending inserts
            self.collection.flush()
            
            print("✅ 队列模式索引完成")
            
        except Exception as e:
            print(f"❌ 等待完成时出错: {e}")
    
    def stop_indexing(self):
        """Signal all threads to stop, drain the queue, and join them."""
        print("🛑 正在停止索引...")
        self.stop_event.set()
        
        # Drain the queue so a blocked producer can finish.
        # NOTE(review): empty()/get_nowait() is racy, but workers also drain
        # concurrently, so leftover items are harmless here.
        while not self.file_queue.empty():
            try:
                self.file_queue.get_nowait()
            except queue.Empty:
                break
        
        # Join all threads (bounded wait)
        self.wait_completion(timeout=30)
        
        # Forget the finished threads
        self.workers.clear()
        self.scan_thread = None
        self.monitor_thread = None
        
        print("✅ 索引已停止")
    
    def _all_workers_finished(self) -> bool:
        """Return True when no worker thread is still alive."""
        for worker in self.workers:
            if worker.is_alive():
                return False
        return True
    
    def get_progress(self) -> Progress:
        """Return a consistent snapshot (copy) of the shared Progress state."""
        with self.progress_lock:
            return Progress(
                scanned_files=self.progress.scanned_files,
                indexed_files=self.progress.indexed_files,
                failed_files=self.progress.failed_files,
                skipped_files=self.progress.skipped_files,
                start_time=self.progress.start_time,
                last_update=self.progress.last_update,
                scan_speed=self.progress.scan_speed,
                index_speed=self.progress.index_speed,
                scan_completed=self.progress.scan_completed,
                processing_completed=self.progress.processing_completed
            )
    
    def search_files(self, query: str, top_k: int = 10) -> List[Dict]:
        """Vector-search the collection for files matching a text query.

        Args:
            query: free-text query, embedded via VectorTool.
            top_k: maximum number of hits to return.

        Returns:
            List of dicts with file metadata plus "similarity_score".

        NOTE(review): generate_vector is called with a raw str here but with
        a list of texts during indexing — assumes the tool returns a single
        vector for a str input so that [query_embedding] is one query; confirm
        against VectorTool.
        """
        query_embedding = self.vector_tool.generate_vector(query)
        
        search_params = {
            "metric_type": "COSINE",
            "params": {"nprobe": 10}
        }
        
        results = self.collection.search(
            data=[query_embedding],
            anns_field="embedding",
            param=search_params,
            limit=top_k,
            output_fields=["file_path", "file_name", "file_type", "file_size", "modified_time"]
        )
        
        search_results = []
        for hits in results:
            for hit in hits:
                result = {
                    "file_path": hit.entity.get("file_path"),
                    "file_name": hit.entity.get("file_name"),
                    "file_type": hit.entity.get("file_type"),
                    "file_size": hit.entity.get("file_size"),
                    "modified_time": hit.entity.get("modified_time"),
                    "similarity_score": hit.score
                }
                search_results.append(result)
        
        return search_results
    
    def get_collection_stats(self) -> Dict:
        """Return basic collection statistics.

        NOTE(review): utility.load_state returns a LoadState value, not a
        bool — the "is_loaded" key name overstates it; verify consumers.
        """
        return {
            "collection_name": self.collection_name,
            "total_entities": self.collection.num_entities,
            "is_loaded": utility.load_state(self.collection_name)
        }
    
    def clear_collection(self):
        """Drop the collection and recreate it empty (destroys all data)."""
        if self.collection:
            self.collection.drop()
            self._create_collection()
    
    def __del__(self):
        """Best-effort cleanup: stop threads and disconnect from Milvus.

        The bare except deliberately swallows all errors, since attribute
        state is unreliable during interpreter shutdown.
        """
        try:
            self.stop_indexing()
            connections.disconnect("default")
        except:
            pass