#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
大数据量文件处理演示
专为亿级文件数量优化，使用队列模式处理
"""

import time
import signal
import sys

from config import TARGET_DIRECTORY
from tools.QueuedFileSearchTools import QueuedFileSearchTool, Progress


class MassiveFileProcessor:
    """Interactive driver for queue-based indexing of very large file trees.

    Wraps a QueuedFileSearchTool: starts multi-threaded indexing with
    throttled progress reporting, installs SIGINT/SIGTERM handlers for a
    clean shutdown, prints a final summary, and offers an interactive
    search loop for verifying the resulting index.
    """

    def __init__(self):
        # Created lazily in run_massive_indexing(); None until then.
        self.search_tool = None
        # Wall-clock timestamps (time.time()) driving ETA / throttling.
        self.start_time = 0
        self.last_progress_time = 0

    def progress_callback(self, progress: "Progress"):
        """Print a progress snapshot, throttled to at most once per 10 s.

        Args:
            progress: object exposing scanned_files, indexed_files,
                skipped_files, failed_files counters plus scan_speed and
                index_speed (files per second).
        """
        current_time = time.time()

        # Throttle console output to one report every 10 seconds.
        if current_time - self.last_progress_time >= 10:
            self.last_progress_time = current_time
            elapsed = current_time - self.start_time

            print(f"\n📊 实时进度统计 (运行时间: {elapsed:.1f}秒)")
            print(f"   已扫描文件: {progress.scanned_files:,} 个")
            print(f"   已索引文件: {progress.indexed_files:,} 个")
            print(f"   跳过文件: {progress.skipped_files:,} 个 (重复)")
            print(f"   失败文件: {progress.failed_files:,} 个")
            print(f"   扫描速度: {progress.scan_speed:.1f} 文件/秒")
            print(f"   索引速度: {progress.index_speed:.1f} 文件/秒")

            # ETA = remaining backlog / current indexing throughput.
            if progress.index_speed > 0 and progress.scanned_files > progress.indexed_files:
                remaining = progress.scanned_files - progress.indexed_files
                eta = remaining / progress.index_speed
                print(f"   预计剩余时间: {eta/60:.1f} 分钟")

    def signal_handler(self, signum, frame):
        """Handle SIGINT/SIGTERM: stop background indexing, then exit."""
        print(f"\n🛑 接收到停止信号 ({signum})")
        if self.search_tool:
            self.search_tool.stop_indexing()
        sys.exit(0)

    def run_massive_indexing(self, target_directory: str, max_files: int = -1):
        """Index target_directory with the queued multi-threaded pipeline.

        Args:
            target_directory: root directory to scan.
            max_files: cap on the number of files to process; any negative
                value means no limit.
        """
        print("🚀 大数据量文件索引系统")
        print("=" * 60)

        # Install handlers so Ctrl+C / kill shut the workers down cleanly.
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)

        try:
            # Queue/batch/worker sizes are tuned for very large trees.
            print("1. 初始化队列模式搜索工具...")
            self.search_tool = QueuedFileSearchTool(
                collection_name="massive_file_search",
                queue_size=50000,    # large scan queue
                batch_size=200,      # large insert batches
                num_workers=5        # several indexing threads
            )

            # Optionally wipe records left over from a previous run.
            stats = self.search_tool.get_collection_stats()
            if stats['total_entities'] > 0:
                choice = input(f"\n集合中已有 {stats['total_entities']:,} 条记录，是否清空？(y/n): ")
                if choice.lower() == 'y':
                    print("正在清空集合...")
                    self.search_tool.clear_collection()

            print(f"\n2. 启动队列模式索引...")
            print(f"   目标目录: {target_directory}")
            print(f"   文件限制: {'无限制' if max_files < 0 else f'{max_files:,}'}")
            print(f"   处理模式: 多线程队列")

            self.start_time = time.time()
            self.last_progress_time = self.start_time

            # Kick off background indexing; progress_callback reports status.
            self.search_tool.start_queued_indexing(
                root_path=target_directory,
                max_files=max_files,
                progress_callback=self.progress_callback
            )

            print("\n✅ 索引已启动，正在后台处理...")
            print("💡 按 Ctrl+C 可以随时停止处理")
            print("-" * 60)

            # Block until the queue drains and all workers finish.
            self.search_tool.wait_completion()

            self.show_final_stats()

        except KeyboardInterrupt:
            print("\n⏹️ 用户中断处理")
            if self.search_tool:
                self.search_tool.stop_indexing()
        except Exception as e:
            print(f"\n❌ 处理过程中出错: {str(e)}")
            # Stop background workers on unexpected errors too, mirroring
            # the KeyboardInterrupt path; otherwise indexing threads keep
            # running after the error is reported.
            if self.search_tool:
                self.search_tool.stop_indexing()
        finally:
            print("\n🏁 处理结束")

    def show_final_stats(self):
        """Print the final run summary: counts, speeds and dedup ratios."""
        final_progress = self.search_tool.get_progress()
        stats = self.search_tool.get_collection_stats()
        total_time = time.time() - self.start_time

        print("\n" + "=" * 60)
        print("📈 最终处理统计")
        print("=" * 60)
        print(f"总处理时间: {total_time/60:.2f} 分钟")
        print(f"扫描文件总数: {final_progress.scanned_files:,} 个")
        print(f"成功索引: {final_progress.indexed_files:,} 个")
        print(f"跳过重复: {final_progress.skipped_files:,} 个")
        print(f"失败文件: {final_progress.failed_files:,} 个")
        print(f"数据库记录: {stats['total_entities']:,} 条")
        print(f"平均扫描速度: {final_progress.scan_speed:.1f} 文件/秒")
        print(f"平均索引速度: {final_progress.index_speed:.1f} 文件/秒")

        if final_progress.scanned_files > 0:
            # NOTE(review): this subtraction assumes indexed_files also
            # counts the skipped duplicates; if the tool counts them
            # separately, this undercounts new files — confirm against
            # QueuedFileSearchTool's counter semantics.
            actual_new_files = final_progress.indexed_files - final_progress.skipped_files
            success_rate = (actual_new_files / final_progress.scanned_files) * 100
            print(f"新增文件: {actual_new_files:,} 个")
            print(f"新增成功率: {success_rate:.2f}%")

            if final_progress.skipped_files > 0:
                skip_rate = (final_progress.skipped_files / final_progress.scanned_files) * 100
                print(f"去重效率: {skip_rate:.2f}% (避免了重复索引)")

    def test_search(self):
        """Interactive search loop against the index; type 'quit' to exit."""
        if not self.search_tool:
            print("❌ 请先运行索引")
            return

        print("\n🔍 测试搜索功能")

        while True:
            query = input("\n请输入搜索关键词: ").strip()

            if query.lower() == 'quit':
                print("退出搜索")
                break

            if not query:
                print("请输入有效的搜索关键词")
                continue

            # Time only the vector search call itself.
            start_time = time.time()
            results = self.search_tool.search_files(query, top_k=10)
            search_time = time.time() - start_time

            print(f"\n搜索结果 (用时 {search_time:.3f}秒):")
            print("-" * 80)

            if results:
                for i, result in enumerate(results, 1):
                    file_size_mb = result['file_size'] / 1024 / 1024 if result['file_size'] > 0 else 0
                    print(f"{i:2d}. 文件名: {result['file_name']}")
                    print(f"     路径: {result['file_path']}")
                    print(f"     类型: {result['file_type']:<15} 大小: {file_size_mb:.2f}MB")
                    print(f"     相似度: {result['similarity_score']:.4f}")
                    print(f"     修改时间: {result['modified_time']}")
                    print()
            else:
                print("未找到相关文件")

            print("-" * 80)


def main():
    """Interactive entry point: gather config, run indexing, offer search.

    Prompts for the target directory (blank falls back to the configured
    TARGET_DIRECTORY) and an optional file-count limit, confirms the
    configuration, then runs the massive indexing pipeline and optionally
    the interactive search test.
    """
    print("大数据量文件索引系统")
    print("专为亿级文件数量优化")
    print("=" * 50)

    processor = MassiveFileProcessor()

    # Target directory: blank input falls back to the configured default.
    target_directory = input("请输入目标目录路径 (回车表示默认): ").strip()
    if not target_directory:
        target_directory = TARGET_DIRECTORY

    # File-count limit: blank means unlimited (-1). A non-numeric entry
    # used to be silently treated as unlimited; tell the user instead of
    # swallowing the ValueError without notice.
    max_files = -1
    max_files_input = input("请输入文件数量限制 (回车表示无限制): ").strip()
    if max_files_input:
        try:
            max_files = int(max_files_input)
        except ValueError:
            print(f"无效的数字 '{max_files_input}'，将按无限制处理")
            max_files = -1

    print(f"\n配置确认:")
    print(f"  目标目录: {target_directory}")
    print(f"  文件限制: {'无限制' if max_files < 0 else f'{max_files:,}'}")
    print(f"  处理模式: 队列模式 (多线程)")

    confirm = input(f"\n是否开始处理？(y/n): ").lower()
    if confirm != 'y':
        print("处理取消")
        return

    try:
        # Run the heavy indexing pass.
        processor.run_massive_indexing(target_directory, max_files)

        # Offer the interactive search loop against the fresh index.
        test_choice = input(f"\n是否测试搜索功能？(y/n): ").lower()
        if test_choice == 'y':
            processor.test_search()

    except Exception as e:
        print(f"程序执行出错: {str(e)}")

    print("\n程序结束")


# Script entry point: run the interactive flow only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()