import os
import json
from datetime import datetime
from typing import List, Dict, Tuple, Optional
from pathlib import Path
from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType, utility
# from pymilvus import connections
# from pymilvus import FieldSchema, CollectionSchema, DataType, Collection

from .SentenceTransformerTools import VectorTool


class FileSearchTool:
    """File search tool backed by Milvus vector similarity search.

    File and directory names are embedded with :class:`VectorTool` (a BGE
    model producing 1024-dim vectors — see ``_EMBEDDING_DIM``) and stored in
    a Milvus collection so they can be retrieved by semantic similarity
    rather than exact string match.
    """

    # VARCHAR limits of the Milvus schema. _validate_data_length() truncates
    # to these same constants so inserts can never exceed the field
    # definitions created in _create_collection().
    _MAX_PATH_LEN = 2000
    _MAX_NAME_LEN = 500
    _MAX_TYPE_LEN = 100
    # BGE model output dimension (must match VectorTool's embedding size).
    _EMBEDDING_DIM = 1024

    # SECURITY NOTE(review): real-looking host and credentials are
    # hard-coded as parameter defaults below. They are kept for backward
    # compatibility, but should be moved to environment variables or a
    # secrets store.
    def __init__(self, collection_name: str = "file_search",
                 milvus_host: str = "c-f88be548ab3aa84d.milvus.aliyuncs.com",
                 milvus_port: int = 19530,
                 username: str = "root",
                 password: str = "Demo123456@",
                 token: Optional[str] = None):
        """Initialize the file search tool and connect to Milvus eagerly.

        Args:
            collection_name: Name of the Milvus collection to load or create.
            milvus_port: Milvus server port.
            milvus_host: Milvus server address; an ``http://``/``https://``
                prefix is stripped automatically.
            username: Milvus user name (used together with ``password``).
            password: Milvus password.
            token: Optional Milvus access token. When supplied it takes
                precedence over username/password authentication (otherwise
                the non-empty default credentials would make it unreachable).

        Raises:
            Exception: re-raised from pymilvus if connecting or creating the
                collection fails.
        """
        self.collection_name = collection_name
        self.milvus_host = milvus_host
        self.milvus_port = milvus_port
        self.username = username
        self.password = password
        self.token = token
        self.vector_tool = VectorTool()
        self.collection: Optional[Collection] = None

        # Connect and create/load the collection up front so configuration
        # failures surface at construction time.
        self._init_milvus()

    def _init_milvus(self):
        """Connect to Milvus and load (or create) the target collection."""
        try:
            # pymilvus expects a bare host name; strip any URL scheme.
            clean_host = self.milvus_host
            if clean_host.startswith("http://"):
                clean_host = clean_host[len("http://"):]
            elif clean_host.startswith("https://"):
                clean_host = clean_host[len("https://"):]

            connect_params = {
                "alias": "default",
                "host": clean_host,
                "port": self.milvus_port
            }

            # Authentication: an explicitly supplied token wins over the
            # (defaulted) username/password pair. Previously the token branch
            # was checked second and therefore unreachable, because the
            # default credentials are non-empty.
            if self.token:
                connect_params["token"] = self.token
                print(f"使用令牌连接到Milvus: {clean_host}:{self.milvus_port}")
            elif self.username and self.password:
                connect_params["user"] = self.username
                connect_params["password"] = self.password
                print(f"使用用户名密码连接到Milvus: {clean_host}:{self.milvus_port}")
            else:
                print(f"无身份验证连接到Milvus: {clean_host}:{self.milvus_port}")

            connections.connect(**connect_params)
            print("Milvus连接成功!")

            # Reuse an existing collection; otherwise build schema + index.
            if utility.has_collection(self.collection_name):
                print(f"集合 '{self.collection_name}' 已存在，正在加载...")
                self.collection = Collection(self.collection_name)
                self.collection.load()
            else:
                self._create_collection()

        except Exception as e:
            print(f"Milvus初始化失败: {str(e)}")
            print("请检查:")
            print("1. Milvus服务器是否正在运行")
            print("2. 网络连接是否正常")
            print("3. 用户名密码是否正确")
            # Plain `raise` re-raises with the original traceback intact.
            raise

    def _create_collection(self):
        """Create the Milvus collection, its vector index, and load it."""
        # Schema: auto-generated INT64 primary key + file metadata fields +
        # the embedding vector. VARCHAR limits come from the class constants.
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="file_path", dtype=DataType.VARCHAR, max_length=self._MAX_PATH_LEN),
            FieldSchema(name="file_name", dtype=DataType.VARCHAR, max_length=self._MAX_NAME_LEN),
            FieldSchema(name="file_type", dtype=DataType.VARCHAR, max_length=self._MAX_TYPE_LEN),
            FieldSchema(name="file_size", dtype=DataType.INT64),
            FieldSchema(name="modified_time", dtype=DataType.VARCHAR, max_length=50),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self._EMBEDDING_DIM),
        ]

        schema = CollectionSchema(fields, description="文件信息向量搜索集合")
        self.collection = Collection(self.collection_name, schema)

        # IVF_FLAT with cosine similarity; searches must use the same metric
        # (see search_files).
        index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "COSINE",
            "params": {"nlist": 1024}
        }
        self.collection.create_index("embedding", index_params)
        self.collection.load()

        print(f"成功创建集合 '{self.collection_name}'")

    def _validate_data_length(self, file_path: str, file_name: str, file_type: str) -> Tuple[str, str, str]:
        """Truncate values to the collection's VARCHAR limits.

        Args:
            file_path: Absolute or relative path of the entry.
            file_name: Base name of the entry.
            file_type: Extension or ``"directory"``.

        Returns:
            ``(file_path, file_name, file_type)`` each cut to its schema
            limit. A warning is printed for every value that was truncated.
        """
        if len(file_path) > self._MAX_PATH_LEN:
            print(f"⚠️ 文件路径过长被截断: {file_path[:50]}...")
        if len(file_name) > self._MAX_NAME_LEN:
            print(f"⚠️ 文件名过长被截断: {file_name[:50]}...")
        if len(file_type) > self._MAX_TYPE_LEN:
            print(f"⚠️ 文件类型过长被截断: {file_type}")

        # Slicing is a no-op for strings already within the limit, so no
        # length check is needed here.
        return (file_path[:self._MAX_PATH_LEN],
                file_name[:self._MAX_NAME_LEN],
                file_type[:self._MAX_TYPE_LEN])

    def _make_entry(self, path: str, name: str, file_type: str,
                    size: int, mtime: float) -> Dict:
        """Build one index record with length-validated string fields.

        Args:
            path: Full path of the file/directory.
            name: Base name of the file/directory.
            file_type: Extension, ``"no_extension"``, or ``"directory"``.
            size: Size in bytes (0 for directories).
            mtime: Modification time as a POSIX timestamp.

        Returns:
            Dict matching the collection schema (minus id/embedding).
        """
        safe_path, safe_name, safe_type = self._validate_data_length(path, name, file_type)
        return {
            "file_path": safe_path,
            "file_name": safe_name,
            "file_type": safe_type,
            "file_size": size,
            "modified_time": datetime.fromtimestamp(mtime).isoformat()
        }

    def scan_directory(self, root_path: str, max_files: int = 10000) -> List[Dict]:
        """Walk ``root_path`` and collect index records for files and dirs.

        Args:
            root_path: Root directory to scan recursively.
            max_files: Upper bound on collected entries; a negative value
                disables the limit.

        Returns:
            List of record dicts (see ``_make_entry``). Entries that cannot
            be stat()ed are skipped with a printed warning.
        """
        file_info_list: List[Dict] = []
        file_count = 0
        unlimited = max_files < 0  # negative limit means "no limit"

        if unlimited:
            print(f"开始扫描目录: {root_path} (无文件数量限制)")
        else:
            print(f"开始扫描目录: {root_path} (限制: {max_files}个文件)")

        try:
            for root, dirs, files in os.walk(root_path):
                # Directories are indexed too, as type "directory" / size 0.
                for dir_name in dirs:
                    if not unlimited and file_count >= max_files:
                        break

                    dir_path = os.path.join(root, dir_name)
                    try:
                        stat = os.stat(dir_path)
                        file_info_list.append(self._make_entry(
                            dir_path, dir_name, "directory", 0, stat.st_mtime))
                        file_count += 1
                    except (OSError, PermissionError) as e:
                        print(f"无法访问目录 {dir_path}: {e}")
                        continue

                for file_name in files:
                    if not unlimited and file_count >= max_files:
                        break

                    file_path = os.path.join(root, file_name)
                    try:
                        stat = os.stat(file_path)
                        # Keep the field non-empty for suffix-less files.
                        file_ext = Path(file_name).suffix.lower() or "no_extension"
                        file_info_list.append(self._make_entry(
                            file_path, file_name, file_ext, stat.st_size, stat.st_mtime))
                        file_count += 1
                    except (OSError, PermissionError) as e:
                        print(f"无法访问文件 {file_path}: {e}")
                        continue

                if not unlimited and file_count >= max_files:
                    print(f"已达到文件数量限制 {max_files}，停止扫描")
                    break

        except Exception as e:
            print(f"扫描目录时出错: {e}")

        print(f"扫描完成，共收集 {len(file_info_list)} 个文件/目录")
        return file_info_list

    def index_files(self, file_info_list: List[Dict], batch_size: int = 100):
        """Embed and insert file records into the collection in batches.

        Args:
            file_info_list: Records produced by ``scan_directory``.
            batch_size: Records embedded/inserted per Milvus round trip.
        """
        if not file_info_list:
            print("没有文件需要索引")
            return

        print(f"开始索引 {len(file_info_list)} 个文件...")

        for i in range(0, len(file_info_list), batch_size):
            batch = file_info_list[i:i + batch_size]

            # Embed "name + type" so a query can match on either component.
            texts_for_embedding = [
                f"{file_info['file_name']} {file_info['file_type']}"
                for file_info in batch
            ]
            embeddings = self.vector_tool.generate_vector(texts_for_embedding)

            # Column-oriented insert: list order must match the schema
            # (minus the auto-generated primary key).
            data = [
                [info["file_path"] for info in batch],
                [info["file_name"] for info in batch],
                [info["file_type"] for info in batch],
                [info["file_size"] for info in batch],
                [info["modified_time"] for info in batch],
                embeddings
            ]

            # A failed batch is logged and skipped rather than aborting the
            # whole indexing run.
            try:
                self.collection.insert(data)
                print(f"已索引第 {i+1}-{min(i+batch_size, len(file_info_list))} 个文件")
            except Exception as e:
                print(f"索引批次 {i//batch_size + 1} 时出错: {e}")
                continue

        # Flush so inserted entities are persisted and become searchable.
        self.collection.flush()
        print("文件索引完成")

    def search_files(self, query: str, top_k: int = 10) -> List[Dict]:
        """Search indexed files by semantic similarity to ``query``.

        Args:
            query: Free-text search query.
            top_k: Maximum number of hits to return.

        Returns:
            List of result dicts (file metadata plus ``similarity_score``),
            ordered by the cosine-similarity ranking Milvus returns.
        """
        print(f"搜索查询: '{query}'")

        # NOTE(review): assumes VectorTool.generate_vector returns a single
        # vector when given a single string (it receives a list in
        # index_files) — confirm against SentenceTransformerTools.
        query_embedding = self.vector_tool.generate_vector(query)

        # Metric must match the index created in _create_collection.
        search_params = {
            "metric_type": "COSINE",
            "params": {"nprobe": 10}
        }

        results = self.collection.search(
            data=[query_embedding],
            anns_field="embedding",
            param=search_params,
            limit=top_k,
            output_fields=["file_path", "file_name", "file_type", "file_size", "modified_time"]
        )

        # Flatten the per-query hit lists into plain dicts.
        search_results = []
        for hits in results:
            for hit in hits:
                search_results.append({
                    "file_path": hit.entity.get("file_path"),
                    "file_name": hit.entity.get("file_name"),
                    "file_type": hit.entity.get("file_type"),
                    "file_size": hit.entity.get("file_size"),
                    "modified_time": hit.entity.get("modified_time"),
                    "similarity_score": hit.score
                })

        print(f"找到 {len(search_results)} 个相关结果")
        return search_results

    def get_collection_stats(self) -> Dict:
        """Return basic statistics about the backing collection.

        Returns:
            Dict with ``collection_name``, ``total_entities`` and
            ``is_loaded`` (the pymilvus load state of the collection).
        """
        return {
            "collection_name": self.collection_name,
            "total_entities": self.collection.num_entities,
            "is_loaded": utility.load_state(self.collection_name)
        }

    def clear_collection(self):
        """Remove all data by dropping and recreating the collection."""
        if self.collection:
            self.collection.drop()
            print(f"集合 '{self.collection_name}' 已清空")
            self._create_collection()

    def __del__(self):
        """Best-effort disconnect when the instance is garbage-collected."""
        try:
            connections.disconnect("default")
        except Exception:
            # Narrowed from a bare `except:`. Failures here (e.g. during
            # interpreter shutdown) are expected and safely ignored.
            pass