# -*- coding: utf-8 -*-

import weaviate
from weaviate import WeaviateClient
from weaviate.collections import Collection
import os
import re
import traceback
import time
import json
import hashlib
from datetime import datetime
import threading
import queue
import concurrent.futures
from typing import List, Dict, Tuple, Any, Optional, Union
from config import *
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

# 添加必要的导入
import torch
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForSequenceClassification, AutoTokenizer

def check_model_complete(model_path):
    """Verify that a locally stored model directory contains all required files.

    Resolves HuggingFace-cache layouts (a ``snapshots`` directory) to the
    newest snapshot first, then requires every config file plus at least one
    weight file to be present.

    Returns:
        bool: True when the model directory looks complete.
    """
    if not os.path.exists(model_path):
        return False

    # HuggingFace cache layout: descend into the newest snapshot directory.
    snapshots_dir = os.path.join(model_path, "snapshots")
    if os.path.exists(snapshots_dir):
        candidates = [
            entry for entry in os.listdir(snapshots_dir)
            if os.path.isdir(os.path.join(snapshots_dir, entry))
        ]
        if candidates:
            # Lexicographically last directory is treated as the newest.
            model_path = os.path.join(snapshots_dir, sorted(candidates)[-1])
            print(f"使用快照目录: {model_path}")

    # Config files that every model must ship.
    required_files = ["config.json", "tokenizer.json", "tokenizer_config.json"]

    # SentenceTransformer checkpoints need two extra config files.
    if "Embedding" in model_path:
        required_files += ["modules.json", "config_sentence_transformers.json"]

    for name in required_files:
        if not os.path.exists(os.path.join(model_path, name)):
            print(f"缺少配置文件: {name}")
            return False

    # At least one of these weight files must exist.
    weight_candidates = (
        "pytorch_model.bin",
        "model.safetensors",
        "model.safetensors.index.json",
    )
    if not any(os.path.exists(os.path.join(model_path, name)) for name in weight_candidates):
        print("缺少模型权重文件")
        return False

    print(f"本地模型完整: {model_path}")
    return True

def get_hf_snapshot_path(base_path):
    """Resolve a HuggingFace cache directory to its newest snapshot.

    When ``base_path`` contains a ``snapshots`` folder with at least one
    subdirectory, the lexicographically last subdirectory path is returned;
    otherwise ``base_path`` is returned unchanged.
    """
    snapshots_dir = os.path.join(base_path, "snapshots")
    if not os.path.exists(snapshots_dir):
        return base_path
    candidates = sorted(
        os.path.join(snapshots_dir, name)
        for name in os.listdir(snapshots_dir)
        if os.path.isdir(os.path.join(snapshots_dir, name))
    )
    return candidates[-1] if candidates else base_path

def init_models():
    """Initialize the embedding model and the reranker model.

    Connects to Weaviate first to detect the vector dimensionality of
    already-stored data, so that a dimensionally compatible embedding model
    can be selected.  Falls back to the models listed under
    ``DEFAULT_MODEL_PATHS["fallback_embedding"]`` when no primary model
    matches.

    Returns:
        tuple: ``(embedding_model, reranker_data)``.

    Raises:
        ValueError: if no embedding model or no reranker model can be loaded.
    """
    print("正在初始化模型...")

    # Probe Weaviate for the vector dimension used by existing data so we
    # can reject embedding models whose output dimension would not match.
    vector_dim = None
    client = None
    try:
        client = connect_to_weaviate(WEAVIATE_URL)
        print("成功连接到Weaviate")

        try:
            collections = client.collections.list_all()
            if collections:
                # Use the first collection that actually contains an object.
                for coll_name in collections:
                    collection = client.collections.get(coll_name)
                    sample_objects = collection.query.fetch_objects(limit=1)
                    if sample_objects and sample_objects.objects:
                        sample_vector = sample_objects.objects[0].vector
                        vector_dim = len(sample_vector)
                        print(f"检测到集合 {coll_name} 中的向量维度为: {vector_dim}")
                        break
        except Exception as e:
            print(f"检查向量维度失败: {e}")
    except Exception as e:
        print(f"连接Weaviate失败: {e}")
    finally:
        # Fix: the original leaked this probe connection; close it explicitly.
        if client is not None:
            try:
                client.close()
            except Exception:
                pass  # best effort — probing is done either way

    # Try the primary embedding model paths in order.
    embedding_model = None
    for model_path in DEFAULT_MODEL_PATHS["embedding"]:
        try:
            print(f"尝试加载嵌入模型: {model_path}")
            # Resolve HuggingFace cache layouts to the concrete snapshot dir.
            real_model_path = get_hf_snapshot_path(model_path)
            if os.path.exists(real_model_path):
                print(f"从本地路径加载模型: {real_model_path}")
                embedding_model = SentenceTransformer(
                    real_model_path,
                    model_kwargs=EMBEDDING_MODEL_KWARGS,
                    tokenizer_kwargs=EMBEDDING_TOKENIZER_KWARGS
                )
            else:
                print(f"从在线加载模型: {model_path}")
                embedding_model = SentenceTransformer(
                    model_path,
                    model_kwargs=EMBEDDING_MODEL_KWARGS,
                    tokenizer_kwargs=EMBEDDING_TOKENIZER_KWARGS
                )
            print(f"成功加载嵌入模型: {real_model_path}")

            # Reject models whose output dimension differs from stored data.
            if vector_dim:
                test_embedding = embedding_model.encode("测试文本", normalize_embeddings=True)
                model_dim = len(test_embedding)
                print(f"嵌入模型输出维度: {model_dim}")

                if model_dim != vector_dim:
                    print(f"警告: 模型输出维度 ({model_dim}) 与集合中的向量维度 ({vector_dim}) 不匹配")
                    print("尝试加载其他模型...")
                    embedding_model = None
                    continue

            break
        except Exception as e:
            print(f"加载嵌入模型 {model_path} 失败: {e}")

    # Fall back to the secondary model list when nothing matched.
    if embedding_model is None:
        print("尝试加载备用嵌入模型...")
        embedding_model = fallback_embedding_model()

        if embedding_model is None:
            raise ValueError("无法加载嵌入模型，请检查模型路径或网络连接")

    # Load the reranker model via the project's dedicated loader.
    print("正在加载重排序模型...")
    try:
        reranker_data = load_reranker_model()
        print("成功加载重排序模型")
    except Exception as e:
        print(f"加载重排序模型失败: {e}")
        raise ValueError("无法加载重排序模型，请检查模型路径或网络连接")

    return embedding_model, reranker_data

def fallback_embedding_model():
    """Try each fallback embedding model path in order.

    Returns:
        The first SentenceTransformer that loads successfully, or None when
        every candidate fails.
    """
    print("尝试加载备用嵌入模型...")
    for candidate in DEFAULT_MODEL_PATHS["fallback_embedding"]:
        print(f"尝试加载备用嵌入模型: {candidate}")
        try:
            loaded = SentenceTransformer(candidate, device=DEVICE)
        except Exception as e:
            print(f"加载备用嵌入模型 {candidate} 失败: {e}")
            continue
        print(f"成功加载备用嵌入模型: {candidate}")
        return loaded
    return None

def fallback_reranker_model():
    """Try each fallback reranker path in order.

    Returns:
        tuple: ``(tokenizer, model)`` for the first path that loads, with the
        model moved to DEVICE and put in eval mode; ``(None, None)`` when
        every candidate fails.
    """
    print("尝试加载备用重排序模型...")
    for candidate in DEFAULT_MODEL_PATHS["fallback_reranker"]:
        print(f"尝试加载备用重排序模型: {candidate}")
        try:
            tok = AutoTokenizer.from_pretrained(candidate)
            mdl = AutoModelForSequenceClassification.from_pretrained(candidate).to(DEVICE)
            mdl.eval()
        except Exception as e:
            print(f"加载备用重排序模型 {candidate} 失败: {e}")
            continue
        print(f"成功加载备用重排序模型: {candidate}")
        return tok, mdl
    return None, None

def connect_to_weaviate(url=WEAVIATE_URL):
    """Connect to a local Weaviate instance with exponential-backoff retries.

    NOTE(review): ``url`` is only used in the log message; the connection
    always targets localhost ports 8080/50051 via connect_to_local() —
    confirm whether the parameter should drive the connection target.

    Returns:
        The connected client.

    Raises:
        The last connection error after three failed attempts.
    """
    print(f"正在连接到Weaviate: {url}...")
    max_retries = 3
    wait_seconds = 5

    for attempt in range(1, max_retries + 1):
        try:
            client = weaviate.connect_to_local(
                port=8080,
                grpc_port=50051
            )
            # A listing call doubles as a connectivity check.
            client.collections.list_all()
            print("成功连接到Weaviate")
            return client
        except Exception as e:
            print(f"连接Weaviate失败 {attempt}/{max_retries}: {e}")
            if attempt < max_retries:
                print(f"等待 {wait_seconds} 秒后重试...")
                time.sleep(wait_seconds)
                wait_seconds *= 2  # back off exponentially
            else:
                print("所有重试失败，请检查Weaviate连接")
                print(f"请检查Weaviate连接: docker ps")
                print("查看Weaviate日志: docker logs db-weaviate-vector-project-weaviate-1")
                print("重启Weaviate: docker restart db-weaviate-vector-project-weaviate-1")
                raise

def create_collection(client, collection_name=COLLECTION_NAME):
    """Return the named collection, creating it when it does not exist.

    The collection is created without a vectorizer (vectors are supplied
    client-side) and carries two TEXT properties: ``text`` and ``source``.

    Returns:
        The collection object, or None when creation/lookup fails.
    """
    print(f"检查集合: {collection_name}...")
    try:
        # list_all() yields collection names, so a membership test suffices.
        if collection_name in client.collections.list_all():
            print(f"集合 {collection_name} 已存在，使用现有集合")
            return client.collections.get(collection_name)

        print(f"正在创建新集合: {collection_name}...")
        text_prop = {
            "name": "text",
            "data_type": weaviate.classes.config.DataType.TEXT
        }
        source_prop = {
            "name": "source",
            "data_type": weaviate.classes.config.DataType.TEXT
        }
        collection = client.collections.create(
            name=collection_name,
            vectorizer_config=weaviate.classes.config.Configure.Vectorizer.none(),
            properties=[text_prop, source_prop]
        )

        print(f"成功创建集合: {collection_name}")
        return collection
    except Exception as e:
        print(f"创建/获取集合失败: {e}")
        traceback.print_exc()
        return None

def index_documents(client, documents, sources, collection_name=COLLECTION_NAME, batch_size=BATCH_SIZE, embedding_model=None):
    """Index documents (with vectors) into a Weaviate collection.

    NOTE(review): ``batch_size`` is accepted for interface compatibility but
    unused — ``collection.batch.dynamic()`` sizes its batches itself.

    Returns:
        bool: True on success, False on failure.
    """
    print(f"索引 {len(documents)} 个文档到集合 {collection_name}...")

    try:
        collection = client.collections.get(collection_name)

        if embedding_model:
            print("使用提供的嵌入模型生成向量...")
            embeddings = embedding_model.encode(documents, normalize_embeddings=True)
        else:
            print("未提供嵌入模型，使用零向量...")
            # Zero vectors must still match the dimensionality of data
            # already in the collection, so probe a sample object first.
            try:
                sample_objects = collection.query.fetch_objects(limit=1)
                if sample_objects and sample_objects.objects:
                    sample_vector = sample_objects.objects[0].vector
                    vector_dim = len(sample_vector)
                    print(f"检测到集合中的向量维度为: {vector_dim}")
                else:
                    # Default of 1024 matches the dimensionality of the
                    # existing data reported by earlier errors.
                    vector_dim = 1024
                    print(f"集合为空，使用默认向量维度: {vector_dim}")
            except Exception as e:
                vector_dim = 1024
                print(f"获取向量维度失败: {e}，使用默认维度: {vector_dim}")

            embeddings = [torch.zeros(vector_dim).tolist() for _ in range(len(documents))]

        with collection.batch.dynamic() as batch:
            total = len(documents)
            for i, (doc, source, embedding) in enumerate(zip(documents, sources, embeddings)):
                # Weaviate expects plain lists, not numpy arrays / tensors.
                if hasattr(embedding, 'tolist'):
                    embedding = embedding.tolist()
                elif not isinstance(embedding, list):
                    embedding = list(embedding)

                try:
                    batch.add_object(
                        properties={"text": doc, "source": source},
                        vector=embedding
                    )
                except Exception as e:
                    print(f"添加对象 {i} 失败: {e}")
                    print(f"对象属性: text={doc[:50]}..., source={source}")
                    print(f"向量类型: {type(embedding)}, 长度: {len(embedding)}")
                    continue

                # Periodic progress report (and a final one).
                if (i + 1) % 100 == 0 or i == total - 1:
                    print(f"处理进度: {i+1}/{total} 文档")

        print(f"索引完成: {len(documents)} 个文档")
        return True
    except Exception as e:
        print(f"索引文档失败: {e}")
        traceback.print_exc()
        return False

def search_and_rerank(client, query, embedding_model, reranker_data, 
                     collection_name=COLLECTION_NAME, alpha=ALPHA, limit=QUERY_LIMIT, search_type="hybrid"):
    """Search a Weaviate collection and optionally rerank the results.

    Args:
        client: connected Weaviate client.
        query: user query string.
        embedding_model: SentenceTransformer used for query/document vectors.
        reranker_data: dict describing the reranker (may use a CrossEncoder
            or a token-probability reranker), or None to skip reranking.
        collection_name: target collection.
        alpha: hybrid-search weighting between keyword and vector signals.
        limit: maximum number of results to fetch.
        search_type: "hybrid", "semantic" or "keyword"; unknown values fall
            back to hybrid.

    Returns:
        list[tuple]: ``(text, source, score)`` triples sorted best-first,
        or [] on any error.
    """
    try:
        print(f"执行{search_type}搜索: {query}")

        # Embed the query, prefixed with the configured instruction.
        query_embedding = embedding_model.encode(
            [f"{QUERY_INSTRUCTION} {query}"], 
            normalize_embeddings=True
        )[0]

        collection = client.collections.get(collection_name)

        # Dispatch on search type.
        if search_type == "hybrid":
            response = collection.query.hybrid(
                query=query,
                vector=query_embedding,
                alpha=alpha,
                limit=limit
            )
        elif search_type == "semantic":
            response = collection.query.near_vector(
                vector=query_embedding,
                limit=limit
            )
        elif search_type == "keyword":
            response = collection.query.bm25(
                query=query,
                limit=limit
            )
        else:
            print(f"未知的搜索类型: {search_type}，使用混合搜索")
            response = collection.query.hybrid(
                query=query,
                vector=query_embedding,
                alpha=alpha,
                limit=limit
            )

        # Deduplicate hits by exact text content, preserving order.
        hits = []
        seen_texts = set()
        all_texts = []
        unique_objects = []
        for obj in response.objects:
            text = obj.properties["text"]
            if text in seen_texts:
                continue
            seen_texts.add(text)
            unique_objects.append(obj)
            all_texts.append(text)

        if all_texts:
            # Re-embed the deduplicated documents to compute query-document
            # cosine similarity (dot product of normalized vectors).
            print("计算文档向量表示...")
            doc_embeddings = embedding_model.encode(all_texts, normalize_embeddings=True)

            similarities = [
                float(torch.tensor(query_embedding).dot(torch.tensor(doc_emb)).item())
                for doc_emb in doc_embeddings
            ]
            # Clamp similarities into [0, 1].
            similarities = [max(0.0, min(1.0, sim)) for sim in similarities]

            query_terms = query.lower().split()

            for i, obj in enumerate(unique_objects):
                text = obj.properties["text"].lower()
                score = similarities[i]

                # Keyword bonus: each matched query term adds weight, with
                # longer terms weighing more.
                keyword_bonus = 0.0
                for term in query_terms:
                    if term in text:
                        term_weight = min(1.0, max(0.1, len(term) / 10))
                        keyword_bonus += term_weight * KEYWORD_MATCH_BONUS

                # Apply the bonus, keeping the final score within [0, 1].
                adjusted_score = min(1.0, score + keyword_bonus * 0.2)

                # BUG FIX: in the original this append was mis-indented
                # outside the loop, so only the final object ever produced a
                # hit (using the last iteration's scores).
                hits.append({
                    "text": obj.properties["text"],
                    "source": obj.properties.get("source", "未知"),
                    "score": adjusted_score,
                    "semantic_score": score,
                    "keyword_bonus": keyword_bonus,
                    "original_score": obj.metadata.score,
                    "id": getattr(obj, "id", f"id-{i}")  # extra uniqueness key
                })

            # Best adjusted score first.
            hits = sorted(hits, key=lambda x: x["score"], reverse=True)

            # Debug breakdown of the top results.
            print("搜索分数明细:")
            for hit in hits[:5]:
                text_preview = hit["text"][:30].replace("\n", " ")
                print(f"文本: {text_preview}... | 最终分数: {hit['score']:.4f} | 语义分数: {hit.get('semantic_score', 0):.4f} | 关键词加成: {hit.get('keyword_bonus', 0):.4f}")

        else:
            # No unique results to re-embed: fall back to the raw response
            # objects with their original scores.
            for obj in response.objects:
                score = obj.metadata.score
                if score is None:
                    score = 0.0  # no match information available

                text = obj.properties["text"]
                if text in seen_texts:
                    continue
                seen_texts.add(text)

                hits.append({
                    "text": obj.properties["text"],
                    "source": obj.properties.get("source", "未知"),
                    "score": score
                })

        # Reranking disabled: mirror score into rerank_score and return.
        if not USE_RERANKER:
            print("重排序功能已禁用，使用优化的相似度排序结果")
            for hit in hits:
                hit["rerank_score"] = hit["score"]
            formatted_results = [(hit["text"], hit["source"], hit["score"]) for hit in hits]
            return formatted_results

        if reranker_data and hits:
            print("使用重排序模型重新排序结果...")

            try:
                # threading.Timer-based timeout (works on Windows, unlike
                # signal-based mechanisms).  Note: threading/queue are
                # imported at module level; the original re-imported them
                # here redundantly.
                result_queue = queue.Queue()
                timed_out = [False]  # list so nested closures can mutate it

                def rerank_task():
                    """Score every hit; put True (or the exception) on the queue."""
                    try:
                        if reranker_data.get("use_cross_encoder", False):
                            # CrossEncoder path: score (query, text) pairs directly.
                            model = reranker_data["model"]
                            pairs = [[query, hit["text"]] for hit in hits]
                            scores = model.predict(pairs)
                            for i, score in enumerate(scores):
                                hits[i]["rerank_score"] = float(score)
                        elif len(hits) == 1:
                            # Single result: score it without batching.
                            print("仅有一条结果，单独处理...")
                            text = hits[0]["text"]
                            pair = format_instruction(QUERY_INSTRUCTION, query, text)

                            # Make sure the tokenizer can pad.
                            tokenizer = reranker_data["tokenizer"]
                            if tokenizer.pad_token is None:
                                tokenizer.pad_token = tokenizer.eos_token or tokenizer.unk_token or "[PAD]"
                            if tokenizer.pad_token_id is None:
                                tokenizer.pad_token_id = tokenizer.eos_token_id or tokenizer.unk_token_id or 0

                            inputs = tokenizer(
                                pair,
                                return_tensors="pt",
                                padding=False,
                                truncation=True,
                                max_length=reranker_data["max_length"]
                            )

                            # Wrap the token ids with the reranker's
                            # prefix/suffix template tokens.
                            prefix_tokens = reranker_data["prefix_tokens"]
                            suffix_tokens = reranker_data["suffix_tokens"]
                            input_ids = inputs["input_ids"][0].tolist()
                            new_input_ids = prefix_tokens + input_ids + suffix_tokens
                            inputs["input_ids"] = torch.tensor([new_input_ids]).to(reranker_data["model"].device)

                            # Move the remaining tensors to the model device.
                            for key in inputs:
                                if key != "input_ids":
                                    inputs[key] = inputs[key].to(reranker_data["model"].device)

                            # Relevance = softmax probability of the "true"
                            # token over {"false", "true"} at the last position.
                            with torch.no_grad():
                                batch_scores = reranker_data["model"](**inputs).logits[:, -1, :]
                                true_id = reranker_data["token_true_id"]
                                false_id = reranker_data["token_false_id"]
                                true_score = batch_scores[0, true_id].item()
                                false_score = batch_scores[0, false_id].item()
                                score = torch.nn.functional.softmax(torch.tensor([false_score, true_score]), dim=0)[1].item()

                            hits[0]["rerank_score"] = score
                        else:
                            # Batched path for multiple results.
                            pairs = [format_instruction(QUERY_INSTRUCTION, query, hit["text"]) for hit in hits]
                            inputs = process_rerank_inputs(reranker_data, pairs)
                            scores = compute_rerank_scores(reranker_data, inputs)
                            for i, score in enumerate(scores):
                                hits[i]["rerank_score"] = score

                        # Signal successful completion.
                        result_queue.put(True)
                    except Exception as e:
                        # Surface task-internal failures to the waiting thread.
                        result_queue.put(e)

                def timeout_callback():
                    # Fires when the timer elapses before the task finished.
                    if result_queue.empty():
                        timed_out[0] = True
                        result_queue.put(TimeoutError("重排序操作超时"))

                task_thread = threading.Thread(target=rerank_task)
                task_thread.daemon = True  # don't block interpreter exit

                timeout_seconds = 10
                timer = threading.Timer(timeout_seconds, timeout_callback)

                task_thread.start()
                timer.start()

                try:
                    # Wait for either the task result or the timeout marker.
                    result = result_queue.get(block=True, timeout=timeout_seconds + 1)
                    timer.cancel()

                    if isinstance(result, Exception):
                        raise result

                    # Re-sort by reranker score when the task finished in time.
                    if not timed_out[0]:
                        hits = sorted(hits, key=lambda x: x["rerank_score"], reverse=True)
                        print("重排序完成")
                except queue.Empty:
                    # Should not happen: timeout_callback always enqueues.
                    print("重排序结果获取超时")
                    for hit in hits:
                        hit["rerank_score"] = hit["score"]

            except TimeoutError as e:
                print(f"重排序超时: {e}")
                # Keep the pre-rerank ordering on timeout.
                for hit in hits:
                    hit["rerank_score"] = hit["score"]
            except Exception as e:
                print(f"重排序失败: {e}")
                traceback.print_exc()
                # Keep the pre-rerank ordering on failure.
                for hit in hits:
                    hit["rerank_score"] = hit["score"]

        # Format as (text, source, score) triples.
        formatted_results = [(hit["text"], hit["source"], hit.get("rerank_score", hit["score"])) for hit in hits]
        return formatted_results
    except Exception as e:
        print(f"搜索失败: {e}")
        traceback.print_exc()
        return []

def index_sample_documents(client, embedding_model=None):
    """Index the built-in SAMPLE_DOCUMENTS unless the collection has data.

    Returns:
        tuple: ``(documents, sources)`` lists that were sent for indexing;
        both empty when indexing was skipped because data already exists.
    """
    print("索引示例文档...")

    # Skip entirely when the collection already contains at least one object.
    try:
        collection = client.collections.get(COLLECTION_NAME)
        results = collection.query.fetch_objects(limit=1)
        existing_count = len(results.objects)

        if existing_count > 0:
            print(f"集合中已存在 {existing_count} 个文档，跳过示例文档索引")
            return [], []
    except Exception as e:
        print(f"检查现有文档时出错: {e}，将继续索引示例文档")

    # Build parallel document/source lists from the samples.
    documents = list(SAMPLE_DOCUMENTS)
    sources = [f"示例文档-{idx+1}" for idx in range(len(documents))]

    index_documents(client, documents, sources, collection_name=COLLECTION_NAME, embedding_model=embedding_model)
    return documents, sources

def extract_text_from_epub(epub_path):
    """Extract text chunks from an EPUB file.

    Each chapter's HTML is stripped to plain text, split into chunks via
    chunk_text(), and every chunk is prefixed with the book title (and the
    chapter title, when one is found) as bracketed metadata markers.

    Returns:
        list[str]: text chunks; empty list when the EPUB libraries are
        missing or any error occurs.
    """
    try:
        import ebooklib
        from ebooklib import epub
        from bs4 import BeautifulSoup
        
        # Parse the EPUB container.
        book = epub.read_epub(epub_path)
        
        # Accumulated chunks across all chapters.
        all_chunks = []
        chapter_count = 0
        
        # Book title from Dublin Core metadata, if present.
        book_title = "未知书籍"
        try:
            metadata = book.get_metadata('DC', 'title')
            if metadata and len(metadata) > 0 and len(metadata[0]) > 0:
                book_title = metadata[0][0]
        except Exception as e:
            print(f"  - 获取书籍元数据失败: {e}")
            
        # Fall back to the file name when the metadata had no title.
        if book_title == "未知书籍":
            book_title = os.path.basename(epub_path).split('.')[0]
            
        print(f"  - 处理书籍: {book_title}")
        
        # Walk every item; only document items carry chapter text.
        for item in book.get_items():
            if item.get_type() == ebooklib.ITEM_DOCUMENT:
                chapter_count += 1
                # Decode chapter bytes: strict UTF-8, then lenient UTF-8,
                # then lenient GBK.
                try:
                    content = item.get_content().decode('utf-8')
                except UnicodeDecodeError:
                    try:
                        content = item.get_content().decode('utf-8', errors='ignore')
                    except:
                        content = item.get_content().decode('gbk', errors='ignore')
                
                # Strip the markup down to text.
                soup = BeautifulSoup(content, 'html.parser')
                
                # Drop script/style elements entirely.
                for script in soup(["script", "style"]):
                    script.extract()
                
                text = soup.get_text(separator=' ')
                
                # Collapse blank lines and surrounding whitespace.
                lines = []
                for line in text.split('\n'):
                    line = line.strip()
                    if line:
                        lines.append(line)
                
                text = '\n'.join(lines)
                text = text.strip()
                
                if not text:
                    continue
                
                # Chapter title: <title>, then <h1>, then <h2>.
                chapter_title = ""
                if soup.title and soup.title.string:
                    chapter_title = soup.title.string.strip()
                elif soup.find('h1'):
                    chapter_title = soup.find('h1').get_text().strip()
                elif soup.find('h2'):
                    chapter_title = soup.find('h2').get_text().strip()
                
                # Last resort: a short first line is probably the title.
                if not chapter_title and lines and len(lines[0]) < 100:
                    chapter_title = lines[0]
                
                print(f"  - 处理章节 {chapter_count}: {chapter_title if chapter_title else '无标题'}")
                
                # Split chapter text into indexable chunks.
                chunks = chunk_text(text)
                
                # Prefix each chunk with book/chapter metadata markers.
                if chunks:
                    for i, chunk in enumerate(chunks):
                        if chapter_title:
                            chunk_with_meta = f"[书籍: {book_title}] [章节: {chapter_title}] {chunk}"
                        else:
                            chunk_with_meta = f"[书籍: {book_title}] {chunk}"
                        
                        all_chunks.append(chunk_with_meta)
                    
                    print(f"  - 章节 {chapter_count} 处理完成，生成 {len(chunks)} 个文本片段")
        
        print(f"  - 共处理 {chapter_count} 个章节，总计 {len(all_chunks)} 个文本片段")
        return all_chunks
    except ImportError:
        print("  - 缺少处理EPUB的必要库，请安装 ebooklib 和 beautifulsoup4")
        return []
    except Exception as e:
        print(f"  - 处理EPUB文件时出错: {e}")
        traceback.print_exc()
        return []

def read_text_file(file_path):
    """Read a text file and return its contents as a string.

    Tries UTF-8 first, then GBK; as a last resort decodes the raw bytes as
    UTF-8 with undecodable characters dropped.

    Raises:
        OSError: if the file cannot be opened at all (matches the original
        behavior — a missing file was never caught).
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()
    except UnicodeDecodeError:
        pass

    try:
        with open(file_path, 'r', encoding='gbk') as f:
            return f.read()
    except Exception:  # narrowed from bare `except:`; decode errors land here
        print(f"无法读取文件 {file_path}, 尝试二进制模式")
        with open(file_path, 'rb') as f:
            # decode(..., errors='ignore') never raises, so the original's
            # extra GBK fallback after this line was dead code — removed.
            return f.read().decode('utf-8', errors='ignore')

def chunk_text(text, chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP):
    """Split text into overlapping chunks.

    Args:
        text: the string to split.
        chunk_size: maximum length of each chunk.
        chunk_overlap: characters shared between consecutive chunks; must be
            smaller than chunk_size.

    Returns:
        list[str]: the chunks, in order. Text no longer than chunk_size is
        returned as a single chunk.

    Raises:
        ValueError: if chunk_overlap >= chunk_size (the original code would
        loop forever in that case because ``start`` never advanced).
    """
    if len(text) <= chunk_size:
        return [text]

    step = chunk_size - chunk_overlap
    if step <= 0:
        # Fix: a non-positive step made the original while-loop infinite.
        raise ValueError("chunk_overlap must be smaller than chunk_size")

    # Slicing clamps at the end of the string, matching the original's
    # min(start + chunk_size, len(text)).
    return [text[start:start + chunk_size] for start in range(0, len(text), step)]

def process_document(file_path):
    """Dispatch a file to the appropriate extractor and return text chunks.

    Supported: ``.epub`` (chapter extraction) and ``.txt``/``.md`` (plain
    chunking). PDF and unknown extensions yield an empty list.
    """
    extension = os.path.splitext(file_path)[1].lower()

    print(f"处理文件: {file_path}")

    try:
        if extension == '.epub':
            chunks = extract_text_from_epub(file_path)
            print(f"  - 从EPUB提取了 {len(chunks)} 个文本块")
        elif extension in ('.txt', '.md'):
            chunks = chunk_text(read_text_file(file_path))
            print(f"  - 从文本文件提取了 {len(chunks)} 个文本块")
        elif extension == '.pdf':
            # PDF support intentionally absent.
            print("  - 暂不支持PDF文件，请转换为TXT或EPUB格式")
            chunks = []
        else:
            print(f"  - 不支持的文件类型: {extension}")
            chunks = []
        return chunks
    except Exception as e:
        print(f"  - 处理文件时出错: {e}")
        traceback.print_exc()
        return []

def load_document_hashes():
    """Load the persisted document-hash bookkeeping file.

    Repairs common structural problems (missing keys, malformed per-file
    records, records for files that no longer exist).

    Returns:
        dict: with "files", "collections" and "last_update" keys; a fresh
        default structure when the file is absent or unreadable.
    """
    default_data = {"files": {}, "collections": {}, "last_update": datetime.now().isoformat()}

    if os.path.exists(DOCUMENT_HASH_FILE):
        try:
            with open(DOCUMENT_HASH_FILE, "r", encoding="utf-8") as f:
                data = json.load(f)

            # Top level must be a dict; otherwise start over.
            if not isinstance(data, dict):
                print(f"哈希数据格式错误，重置数据")
                return default_data

            # Guarantee the expected sub-structures.
            if "files" not in data:
                print(f"哈希数据缺少files字段，添加空字典")
                data["files"] = {}
            data.setdefault("collections", {})

            # Repair malformed per-file records.
            for file_path, file_info in list(data["files"].items()):
                if not isinstance(file_info, dict):
                    print(f"文件 {file_path} 的记录格式错误，重新创建")
                    if os.path.exists(file_path):
                        data["files"][file_path] = {
                            "hash": calculate_file_hash(file_path),
                            "last_indexed": datetime.now().isoformat()
                        }
                    else:
                        # The file is gone; drop its record.
                        del data["files"][file_path]
                elif "hash" not in file_info and os.path.exists(file_path):
                    print(f"文件 {file_path} 的记录缺少hash字段，添加")
                    file_info["hash"] = calculate_file_hash(file_path)

            return data
        except Exception as e:
            print(f"加载文档哈希数据失败: {e}")
            traceback.print_exc()

    return default_data

def save_document_hashes(hash_data):
    """Persist the document-hash bookkeeping to DOCUMENT_HASH_FILE.

    Stamps ``last_update`` with the current time before writing.  Failures
    are logged rather than raised.

    Bug fix: the old code opened the file with "w" (truncating it) before
    ``json.dump`` ran, so a serialization error destroyed the existing hash
    data.  Serialize to a string first, then write, so the file is only
    touched once a complete payload exists.
    """
    hash_data["last_update"] = datetime.now().isoformat()
    try:
        payload = json.dumps(hash_data, ensure_ascii=False, indent=2)
        with open(DOCUMENT_HASH_FILE, "w", encoding="utf-8") as f:
            f.write(payload)
    except Exception as e:
        print(f"保存文档哈希数据失败: {e}")

def calculate_file_hash(file_path, algorithm=HASH_ALGORITHM):
    """Return the hex digest of the file's contents using *algorithm*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    digest = hashlib.new(algorithm)
    with open(file_path, "rb") as fh:
        while chunk := fh.read(4096):
            digest.update(chunk)
    return digest.hexdigest()

def get_file_info(file_path):
    """Build a hash-tracking record for *file_path*.

    The record carries the path, content hash, modification time, size and
    an ISO timestamp of when it was indexed.
    """
    file_stat = os.stat(file_path)
    return {
        "path": file_path,
        "hash": calculate_file_hash(file_path),
        "mtime": file_stat.st_mtime,
        "size": file_stat.st_size,
        "last_indexed": datetime.now().isoformat(),
    }

def check_if_file_changed(file_path, hash_data, force_reindex=False):
    """Refresh the hash record for *file_path* and report whether to index it.

    Updates ``hash_data["files"]`` in place (creating or re-hashing the
    record as needed).  NOTE: by design the function currently returns True
    in every case — even for unchanged files — so callers always re-index;
    its value lies in keeping the hash bookkeeping up to date.
    """
    if force_reindex or FORCE_REINDEX:
        print(f"  - 强制重新索引: {file_path}")
        return True

    # Missing/partial bookkeeping: treat as a brand-new file.
    if not hash_data or "files" not in hash_data:
        print(f"  - 哈希数据不完整，视为新文件: {file_path}")
        if "files" not in hash_data:
            hash_data["files"] = {}
        hash_data["files"][file_path] = {
            "hash": calculate_file_hash(file_path),
            "last_indexed": datetime.now().isoformat(),
        }
        return True

    record = hash_data["files"].get(file_path)

    # Never seen before: create the record.
    if record is None:
        print(f"  - 新文件: {file_path}")
        hash_data["files"][file_path] = {
            "hash": calculate_file_hash(file_path),
            "last_indexed": datetime.now().isoformat(),
        }
        return True

    # Record exists but has no hash: backfill it.
    if "hash" not in record:
        print(f"  - 文件记录缺少哈希值，重新计算: {file_path}")
        record["hash"] = calculate_file_hash(file_path)
        record["last_indexed"] = datetime.now().isoformat()
        return True

    # Content changed since the last run: refresh the record.
    current_hash = calculate_file_hash(file_path)
    if current_hash != record["hash"]:
        print(f"  - 文件已修改: {file_path}")
        record["hash"] = current_hash
        record["last_indexed"] = datetime.now().isoformat()
        return True

    # Unchanged — but still signal "index it" (see docstring).
    print(f"  - 文件未变化，但仍将重新索引: {file_path}")
    return True

def load_documents_from_directory(directory=DATA_DIR, recursive=RECURSIVE_DIRECTORY_SCAN, base_dir=None, hash_data=None, force_reindex=False):
    """Collect text chunks from every supported file under *directory*.

    Walks the directory (recursively when *recursive* is true), extracts
    text from each supported file (.epub via extract_text_from_epub, all
    others via read_text_file + chunk_text) and refreshes the per-file hash
    store, saving it before returning.

    Returns three parallel lists: (chunks, source labels, file paths).
    Source labels are paths relative to *base_dir* with forward slashes.

    NOTE(review): check_if_file_changed currently always returns True, so
    the "skip unchanged file" branch below is never taken.
    """
    if not os.path.exists(directory):
        print(f"目录不存在: {directory}")
        return [], [], []
    
    if base_dir is None:
        base_dir = directory
    
    if hash_data is None:
        hash_data = load_document_hashes()
    
    documents = []
    sources = []
    file_paths = []
    
    # Gather every file beneath the directory (recursively if requested).
    all_files = []
    if recursive:
        for root, _, filenames in os.walk(directory):
            for filename in filenames:
                file_path = os.path.join(root, filename)
                all_files.append(file_path)
    else:
        for filename in os.listdir(directory):
            file_path = os.path.join(directory, filename)
            if os.path.isfile(file_path):
                all_files.append(file_path)
    
    # Flag previously indexed files that no longer exist on disk.
    if AUTO_DELETE_INDEX and hash_data and "files" in hash_data:
        for file_path in list(hash_data["files"].keys()):
            if file_path.startswith(directory) and file_path not in all_files and not os.path.exists(file_path):
                print(f"检测到已删除的文件: {file_path}")
                # Mark for index deletion; the caller handles the cleanup.
                hash_data["files"][file_path]["deleted"] = True
    
    # Extract text from each supported file.
    for file_path in all_files:
        # Skip unsupported extensions.
        if not any(file_path.endswith(ext) for ext in SUPPORTED_FILE_EXTENSIONS):
            continue
        
        # Skip files that are unchanged since the last run (dead branch today,
        # see docstring note).
        if not check_if_file_changed(file_path, hash_data, force_reindex):
            print(f"跳过未变化的文件: {file_path}")
            continue
        
        # Use the path relative to base_dir as the source identifier.
        rel_path = os.path.relpath(file_path, base_dir)
        source = rel_path.replace("\\", "/")  # normalize to forward slashes
        
        print(f"处理文件: {file_path}")
        
        # Pick the extractor by file type.
        if file_path.endswith(".epub"):
            chunks = extract_text_from_epub(file_path)
        else:
            text = read_text_file(file_path)
            chunks = chunk_text(text)
        
        if not chunks:
            print(f"  - 无法从文件中提取文本: {file_path}")
            continue
        
        print(f"  - 从文件中提取了 {len(chunks)} 个文本块")
        
        # Record each chunk alongside its source label and origin path.
        for chunk in chunks:
            documents.append(chunk)
            sources.append(source)
            file_paths.append(file_path)
        
        # Refresh the hash record for this file.
        file_info = get_file_info(file_path)
        if "files" not in hash_data:
            hash_data["files"] = {}
        hash_data["files"][file_path] = file_info
    
    # Persist the updated hash store.
    save_document_hashes(hash_data)
    
    return documents, sources, file_paths

def check_collection_exists(client, collection_name):
    """Return True when *collection_name* is present on the Weaviate server.

    Any error while listing collections is logged and reported as "absent".
    """
    try:
        existing = client.collections.list_all()
        return collection_name in existing
    except Exception as e:
        print(f"检查集合失败: {e}")
        return False

def format_instruction(instruction, query, doc):
    """Build the prompt text fed to the reranker for one (query, doc) pair.

    Falls back to a generic web-search instruction when *instruction* is None.
    """
    if instruction is None:
        instruction = 'Given a web search query, retrieve relevant passages that answer the query'
    return f"<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {doc}"

def load_reranker_model(model_config=None):
    """Load the Qwen3 reranker model.

    Depending on *model_config* (defaults to RERANKER_CONFIG) the model is
    loaded either as a sentence-transformers CrossEncoder or as a plain
    AutoModelForSequenceClassification plus tokenizer.  Returns a dict whose
    "use_cross_encoder" key tells callers which path was taken; the manual
    path also carries the tokenizer, yes/no token ids, max length and the
    chat-template prefix/suffix token ids expected by process_rerank_inputs
    and compute_rerank_scores.
    """
    if model_config is None:
        model_config = RERANKER_CONFIG
    
    # Prefer a complete local copy; otherwise fall back to the hub id.
    if model_config["local_model_path"] and check_model_complete(model_config["local_model_path"]):
        print(f"正在加载本地重排序模型: {model_config['local_model_path']}")
        model_path = model_config["local_model_path"]
    else:
        print(f"正在从HuggingFace加载重排序模型: {model_config['model_id']}")
        model_path = model_config["model_id"]
    
    # CrossEncoder path: sentence-transformers handles tokenization itself.
    if model_config["use_cross_encoder"]:
        print("使用CrossEncoder加载重排序模型")
        from sentence_transformers import CrossEncoder
        model = CrossEncoder(model_path)
        return {
            "model": model,
            "use_cross_encoder": True
        }
    
    # Manual path: load tokenizer and model directly.
    # Left padding so the answer slot is always the last position of a row.
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, 
        padding_side='left'
    )
    
    # Pick the model variant according to the acceleration flags.
    # NOTE(review): the flash-attention-only branch never calls .cuda()
    # although the fp16 branches do, and flash_attention_2 generally
    # requires CUDA — confirm that branch is actually exercised.
    if model_config["use_flash_attention"] and model_config["use_fp16"]:
        model = AutoModelForSequenceClassification.from_pretrained(
            model_path, 
            torch_dtype=torch.float16, 
            attn_implementation="flash_attention_2"
        ).cuda().eval()
        print("已启用flash_attention_2加速和FP16精度")
    elif model_config["use_flash_attention"]:
        model = AutoModelForSequenceClassification.from_pretrained(
            model_path, 
            attn_implementation="flash_attention_2"
        ).eval()
        print("已启用flash_attention_2加速")
    elif model_config["use_fp16"]:
        model = AutoModelForSequenceClassification.from_pretrained(
            model_path, 
            torch_dtype=torch.float16
        ).cuda().eval()
        print("已启用FP16精度")
    else:
        model = AutoModelForSequenceClassification.from_pretrained(
            model_path
        ).eval()
    
    # Resolve the "yes"/"no" token ids used when scoring relevance.
    token_false_id = tokenizer.convert_tokens_to_ids("no")
    token_true_id = tokenizer.convert_tokens_to_ids("yes")
    max_length = model_config["max_length"]
    
    # Chat-template wrapper placed around every (query, document) prompt.
    prefix = "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n<|im_start|>user\n"
    suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
    prefix_tokens = tokenizer.encode(prefix, add_special_tokens=False)
    suffix_tokens = tokenizer.encode(suffix, add_special_tokens=False)
    
    # Guarantee a padding token so later tokenizer.pad calls cannot fail.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token or tokenizer.unk_token or "[PAD]"
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id or tokenizer.unk_token_id or 0
    
    return {
        "model": model,
        "tokenizer": tokenizer,
        "token_false_id": token_false_id,
        "token_true_id": token_true_id,
        "max_length": max_length,
        "prefix_tokens": prefix_tokens,
        "suffix_tokens": suffix_tokens,
        "use_cross_encoder": False
    }

def process_rerank_inputs(model_data, pairs):
    """Tokenize reranker prompt strings into a padded, device-ready batch.

    Each entry in *pairs* is tokenized (leaving room for the chat-template
    wrapper), spliced between the prefix/suffix token ids, padded to
    ``max_length`` and moved onto the model's device.
    """
    tokenizer = model_data["tokenizer"]
    max_length = model_data["max_length"]
    prefix_tokens = model_data["prefix_tokens"]
    suffix_tokens = model_data["suffix_tokens"]

    # Guarantee a padding token so tokenizer.pad below cannot fail.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token or tokenizer.unk_token or "[PAD]"
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id or tokenizer.unk_token_id or 0

    # Budget excludes the wrapper tokens added after tokenization.
    budget = max_length - len(prefix_tokens) - len(suffix_tokens)
    encoded = tokenizer(
        pairs,
        padding=False,
        truncation='longest_first',
        return_attention_mask=False,
        max_length=budget
    )

    # Splice the chat-template wrapper around every sequence.
    encoded['input_ids'] = [
        prefix_tokens + ids + suffix_tokens for ids in encoded['input_ids']
    ]

    # Pad everything to a fixed max_length and build tensors.
    batch = tokenizer.pad(
        encoded,
        padding='max_length',
        return_tensors="pt",
        max_length=max_length
    )

    device = model_data["model"].device
    for key in batch:
        batch[key] = batch[key].to(device)
    return batch

def compute_rerank_scores(model_data, inputs):
    """Run the reranker and turn yes/no logits into relevance probabilities.

    For every row, takes the logits at the final position, keeps only the
    "no" and "yes" token logits, and returns P("yes") per pair as a list of
    Python floats.
    """
    model = model_data["model"]
    yes_id = model_data["token_true_id"]
    no_id = model_data["token_false_id"]

    with torch.no_grad():
        last_logits = model(**inputs).logits[:, -1, :]
        pair_logits = torch.stack(
            [last_logits[:, no_id], last_logits[:, yes_id]], dim=1
        )
        log_probs = torch.nn.functional.log_softmax(pair_logits, dim=1)
        return log_probs[:, 1].exp().tolist()

class DocumentProcessor:
    """Background document indexer.

    Tasks of the form (directory, recursive, force_reindex) are queued and
    drained by a single worker thread, which extracts the directory's
    documents and indexes them into a per-folder Weaviate collection.
    """
    
    def __init__(self, client, embedding_model=None):
        # client: connected Weaviate client used for indexing/deletion.
        # embedding_model: optional SentenceTransformer passed to index_documents.
        self.client = client
        self.embedding_model = embedding_model
        self.queue = queue.Queue()
        self.processing = False
        self.hash_data = load_document_hashes()
        self.executor = None
    
    def start_processing(self):
        """Start the single background worker thread if not already running."""
        if not self.processing:
            self.processing = True
            self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
            self.executor.submit(self._process_queue)
            print("文档处理器已启动")
    
    def stop_processing(self):
        """Ask the worker loop to stop and release the executor."""
        if self.processing:
            self.processing = False
            if self.executor:
                self.executor.shutdown(wait=False)
                self.executor = None
            print("文档处理器已停止")
    
    def _process_queue(self):
        """Worker loop: drain queued directory tasks until stopped."""
        while self.processing:
            try:
                task = self.queue.get(timeout=1)
            except queue.Empty:
                continue
            try:
                directory, recursive, force_reindex = task
                self._process_directory(directory, recursive, force_reindex)
            except Exception as e:
                print(f"处理队列时出错: {e}")
                traceback.print_exc()
            finally:
                # Always acknowledge the task — even on failure — so that
                # queue.join() / wait_for_completion() cannot hang forever.
                # (Previously task_done() was skipped when processing raised.)
                self.queue.task_done()
    
    def _process_directory(self, directory, recursive=True, force_reindex=False):
        """Load, index and clean up documents for one directory."""
        try:
            print(f"处理目录: {directory}")
            
            # Load the persisted per-file hash bookkeeping.
            hash_data = load_document_hashes()
            
            # Extract text chunks from new or modified files.
            documents, sources, file_paths = load_documents_from_directory(
                directory, recursive=recursive, hash_data=hash_data, force_reindex=force_reindex
            )
            
            if not documents:
                print(f"目录 {directory} 中没有新的或修改的文档")
                return
            
            # Map the directory onto a collection name.
            if directory == DATA_DIR:
                # The root data directory uses the default collection.
                collection_name = COLLECTION_NAME
            else:
                # Sub-directories get their own prefixed collection.
                rel_path = os.path.relpath(directory, DATA_DIR)
                folder_name = rel_path.split(os.sep)[0]
                collection_name = f"{COLLECTION_PREFIX}{folder_name}"
            
            print(f"将文档索引到集合: {collection_name}")
            
            # Index the extracted chunks.
            success = index_documents(
                self.client, documents, sources, 
                collection_name=collection_name, 
                embedding_model=self.embedding_model
            )
            
            if success:
                print(f"成功将 {len(documents)} 个文档索引到集合 {collection_name}")
            else:
                print(f"索引文档到集合 {collection_name} 失败")
            
            # Remove index entries for files flagged as deleted.
            if AUTO_DELETE_INDEX and hash_data and "files" in hash_data:
                deleted_files = [path for path, info in hash_data["files"].items() 
                               if info.get("deleted", False)]
                
                if deleted_files:
                    print(f"处理 {len(deleted_files)} 个已删除的文件")
                    for file_path in deleted_files:
                        try:
                            # Use the path relative to DATA_DIR as the source label.
                            if os.path.isabs(file_path):
                                rel_path = os.path.relpath(file_path, os.path.abspath(DATA_DIR))
                            else:
                                rel_path = file_path
                            
                            source_name = rel_path.replace("\\", "/")
                            print(f"删除文件 {source_name} 的索引")
                            
                            # Resolve which collection the file was indexed into.
                            collection_parts = rel_path.split(os.sep)
                            if len(collection_parts) > 1:
                                folder_name = collection_parts[0]
                                coll_name = f"{COLLECTION_PREFIX}{folder_name}"
                            else:
                                coll_name = COLLECTION_NAME
                            
                            # Drop the stale index entries.
                            delete_document_by_source(self.client, source_name, coll_name)
                            
                            # Forget the file in the hash bookkeeping.
                            del hash_data["files"][file_path]
                        except Exception as e:
                            print(f"删除文件 {file_path} 的索引时出错: {e}")
                            traceback.print_exc()
                    
                    # Persist the pruned hash bookkeeping.
                    save_document_hashes(hash_data)
        
        except Exception as e:
            print(f"处理目录 {directory} 时出错: {e}")
            traceback.print_exc()
    
    def process_directory_async(self, directory, recursive=True, force_reindex=False):
        """Queue a directory for background processing; starts the worker if needed."""
        self.queue.put((directory, recursive, force_reindex))
        if not self.processing:
            self.start_processing()
        return True
    
    def process_directory_sync(self, directory, recursive=True, force_reindex=False):
        """Process a directory on the calling thread."""
        return self._process_directory(directory, recursive, force_reindex)
    
    def process_all_documents(self, async_processing=ASYNC_VECTORIZATION):
        """Process the whole DATA_DIR, asynchronously or synchronously."""
        if async_processing:
            return self.process_directory_async(DATA_DIR)
        else:
            return self.process_directory_sync(DATA_DIR)
    
    def wait_for_completion(self, timeout=None):
        """Block until every queued task has been processed.

        Bug fix: ``queue.Queue.join()`` accepts no *timeout* argument, so
        the old ``self.queue.join(timeout=timeout)`` raised TypeError on
        every call and this method always returned False.  A timeout is now
        implemented by polling the queue's unfinished-task counter.

        Returns True when the queue drained, False on timeout or error.
        """
        try:
            if timeout is None:
                self.queue.join()
                return True
            deadline = time.monotonic() + timeout
            while self.queue.unfinished_tasks:
                if time.monotonic() >= deadline:
                    return False
                time.sleep(0.05)
            return True
        except Exception:
            return False

def search_multiple_collections(client, query, embedding_model, reranker_data, 
                           collections=None, alpha=ALPHA, limit=QUERY_LIMIT, search_type="hybrid"):
    """Search several collections and merge the hits into one ranked list.

    When *collections* is None, every collection named with
    COLLECTION_PREFIX (plus the default COLLECTION_NAME) is searched.  Each
    hit is re-tagged as "collection:source", all hits are sorted by score
    descending and the list is truncated to *limit* entries.
    """
    # Resolve the target collections when none were supplied.
    if collections is None:
        try:
            names = client.collections.list_all()
            collections = [n for n in names
                           if n.startswith(COLLECTION_PREFIX) or n == COLLECTION_NAME]
        except Exception as e:
            print(f"获取集合列表失败: {e}")
            collections = [COLLECTION_NAME]

    merged = []
    print(f"在 {len(collections)} 个集合中搜索: {', '.join(collections)}")

    for name in collections:
        try:
            if not check_collection_exists(client, name):
                print(f"集合 {name} 不存在，已跳过")
                continue

            print(f"正在集合 {name} 中搜索...")

            # Over-fetch per collection so the merged list has enough candidates.
            per_collection_limit = limit * 2

            hits = search_and_rerank(
                client, query, embedding_model, reranker_data,
                collection_name=name, alpha=alpha, limit=per_collection_limit,
                search_type=search_type
            )

            if hits:
                print(f"在集合 {name} 中找到 {len(hits)} 个结果")
                # Prefix each source with its collection for disambiguation.
                for text, source, score in hits:
                    merged.append((text, f"{name}:{source}", score))
            else:
                print(f"在集合 {name} 中未找到结果")

        except Exception as e:
            print(f"在集合 {name} 中搜索失败: {e}")
            traceback.print_exc()

    # Rank globally by score, best first, then cap at the requested limit.
    merged.sort(key=lambda hit: hit[2], reverse=True)
    if len(merged) > limit:
        merged = merged[:limit]

    print(f"搜索完成，总共找到 {len(merged)} 个结果")
    return merged

def get_collection_list(client):
    """Return display metadata for every document collection.

    Each entry is ``{"name", "display_name", "count"}`` where *count* is the
    number of objects in the collection (the string "未知" when it cannot be
    determined).
    """
    try:
        all_collections = client.collections.list_all()
        # Keep only the collections this app manages.
        doc_collections = [c for c in all_collections if c.startswith(COLLECTION_PREFIX) or c == COLLECTION_NAME]
        
        collection_info = []
        for coll_name in doc_collections:
            try:
                collection = client.collections.get(coll_name)
                # Bug fix: the old code ran fetch_objects(limit=1) twice
                # (discarding the first result) and reported len() of at most
                # one object, so every collection showed a count of 0 or 1.
                # Use the v4 aggregate API for the real total.
                aggregate = collection.aggregate.over_all(total_count=True)
                count = aggregate.total_count or 0
                
                # Derive a human-friendly folder name for prefixed collections.
                folder_name = coll_name
                if coll_name.startswith(COLLECTION_PREFIX):
                    folder_name = coll_name[len(COLLECTION_PREFIX):].replace('_', '/')
                
                collection_info.append({
                    "name": coll_name,
                    "display_name": folder_name,
                    "count": count
                })
            except Exception as e:
                print(f"获取集合 {coll_name} 信息失败: {e}")
                collection_info.append({
                    "name": coll_name,
                    "display_name": coll_name,
                    "count": "未知"
                })
        
        return collection_info
    except Exception as e:
        print(f"获取集合列表失败: {e}")
        return []

def list_collections(client):
    """Return the names of all document-related collections.

    Keeps only the default collection and those carrying the document
    prefix; errors are logged and yield an empty list.
    """
    try:
        names = client.collections.list_all()
        return [name for name in names
                if name == COLLECTION_NAME or name.startswith(COLLECTION_PREFIX)]
    except Exception as e:
        print(f"获取集合列表失败: {e}")
        return []

def display_results(results, query):
    """Pretty-print up to five search results for *query* to stdout."""
    print(f"\n查询: {query}")

    if not results:
        print("未找到相关结果")
        return

    for position, item in enumerate(results[:5], start=1):
        text = item.get("text", "")
        source = item.get("source", "未知来源")
        score = item.get("score")
        collection = item.get("collection", "")

        # Score may legitimately be missing from a result dict.
        score_part = f"(分数: {score:.4f})" if score is not None else "(分数: 未知)"
        print(f"{position}. [{source}] (集合: {collection}) {score_part}")

        # Show at most 200 characters of the chunk text.
        snippet = f"   {text}" if len(text) <= 200 else f"   {text[:200]}..."
        print(snippet)
        print("")

def search_documents(client, query, collection_name=None, limit=5):
    """Keyword/hybrid search across one or all document collections.

    Loads a SentenceTransformer embedding model when possible and runs a
    hybrid (vector + BM25) query per collection, otherwise falls back to
    BM25 only.  Raw scores are then boosted with simple keyword-match
    heuristics, results are de-duplicated and sorted.

    Returns at most *limit* dicts of {"text", "source", "score",
    "collection"}, best score first; an empty list on blank queries or
    unrecoverable errors.
    """
    if not query or not query.strip():
        return []
    
    results = []
    
    try:
        # Resolve the set of collections to search.
        if collection_name is None:
            collections = list_collections(client)
        else:
            collections = [collection_name]
        
        if not collections:
            print("没有可用的文档集合")
            return []
        
        print(f"正在搜索 {len(collections)} 个集合: {', '.join(collections)}")
        
        # Try each configured embedding model path until one loads.
        embedding_model = None
        for model_path in DEFAULT_MODEL_PATHS["embedding"]:
            try:
                real_model_path = get_hf_snapshot_path(model_path)
                if os.path.exists(real_model_path):
                    embedding_model = SentenceTransformer(
                        real_model_path,
                        model_kwargs=EMBEDDING_MODEL_KWARGS,
                        tokenizer_kwargs=EMBEDDING_TOKENIZER_KWARGS
                    )
                    break
            except Exception as e:
                print(f"加载嵌入模型失败: {e}")
        
        if embedding_model is None:
            print("无法加载嵌入模型，将使用关键词搜索")
        
        # Lower-cased query terms drive the heuristic score boost below.
        query_terms = query.lower().split()
        
        # Query each collection in turn.
        for coll_name in collections:
            try:
                collection = client.collections.get(coll_name)
                print(f"搜索集合: {coll_name}")
                
                # Guard the query itself so one failing collection does not
                # abort the whole search.
                try:
                    # Hybrid search when an embedding model is available.
                    if embedding_model:
                        query_vector = embedding_model.encode(query, convert_to_tensor=False).tolist()
                        
                        response = collection.query.hybrid(
                            query=query,
                            vector=query_vector,
                            alpha=ALPHA,
                            limit=limit * 4  # over-fetch; trimmed to *limit* after re-scoring
                        )
                    else:
                        # BM25-only fallback.
                        response = collection.query.bm25(
                            query=query,
                            limit=limit * 4  # over-fetch; trimmed later
                        )
                    
                    # Convert hits into plain dicts with a heuristic score.
                    if response and hasattr(response, 'objects') and response.objects:
                        print(f"  - 在集合 {coll_name} 中找到 {len(response.objects)} 个结果")
                        for obj in response.objects:
                            # Read properties defensively.
                            text = obj.properties.get("text", "")
                            source = obj.properties.get("source", "未知来源")
                            
                            # Base score from Weaviate metadata; 0.0 when absent.
                            score = 0.0
                            if hasattr(obj, 'metadata') and obj.metadata and hasattr(obj.metadata, 'score'):
                                if obj.metadata.score is not None:
                                    try:
                                        score = float(obj.metadata.score)
                                    except (ValueError, TypeError):
                                        pass
                            
                            # Heuristic keyword boost over text and source.
                            text_lower = text.lower()
                            source_lower = source.lower()
                            
                            keyword_matches = 0
                            exact_matches = 0
                            
                            for term in query_terms:
                                if len(term) > 1:
                                    # Term appears in the chunk text.
                                    if term in text_lower:
                                        keyword_matches += 1
                                        # Whole query appearing verbatim scores extra.
                                        if query.lower() in text_lower:
                                            exact_matches += 1
                                    
                                    # Term appears in the source path.
                                    if term in source_lower:
                                        keyword_matches += 0.5  # source matches weigh less
                            
                            # Fold the keyword evidence into the score.
                            if keyword_matches > 0:
                                # Base boost per matching term.
                                score += 0.1 * keyword_matches
                                
                                # Extra boost for verbatim query matches.
                                if exact_matches > 0:
                                    score += 0.5 * exact_matches
                                
                                # Chunks carrying chapter/book metadata markers
                                # tend to be higher-quality results.
                                if "[章节:" in text or "[书籍:" in text:
                                    score += 0.2
                            
                            # Collect the hit.
                            results.append({
                                "text": text,
                                "source": source,
                                "score": score,
                                "collection": coll_name
                            })
                    else:
                        print(f"  - 在集合 {coll_name} 中未找到结果")
                except Exception as e:
                    print(f"搜索集合 {coll_name} 时出错: {e}")
                    traceback.print_exc()
            except Exception as e:
                print(f"获取集合 {coll_name} 时出错: {e}")
        
        print(f"搜索完成，找到 {len(results)} 个结果")
        
        # De-duplicate by the first 100 characters of the chunk text.
        unique_results = []
        seen_texts = set()
        
        for result in results:
            text = result.get("text", "")
            text_start = text[:100] if text else ""
            
            if text_start and text_start not in seen_texts:
                seen_texts.add(text_start)
                unique_results.append(result)
        
        print(f"去重后剩余 {len(unique_results)} 个结果")
        
        # Sort by score, coercing malformed scores to 0.0 first.
        try:
            if unique_results:
                for r in unique_results:
                    if r.get("score") is None:
                        r["score"] = 0.0
                    elif not isinstance(r["score"], (int, float)):
                        try:
                            r["score"] = float(r["score"])
                        except (ValueError, TypeError):
                            r["score"] = 0.0
                
                # Sort key that never raises on a bad score.
                def safe_sort_key(item):
                    score = item.get("score")
                    if score is None:
                        return 0.0
                    try:
                        return float(score)
                    except (ValueError, TypeError):
                        return 0.0
                
                unique_results.sort(key=safe_sort_key, reverse=True)
        except Exception as e:
            print(f"排序结果时出错: {e}")
            traceback.print_exc()
        
        return unique_results[:limit]
    
    except Exception as e:
        print(f"搜索文档时发生错误: {e}")
        traceback.print_exc()
        return []

def delete_document_by_source(client, source_name, collection_name=COLLECTION_NAME):
    """Delete every indexed object whose "source" matches *source_name*.

    Tries three strategies in order, stopping at the first that deletes
    something:
      1. exact equality on the "source" property,
      2. Like match on the bare file name,
      3. fetching up to 10000 objects and filtering client-side.

    NOTE(review): the dict-style ``filters={...}`` below is the v3 GraphQL
    "where" shape; the v4 Python client expects
    ``weaviate.classes.query.Filter`` objects, so strategies 1 and 2 may
    always raise and fall through to the manual scan — confirm against the
    client version in use.  Likewise, v4's ``collection.data.delete_many``
    expects a filter rather than a list of UUIDs — verify.
    """
    try:
        # Nothing to do when the collection does not exist.
        if not check_collection_exists(client, collection_name):
            print(f"集合 {collection_name} 不存在，无需删除")
            return
        
        collection = client.collections.get(collection_name)
        
        print(f"在集合 {collection_name} 中查找来源为 {source_name} 的文档")
        
        # Strategy 1: exact match on the stored source value.
        try:
            query_result = collection.query.fetch_objects(
                filters={
                    "path": ["source"],
                    "operator": "Equal",
                    "valueText": source_name
                },
                limit=1000  # large cap so all matching objects are returned
            )
            
            # Delete every matched object by UUID, in batches.
            if query_result.objects:
                ids = [obj.uuid for obj in query_result.objects]
                print(f"找到 {len(ids)} 个与文件 {source_name} 完全匹配的索引项")
                
                # Batched deletion.
                batch_size = 100
                for i in range(0, len(ids), batch_size):
                    batch_ids = ids[i:i+batch_size]
                    try:
                        collection.data.delete_many(batch_ids)
                        print(f"成功删除批次 {i//batch_size + 1}: {len(batch_ids)} 个对象")
                    except Exception as batch_error:
                        print(f"删除批次 {i//batch_size + 1} 时出错: {batch_error}")
                
                print(f"成功删除文件 {source_name} 的所有索引")
                return
        except Exception as exact_match_error:
            print(f"精确匹配查询失败: {exact_match_error}")
        
        # Strategy 2: Like match on the bare file name.
        try:
            # Match on the final path component only.
            file_name = os.path.basename(source_name)
            print(f"尝试使用文件名部分匹配: {file_name}")
            
            query_result = collection.query.fetch_objects(
                filters={
                    "path": ["source"],
                    "operator": "Like",
                    "valueText": f"*{file_name}"
                },
                limit=1000
            )
            
            if query_result.objects:
                ids = [obj.uuid for obj in query_result.objects]
                print(f"找到 {len(ids)} 个与文件名 {file_name} 部分匹配的索引项")
                
                # Batched deletion.
                batch_size = 100
                for i in range(0, len(ids), batch_size):
                    batch_ids = ids[i:i+batch_size]
                    try:
                        collection.data.delete_many(batch_ids)
                        print(f"成功删除批次 {i//batch_size + 1}: {len(batch_ids)} 个对象")
                    except Exception as batch_error:
                        print(f"删除批次 {i//batch_size + 1} 时出错: {batch_error}")
                
                print(f"成功删除文件名 {file_name} 的所有索引")
                return
        except Exception as like_match_error:
            print(f"部分匹配查询失败: {like_match_error}")
        
        # Strategy 3: fetch up to 10000 objects and filter client-side.
        try:
            print("尝试获取所有对象并手动过滤...")
            all_objects = collection.query.fetch_objects(limit=10000)
            
            if all_objects and all_objects.objects:
                # Keep objects whose source contains the bare file name.
                file_name = os.path.basename(source_name)
                matching_ids = []
                
                for obj in all_objects.objects:
                    obj_source = obj.properties.get("source", "")
                    if file_name in obj_source:
                        matching_ids.append(obj.uuid)
                
                if matching_ids:
                    print(f"通过手动过滤找到 {len(matching_ids)} 个匹配项")
                    
                    # Batched deletion.
                    batch_size = 100
                    for i in range(0, len(matching_ids), batch_size):
                        batch_ids = matching_ids[i:i+batch_size]
                        try:
                            collection.data.delete_many(batch_ids)
                            print(f"成功删除批次 {i//batch_size + 1}: {len(batch_ids)} 个对象")
                        except Exception as batch_error:
                            print(f"删除批次 {i//batch_size + 1} 时出错: {batch_error}")
                    
                    print(f"成功删除文件名 {file_name} 的所有索引")
                    return
            
            print(f"未找到与文件 {source_name} 相关的索引")
        except Exception as manual_filter_error:
            print(f"手动过滤失败: {manual_filter_error}")
        
        print(f"尝试了所有方法，但未能找到与文件 {source_name} 相关的索引")
    except Exception as e:
        print(f"删除文件 {source_name} 的索引时出错: {e}")
        traceback.print_exc()

def check_and_remove_deleted_files(client):
    """Scan the hash records for files that no longer exist on disk and
    purge their index entries from every Weaviate collection.

    Args:
        client: connected Weaviate client.

    Does nothing when CHECK_DELETED_FILES is disabled or there are no
    hash records; successfully purged files are also removed from the
    persisted hash records.
    """
    if not CHECK_DELETED_FILES:
        return

    print("检查已删除文件...")
    hash_data = load_document_hashes()
    if not hash_data or not hash_data.get("files"):
        print("没有索引记录，跳过检查")
        return

    # Collect every tracked path that no longer exists on disk.
    missing = []
    for tracked_path in list(hash_data["files"].keys()):
        try:
            if not os.path.exists(tracked_path):
                print(f"文件已删除: {tracked_path}")
                missing.append(tracked_path)
        except Exception as e:
            print(f"检查文件 {tracked_path} 时出错: {e}")
            # An invalid/unreadable path is treated the same as deleted.
            missing.append(tracked_path)

    if not missing:
        print("没有检测到已删除的文件")
        return

    print(f"发现 {len(missing)} 个已删除的文件，正在清理索引...")

    known_collections = list_collections(client)

    for tracked_path in missing:
        try:
            # Source names are stored relative to DATA_DIR.
            rel_path = (os.path.relpath(tracked_path, os.path.abspath(DATA_DIR))
                        if os.path.isabs(tracked_path) else tracked_path)

            # Weaviate stores sources with forward slashes.
            source_name = rel_path.replace("\\", "/")
            print(f"删除文件 {source_name} 的索引")

            # First path component selects the per-folder collection.
            parts = rel_path.split(os.sep)
            target = (f"{COLLECTION_PREFIX}{parts[0]}" if len(parts) > 1
                      else COLLECTION_NAME)

            # Delete from the expected collection first, then sweep the rest.
            delete_document_by_source(client, source_name, target)
            for other in known_collections:
                if other != target:
                    delete_document_by_source(client, source_name, other)

            del hash_data["files"][tracked_path]
        except Exception as e:
            print(f"删除文件 {tracked_path} 的索引时出错: {e}")
            traceback.print_exc()

    # Persist the pruned hash records.
    save_document_hashes(hash_data)
    print("已删除文件的索引清理完成")

class FileSystemChangeHandler(FileSystemEventHandler):
    """Watchdog handler that keeps the Weaviate index in sync with the
    data directory.

    File events are pushed onto a queue and consumed by a single daemon
    worker thread, so bursts of filesystem activity do not trigger
    concurrent (re)indexing runs.
    """

    def __init__(self, client, processor):
        """
        Args:
            client: Weaviate client used for index deletions.
            processor: document processor used for (re)indexing.
        """
        self.client = client
        self.processor = processor
        self.hash_data = load_document_hashes()
        # Events are queued and handled on a worker thread to decouple
        # watchdog callbacks from the (slow) indexing work.
        self.event_queue = queue.Queue()
        self.processing = False
        self.worker_thread = None
        self.start_worker()

    def start_worker(self):
        """Start the daemon worker thread that drains the event queue."""
        self.processing = True
        self.worker_thread = threading.Thread(target=self._process_events)
        self.worker_thread.daemon = True
        self.worker_thread.start()

    def stop_worker(self):
        """Signal the worker thread to stop and wait briefly for it."""
        self.processing = False
        if self.worker_thread and self.worker_thread.is_alive():
            self.worker_thread.join(timeout=2)

    def _process_events(self):
        """Consume queued events until stop_worker() clears the flag."""
        while self.processing:
            try:
                # Poll with a timeout so the stop flag is re-checked at
                # least once per second.
                try:
                    event_type, file_path = self.event_queue.get(timeout=1)
                except queue.Empty:
                    continue

                try:
                    # Refresh hash records before handling the event.
                    self.hash_data = load_document_hashes()

                    # Only handle supported file types.
                    if any(file_path.endswith(ext) for ext in SUPPORTED_FILE_EXTENSIONS):
                        if event_type == "deleted":
                            print(f"检测到文件删除: {file_path}")
                            self._handle_deleted_file(file_path)
                        elif event_type == "created" or event_type == "modified":
                            print(f"检测到文件{event_type}: {file_path}")
                            self._handle_modified_file(file_path)
                finally:
                    # BUG FIX: task_done() used to be called right after
                    # get(), before the event was handled, so
                    # event_queue.join() could return while work was still
                    # in flight. Mark the task done only after processing.
                    self.event_queue.task_done()
            except Exception as e:
                print(f"处理文件事件时出错: {e}")
                traceback.print_exc()

    def _handle_deleted_file(self, file_path):
        """Remove the index entries and hash record for a deleted file."""
        try:
            # Normalise the path format.
            file_path = os.path.normpath(file_path)
            print(f"开始处理删除的文件: {file_path}")

            # Derive the path relative to DATA_DIR when possible.
            try:
                if os.path.isabs(file_path):
                    data_dir_abs = os.path.abspath(DATA_DIR)
                    if file_path.startswith(data_dir_abs):
                        rel_path = os.path.relpath(file_path, data_dir_abs)
                    else:
                        # Outside DATA_DIR: fall back to the bare file name.
                        rel_path = os.path.basename(file_path)
                else:
                    rel_path = file_path
            except ValueError:
                # relpath can fail (e.g. different drives on Windows).
                rel_path = os.path.basename(file_path)

            # Weaviate stores sources with forward slashes.
            source_name = rel_path.replace("\\", "/")
            print(f"处理删除的文件: {source_name}")

            # First path component selects the per-folder collection.
            collection_parts = rel_path.split(os.sep)
            if len(collection_parts) > 1:
                folder_name = collection_parts[0]
                collection_name = f"{COLLECTION_PREFIX}{folder_name}"
            else:
                collection_name = COLLECTION_NAME

            print(f"主要检查集合: {collection_name}")

            collections = list_collections(self.client)
            print(f"可用集合: {collections}")

            # Delete from the expected collection first...
            if collection_name in collections:
                print(f"在集合 {collection_name} 中删除文档")
                delete_document_by_source(self.client, source_name, collection_name)
            else:
                print(f"集合 {collection_name} 不存在，跳过")

            # ...then sweep every other collection.
            for coll in collections:
                if coll != collection_name:
                    print(f"在集合 {coll} 中检查并删除文档")
                    delete_document_by_source(self.client, source_name, coll)

            # Re-load the freshest hash records before mutating them.
            self.hash_data = load_document_hashes()

            # Try several path formats to locate the hash entry.
            found = False
            if file_path in self.hash_data.get("files", {}):
                print(f"在哈希数据中找到精确匹配: {file_path}")
                del self.hash_data["files"][file_path]
                found = True
            else:
                # Fall back to matching by bare file name.
                for stored_path in list(self.hash_data.get("files", {}).keys()):
                    if os.path.basename(stored_path) == os.path.basename(file_path):
                        print(f"找到可能的匹配: {stored_path}")
                        del self.hash_data["files"][stored_path]
                        found = True
                        break

            if found:
                save_document_hashes(self.hash_data)
                print(f"已从哈希记录中删除: {file_path}")
            else:
                print(f"在哈希记录中未找到匹配项: {file_path}")
        except Exception as e:
            print(f"处理删除的文件 {file_path} 时出错: {e}")
            traceback.print_exc()

    def _handle_modified_file(self, file_path):
        """Re-index the directory containing a created/modified file."""
        if not VECTORIZE_ON_CHANGE:
            return

        try:
            directory = os.path.dirname(file_path)
            if not directory:
                directory = "."

            # Re-process the file's directory (non-recursive, forced).
            print(f"处理修改的文件: {file_path}")
            self.processor.process_directory_sync(directory, recursive=False, force_reindex=True)
        except Exception as e:
            print(f"处理修改的文件 {file_path} 时出错: {e}")
            traceback.print_exc()

    def on_deleted(self, event):
        """Watchdog callback: enqueue file deletions (when enabled)."""
        if not event.is_directory and AUTO_DELETE_INDEX:
            self.event_queue.put(("deleted", event.src_path))

    def on_created(self, event):
        """Watchdog callback: enqueue file creations."""
        if not event.is_directory:
            self.event_queue.put(("created", event.src_path))

    def on_modified(self, event):
        """Watchdog callback: enqueue file modifications."""
        if not event.is_directory:
            self.event_queue.put(("modified", event.src_path))

def main():
    """Interactive entry point.

    Connects to Weaviate, reconciles the index with the data directory
    (purging deleted files, indexing new ones, then a full forced
    re-check), optionally starts filesystem monitoring, and finally
    serves an interactive command/query loop until the user quits.
    """
    print("正在启动程序...")
    try:
        client = connect_to_weaviate(WEAVIATE_URL)
        print("成功连接到Weaviate")
        
        # Create the processor that performs all (re)indexing work.
        processor = DocumentProcessor(client)
        print("创建DocumentProcessor成功")
        
        # Per-file hash records, used below to tell indexed from unindexed.
        hash_data = load_document_hashes()
        print("加载文档哈希信息成功")
        
        # Purge index entries for files deleted while the program was down.
        print("\n强制检查并清理已删除文件的索引...")
        check_and_remove_deleted_files(client)
        
        # Walk DATA_DIR and report each supported file's index status.
        print("\n数据目录中的文档信息:")
        try:
            if os.path.exists(DATA_DIR):
                files = []
                unindexed_files = []
                for root, _, filenames in os.walk(DATA_DIR):
                    for filename in filenames:
                        if any(filename.endswith(ext) for ext in SUPPORTED_FILE_EXTENSIONS):
                            file_path = os.path.join(root, filename)
                            rel_path = os.path.relpath(file_path, DATA_DIR)
                            # NOTE(review): assumes hash_data always has a
                            # "files" key — raises KeyError otherwise; confirm
                            # load_document_hashes() guarantees it.
                            status = "已索引" if file_path in hash_data["files"] else "未索引"
                            files.append((rel_path, status))
                            if status == "未索引":
                                unindexed_files.append(file_path)
                
                if files:
                    for file_path, status in sorted(files):
                        print(f"  - {file_path} [{status}]")
                else:
                    print("  没有找到支持的文档文件")
                
                # Index any files not yet present in the hash records.
                if unindexed_files:
                    print(f"\n发现 {len(unindexed_files)} 个未索引的文件，开始索引...")
                    for file_path in unindexed_files:
                        dir_path = os.path.dirname(file_path)
                        print(f"索引文件: {file_path}")
                        try:
                            # Index the file's whole directory (non-forced).
                            processor.process_directory_sync(dir_path, force_reindex=False)
                        except Exception as e:
                            print(f"索引 {file_path} 时出错: {e}")
                            traceback.print_exc()
            else:
                print("  数据目录不存在")
        except Exception as e:
            print(f"检查文档信息时出错: {e}")
            traceback.print_exc()
        
        # List collections and their document counts.
        print("\n正在检查集合...")
        try:
            collections = list_collections(client)
            
            print("\n可用的文档集合:")
            if collections:
                for i, coll_name in enumerate(collections):
                    try:
                        collection = client.collections.get(coll_name)
                        doc_count = collection.aggregate.over_all().total_count
                        print(f"  - {coll_name} (Doc_{coll_name}): {doc_count} 个文档")
                    except Exception as e:
                        print(f"  - {coll_name} (获取文档数量失败: {e})")
            else:
                print("  没有可用的集合")
        except Exception as e:
            print(f"列出集合时出错: {e}")
            traceback.print_exc()
        
        # Full forced re-check of every document under DATA_DIR.
        print("\n完整检查并更新所有文档索引...")
        try:
            processor.process_directory_sync(DATA_DIR, recursive=True, force_reindex=True)
        except Exception as e:
            print(f"处理所有文档时出错: {e}")
            traceback.print_exc()
        
        # Start watchdog-based filesystem monitoring when enabled.
        observer = None
        event_handler = None
        if FILE_MONITORING:
            print("\n设置文件系统监控...")
            try:
                event_handler = FileSystemChangeHandler(client, processor)
                observer = Observer()
                observer.schedule(event_handler, DATA_DIR, recursive=FILE_MONITOR_RECURSIVE)
                observer.start()
                print(f"开始监控目录: {DATA_DIR} (递归: {FILE_MONITOR_RECURSIVE})")
            except Exception as e:
                print(f"设置文件监控时出错: {e}")
                traceback.print_exc()
        else:
            print("\n文件监控已禁用")
        
        print("\n系统就绪，可以开始查询")
        
        try:
            # Interactive REPL: built-in commands or free-text search.
            while True:
                query = input("\n请输入查询 (输入 'q' 退出，'help' 获取帮助): ")
                if query.lower() == 'q':
                    break
                elif query.lower() == 'help':
                    print("\n可用命令:")
                    print("  q - 退出程序")
                    print("  help - 显示帮助信息")
                    print("  collections - 列出所有集合")
                    print("  reindex - 重新索引所有文档")
                    print("  reindex_force - 强制重新索引所有文档（忽略哈希值）")
                    print("  check - 检查并清理已删除文件的索引")
                    print("  reset - 重置集合（删除并重新创建）")
                    print("  reset_all - 重置所有集合")
                    print("  其他输入将被视为搜索查询")
                elif query.lower() == 'collections':
                    try:
                        collections = list_collections(client)
                        print("\n可用的文档集合:")
                        if collections:
                            for i, coll_name in enumerate(collections):
                                try:
                                    collection = client.collections.get(coll_name)
                                    doc_count = collection.aggregate.over_all().total_count
                                    print(f"  - {coll_name}: {doc_count} 个文档")
                                except Exception as e:
                                    print(f"  - {coll_name} (获取文档数量失败: {e})")
                        else:
                            print("  没有可用的集合")
                    except Exception as e:
                        print(f"列出集合时出错: {e}")
                        traceback.print_exc()
                elif query.lower() == 'reindex':
                    try:
                        print("\n重新索引所有文档...")
                        processor.process_all_documents(async_processing=ASYNC_VECTORIZATION)
                        print("重新索引完成")
                    except Exception as e:
                        print(f"重新索引文档时出错: {e}")
                        traceback.print_exc()
                elif query.lower() == 'reindex_force':
                    try:
                        print("\n强制重新索引所有文档...")
                        processor.process_directory_sync(DATA_DIR, recursive=True, force_reindex=True)
                        print("强制重新索引完成")
                    except Exception as e:
                        print(f"强制重新索引文档时出错: {e}")
                        traceback.print_exc()
                elif query.lower() == 'check':
                    try:
                        print("\n检查并清理已删除文件的索引...")
                        check_and_remove_deleted_files(client)
                    except Exception as e:
                        print(f"检查删除文件时出错: {e}")
                        traceback.print_exc()
                elif query.lower() == 'reset':
                    try:
                        collection_name = input("请输入要重置的集合名称 (默认: Document): ").strip()
                        if not collection_name:
                            collection_name = COLLECTION_NAME
                        
                        confirm = input(f"确定要重置集合 {collection_name} 吗? 这将删除所有数据! (y/n): ").lower()
                        if confirm == 'y':
                            reset_collection(client, collection_name)
                            print(f"集合 {collection_name} 已重置，请重新索引文档")
                        else:
                            print("操作已取消")
                    except Exception as e:
                        print(f"重置集合时出错: {e}")
                        traceback.print_exc()
                elif query.lower() == 'reset_all':
                    try:
                        confirm = input(f"确定要重置所有集合吗? 这将删除所有数据! (y/n): ").lower()
                        if confirm == 'y':
                            collections = list_collections(client)
                            for coll_name in collections:
                                reset_collection(client, coll_name)
                            print("所有集合已重置，请重新索引文档")
                        else:
                            print("操作已取消")
                    except Exception as e:
                        print(f"重置所有集合时出错: {e}")
                        traceback.print_exc()
                else:
                    # Anything else is treated as a search query.
                    print(f"收到查询: {query}")
                    try:
                        results = search_documents(client, query)
                        display_results(results, query)
                    except Exception as e:
                        print(f"搜索时出错: {e}")
                        traceback.print_exc()
        except KeyboardInterrupt:
            print("\n接收到退出信号，正在关闭...")
        finally:
            # Shut down the filesystem observer and its worker thread.
            if observer and event_handler:
                print("停止文件监控...")
                observer.stop()
                event_handler.stop_worker()
                observer.join()
            
            # Stop background processing before exiting.
            processor.stop_processing()
            print("程序已退出")
    
    except Exception as e:
        print(f"程序运行时出错: {e}")
        traceback.print_exc()

def reset_collection(client, collection_name=COLLECTION_NAME):
    """Drop the named collection if it exists, then recreate it empty.

    Args:
        client: connected Weaviate client.
        collection_name: collection to reset.

    Returns:
        The freshly created collection, or None when anything fails.
    """
    print(f"正在重置集合: {collection_name}...")
    try:
        # Remove the existing collection first, if present.
        existing = client.collections.list_all()
        if collection_name in existing:
            print(f"删除集合: {collection_name}")
            client.collections.delete(collection_name)
            print(f"集合 {collection_name} 已删除")

        # Recreate with the standard text/source schema and no built-in
        # vectorizer (vectors are supplied by the application).
        print(f"创建新集合: {collection_name}")
        schema_props = [
            {"name": "text", "data_type": weaviate.classes.config.DataType.TEXT},
            {"name": "source", "data_type": weaviate.classes.config.DataType.TEXT},
        ]
        fresh = client.collections.create(
            name=collection_name,
            vectorizer_config=weaviate.classes.config.Configure.Vectorizer.none(),
            properties=schema_props,
        )

        print(f"集合 {collection_name} 已重置")
        return fresh
    except Exception as e:
        print(f"重置集合失败: {e}")
        traceback.print_exc()
        return None

class WeaviateDocumentManager:
    """Weaviate文档管理器，提供对文档的增删改查功能"""
    
    def __init__(self, client=None, collection_name=COLLECTION_NAME):
        """Bind the manager to one collection.

        Args:
            client: existing Weaviate client; a new connection is opened
                when falsy.
            collection_name: name of the collection to manage.
        """
        self.client = client or connect_to_weaviate(WEAVIATE_URL)
        self.collection_name = collection_name
        self.embedding_model = None

        # Attach to the collection, creating it when it does not exist.
        try:
            self.collection = self.client.collections.get(collection_name)
            print(f"成功连接到集合: {collection_name}")
        except Exception as e:
            print(f"获取集合失败: {e}，尝试创建新集合")
            self.collection = create_collection(self.client, collection_name)
    
    def load_embedding_model(self):
        """Lazily load the sentence-embedding model, caching it on self.

        Returns:
            The loaded model, or None when every candidate fails.
        """
        if self.embedding_model is not None:
            return self.embedding_model

        print("正在加载嵌入模型...")
        try:
            # Walk the configured candidates; keep the first that loads.
            for candidate in DEFAULT_MODEL_PATHS["embedding"]:
                try:
                    resolved = get_hf_snapshot_path(candidate)
                    if not os.path.exists(resolved):
                        continue
                    print(f"从本地路径加载模型: {resolved}")
                    self.embedding_model = SentenceTransformer(
                        resolved,
                        model_kwargs=EMBEDDING_MODEL_KWARGS,
                        tokenizer_kwargs=EMBEDDING_TOKENIZER_KWARGS,
                    )
                    print("嵌入模型加载成功")
                    return self.embedding_model
                except Exception as e:
                    print(f"加载模型 {candidate} 失败: {e}")

            # No local copy worked: fall back to the generic model.
            self.embedding_model = fallback_embedding_model()
            if self.embedding_model is not None:
                print("使用备用嵌入模型")
                return self.embedding_model

            raise ValueError("无法加载嵌入模型")
        except Exception as e:
            print(f"加载嵌入模型失败: {e}")
            return None
    
    def add_document(self, text, source, vector=None):
        """Insert one document into the collection.

        Args:
            text: document body.
            source: origin label stored alongside the text.
            vector: optional precomputed embedding; generated on the fly
                when absent and an embedding model is already loaded.

        Returns:
            The new object's UUID, or None on failure.
        """
        try:
            # Embed only when the caller gave no vector and a model is
            # already loaded — no lazy model loading here.
            if vector is None and self.embedding_model:
                vector = self.embedding_model.encode(text, normalize_embeddings=True).tolist()

            doc_id = self.collection.data.insert(
                {"text": text, "source": source},
                vector=vector,
            )

            print(f"添加文档成功，ID: {doc_id}")
            return doc_id
        except Exception as e:
            print(f"添加文档失败: {e}")
            traceback.print_exc()
            return None
    
    def add_documents(self, texts, sources, vectors=None, batch_size=BATCH_SIZE):
        """Batch-insert documents.

        Args:
            texts: list of document bodies.
            sources: list of origin labels, parallel to ``texts``.
            vectors: optional precomputed embeddings; generated when absent
                and an embedding model is already loaded.
            batch_size: kept for interface compatibility (the dynamic
                batcher sizes its own batches).

        Returns:
            Number of documents successfully queued for insertion.

        Raises:
            ValueError: when ``texts`` and ``sources`` differ in length.
        """
        if len(texts) != len(sources):
            raise ValueError("texts和sources长度必须相同")

        # Generate embeddings when none were supplied and a model exists.
        if vectors is None and self.embedding_model:
            print("生成文档向量...")
            vectors = self.embedding_model.encode(texts, normalize_embeddings=True)

        # BUG FIX: the previous `vectors or [None] * len(texts)` raised
        # "truth value of an array is ambiguous" whenever `vectors` was a
        # numpy array (which encode() returns). Test for None/empty
        # explicitly instead of relying on truthiness.
        if vectors is None or len(vectors) == 0:
            vectors = [None] * len(texts)

        success_count = 0
        with self.collection.batch.dynamic() as batch:
            for i, (text, source, vector) in enumerate(zip(texts, sources, vectors)):
                try:
                    # The client expects plain lists, not numpy arrays.
                    if vector is not None:
                        if hasattr(vector, 'tolist'):
                            vector = vector.tolist()
                        elif not isinstance(vector, list):
                            vector = list(vector)

                    batch.add_object(
                        properties={
                            "text": text,
                            "source": source
                        },
                        vector=vector
                    )
                    success_count += 1
                except Exception as e:
                    print(f"添加文档 {i} 失败: {e}")
                    continue

                # Progress report every 100 docs and on the final one.
                if (i + 1) % 100 == 0 or i == len(texts) - 1:
                    print(f"处理进度: {i+1}/{len(texts)} 文档")

        print(f"成功添加 {success_count}/{len(texts)} 个文档")
        return success_count
    
    def get_document_by_id(self, doc_id):
        """Fetch a single object by UUID.

        Args:
            doc_id: object UUID.

        Returns:
            The object, or None on error.
        """
        try:
            return self.collection.query.fetch_object_by_id(doc_id)
        except Exception as e:
            print(f"获取文档失败: {e}")
            return None
    
    def get_documents_by_source(self, source, limit=100):
        """Fetch objects whose ``source`` property equals ``source``.

        Args:
            source: exact source label to match.
            limit: maximum number of objects to return.

        Returns:
            List of matching objects (empty on error).
        """
        try:
            # BUG FIX: the old v3-style dict filter is rejected by the v4
            # client used throughout this file; use the Filter builder
            # (same access pattern as weaviate.classes.config elsewhere).
            results = self.collection.query.fetch_objects(
                filters=weaviate.classes.query.Filter.by_property("source").equal(source),
                limit=limit
            )
            return results.objects
        except Exception as e:
            print(f"获取文档失败: {e}")
            return []
    
    def search_documents(self, query, limit=10, search_type="hybrid", alpha=ALPHA):
        """Search the collection.

        Args:
            query: query text.
            limit: maximum number of results to return.
            search_type: "hybrid", "semantic", or "keyword"; silently
                downgraded to "keyword" when no embedding model is usable.
            alpha: hybrid-search weight (0 = pure keyword, 1 = pure semantic).

        Returns:
            List of result objects (empty on error or no hits). Results
            missing a score get one back-filled from cosine similarity.
        """
        try:
            search_start_time = time.time()
            print(f"执行{search_type}搜索: {query}，最大返回 {limit} 个结果")
            
            # Lazily load the embedding model when semantic scoring is needed.
            if not self.embedding_model and search_type in ["hybrid", "semantic"]:
                print("加载嵌入模型...")
                self.load_embedding_model()
            
            # Encode the query; on failure degrade to keyword search.
            query_vector = None
            if self.embedding_model and search_type in ["hybrid", "semantic"]:
                try:
                    print("生成查询向量...")
                    query_vector = self.embedding_model.encode(query, normalize_embeddings=True).tolist()
                    print(f"查询向量维度: {len(query_vector)}")
                except Exception as e:
                    print(f"生成查询向量失败: {e}")
                    if search_type in ["hybrid", "semantic"]:
                        print("无法执行语义搜索，将使用关键词搜索")
                        search_type = "keyword"
            elif search_type in ["hybrid", "semantic"]:
                print("未加载嵌入模型，无法执行语义搜索，将使用关键词搜索")
                search_type = "keyword"
            
            # Dispatch to the matching query type.
            if search_type == "hybrid" and query_vector:
                print(f"执行混合搜索: query={query}, alpha={alpha}, limit={limit}")
                results = self.collection.query.hybrid(
                    query=query,
                    vector=query_vector,
                    alpha=alpha,
                    limit=limit
                )
            elif search_type == "semantic" and query_vector:
                print(f"执行语义搜索: limit={limit}")
                results = self.collection.query.near_vector(
                    vector=query_vector,
                    limit=limit
                )
            else:
                # Keyword (BM25) search is the fallback.
                print(f"执行关键词搜索: query={query}, limit={limit}")
                results = self.collection.query.bm25(
                    query=query,
                    limit=limit
                )
            
            search_end_time = time.time()
            search_elapsed = search_end_time - search_start_time
            print(f"搜索执行时间: {search_elapsed:.3f}秒")
            
            # Inspect the result set.
            if hasattr(results, 'objects') and results.objects:
                count = len(results.objects)
                print(f"搜索找到 {count} 个结果")
                
                # Back-fill missing scores ("*" match-all queries excluded).
                if self.embedding_model and query != "*":
                    # Collect documents that came back without a score.
                    docs_to_compute = []
                    docs_without_score = []
                    
                    for i, obj in enumerate(results.objects):
                        # A usable score must exist and be numeric.
                        has_score = (hasattr(obj.metadata, "score") and 
                                    obj.metadata.score is not None and 
                                    isinstance(obj.metadata.score, (int, float)))
                        
                        if not has_score:
                            # Remember the index and the text to embed.
                            docs_without_score.append(i)
                            docs_to_compute.append(obj.properties.get("text", ""))
                    
                    # Compute similarity scores for the score-less documents.
                    if docs_to_compute:
                        print(f"为 {len(docs_to_compute)} 个没有得分的文档计算相似度得分")
                        try:
                            # Embed the score-less documents.
                            doc_embeddings = self.embedding_model.encode(docs_to_compute, normalize_embeddings=True)
                            
                            # Ensure the query vector exists (keyword path).
                            if not isinstance(query_vector, list):
                                query_vector = self.embedding_model.encode(query, normalize_embeddings=True).tolist()
                                
                            # Score each document against the query.
                            for i, idx in enumerate(docs_without_score):
                                doc_vec = doc_embeddings[i]
                                # Dot product of normalized vectors = cosine similarity.
                                sim_score = float(torch.tensor(query_vector).dot(torch.tensor(doc_vec)).item())
                                # Clamp into [0, 1].
                                sim_score = max(0.0, min(1.0, sim_score))
                                # Write the score onto the result metadata.
                                results.objects[idx].metadata.score = sim_score
                                print(f"文档 {idx} 计算得分: {sim_score:.4f}")
                        except Exception as e:
                            print(f"计算文档相似度得分失败: {e}")
                            # Scoring failed: give the documents a neutral 0.5.
                            for idx in docs_without_score:
                                results.objects[idx].metadata.score = 0.5
                                print(f"文档 {idx} 设置默认得分: 0.5")
                
                # Log a preview of the top three results.
                for i, obj in enumerate(results.objects[:3]):
                    text_preview = obj.properties.get("text", "")[:50]
                    source = obj.properties.get("source", "未知")
                    score = obj.metadata.score if hasattr(obj.metadata, "score") else "无"
                    print(f"结果 {i+1}: [源: {source}] [分数: {score}] {text_preview}...")
                
                return results.objects
            else:
                print(f"搜索结果为空或格式异常: {type(results)}")
                return []
            
        except Exception as e:
            print(f"搜索文档失败: {e}")
            traceback.print_exc()
            return []
    
    def update_document(self, doc_id, text=None, source=None, vector=None):
        """Update a document's properties and/or vector.

        Args:
            doc_id: UUID of the document to update.
            text: replacement text, or None to leave unchanged.
            source: replacement source, or None to leave unchanged.
            vector: replacement embedding; auto-generated when ``text``
                changes, no vector is given, and a model is loaded.

        Returns:
            True on success, False otherwise.
        """
        try:
            # Gather only the properties the caller wants changed.
            properties = {}
            if text is not None:
                properties["text"] = text
            if source is not None:
                properties["source"] = source

            # New text without an explicit vector: re-embed when possible.
            if text is not None and vector is None and self.embedding_model:
                vector = self.embedding_model.encode(text, normalize_embeddings=True).tolist()

            if properties:
                self.collection.data.update(uuid=doc_id, properties=properties)

            if vector is not None:
                # The client expects a plain list.
                if hasattr(vector, 'tolist'):
                    vector = vector.tolist()
                elif not isinstance(vector, list):
                    vector = list(vector)
                self.collection.data.update(uuid=doc_id, vector=vector)

            print(f"更新文档成功: {doc_id}")
            return True
        except Exception as e:
            print(f"更新文档失败: {e}")
            traceback.print_exc()
            return False
    
    def delete_document(self, doc_id):
        """Delete a single document by its UUID.

        Args:
            doc_id: UUID of the document to remove.

        Returns:
            True if the deletion succeeded, False otherwise.
        """
        try:
            self.collection.data.delete_by_id(doc_id)
            print(f"删除文档成功: {doc_id}")
        except Exception as e:
            print(f"删除文档失败: {e}")
            return False
        return True
    
    def delete_documents_by_source(self, source):
        """Delete every document whose ``source`` property equals *source*.

        Args:
            source: Source value to match.

        Returns:
            Number of documents deleted (0 on failure or when nothing matched).
        """
        try:
            # The v4 client requires Filter objects, not v3-style dicts.
            from weaviate.classes.query import Filter

            # Fetch matching objects first so we can report an accurate total.
            results = self.collection.query.fetch_objects(
                filters=Filter.by_property("source").equal(source),
                limit=10000  # 设置一个较大的限制以获取所有匹配的文档
            )

            if not results.objects:
                print(f"没有找到来源为 {source} 的文档")
                return 0

            # 获取所有文档ID
            doc_ids = [obj.uuid for obj in results.objects]
            count = len(doc_ids)

            # Delete in batches; v4 delete_many takes a where-filter,
            # not a bare list of IDs.
            batch_size = 100
            for i in range(0, count, batch_size):
                batch_ids = doc_ids[i:i + batch_size]
                self.collection.data.delete_many(
                    where=Filter.by_id().contains_any(batch_ids)
                )
                print(f"已删除 {i+len(batch_ids)}/{count} 个文档")

            print(f"成功删除 {count} 个来源为 {source} 的文档")
            return count
        except Exception as e:
            print(f"删除文档失败: {e}")
            traceback.print_exc()
            return 0
    
    def count_documents(self, source=None):
        """Count documents, optionally restricted to one source.

        Args:
            source: Document source to filter on; None counts all documents.

        Returns:
            The document count, or 0 on failure.
        """
        try:
            if source:
                # The v4 client requires a Filter object, not a v3-style dict.
                from weaviate.classes.query import Filter
                count = self.collection.aggregate.over_all(
                    filters=Filter.by_property("source").equal(source)
                ).total_count
            else:
                # 统计所有文档数量
                count = self.collection.aggregate.over_all().total_count

            return count
        except Exception as e:
            print(f"统计文档数量失败: {e}")
            return 0
    
    def list_sources(self, limit=1000):
        """List all distinct document sources.

        Args:
            limit: Maximum number of sources to return.

        Returns:
            List of unique ``source`` values (empty list on failure).
        """
        try:
            # The v3-style .over()/.with_limit()/.with_fields()/.do() chain
            # does not exist in the v4 client; group-by aggregation replaces it.
            from weaviate.classes.aggregate import GroupByAggregate

            result = self.collection.aggregate.over_all(
                group_by=GroupByAggregate(prop="source", limit=limit)
            )

            # Each group carries the distinct property value it was grouped by.
            sources = []
            for group in result.groups:
                value = group.grouped_by.value
                if value:
                    sources.append(value)

            return sources
        except Exception as e:
            print(f"列出文档来源失败: {e}")
            traceback.print_exc()
            return []
    
    def export_documents(self, output_file, source=None, limit=10000):
        """Export documents (with vectors) to a JSON file.

        Args:
            output_file: Path of the JSON file to write.
            source: Document source to filter on; None exports all documents.
            limit: Maximum number of documents to export.

        Returns:
            Number of documents exported (0 on failure or no match).
        """
        try:
            # v4 requires Filter objects; also request vectors explicitly —
            # without include_vector=True, obj.vector is always empty.
            if source:
                from weaviate.classes.query import Filter
                results = self.collection.query.fetch_objects(
                    filters=Filter.by_property("source").equal(source),
                    limit=limit,
                    include_vector=True
                )
            else:
                results = self.collection.query.fetch_objects(
                    limit=limit,
                    include_vector=True
                )

            if not results.objects:
                print("没有找到要导出的文档")
                return 0

            # 准备导出数据
            export_data = []
            for obj in results.objects:
                vector = getattr(obj, "vector", None)
                # Newer clients return named vectors as {"default": [...]}.
                if isinstance(vector, dict):
                    vector = vector.get("default")
                export_data.append({
                    # UUID objects are not JSON-serializable; stringify them.
                    "id": str(obj.uuid),
                    "text": obj.properties.get("text", ""),
                    "source": obj.properties.get("source", ""),
                    "vector": vector
                })

            # 写入文件
            with open(output_file, "w", encoding="utf-8") as f:
                json.dump(export_data, f, ensure_ascii=False, indent=2)

            count = len(export_data)
            print(f"成功导出 {count} 个文档到 {output_file}")
            return count
        except Exception as e:
            print(f"导出文档失败: {e}")
            traceback.print_exc()
            return 0
    
    def import_documents(self, input_file):
        """Import documents from a JSON file produced by export_documents.

        Args:
            input_file: Path of the JSON file to read.

        Returns:
            Number of documents imported (0 on failure or empty file).
        """
        try:
            # Load the exported records.
            with open(input_file, "r", encoding="utf-8") as f:
                records = json.load(f)

            if not records:
                print("没有找到要导入的文档")
                return 0

            # Pull out the parallel columns; a missing/empty vector becomes None.
            texts = [rec.get("text", "") for rec in records]
            sources = [rec.get("source", "") for rec in records]
            vectors = [rec["vector"] if rec.get("vector") else None for rec in records]

            # Reuse the stored vectors only when every record carries one;
            # otherwise let add_documents regenerate all embeddings.
            if None in vectors:
                count = self.add_documents(texts, sources)
            else:
                count = self.add_documents(texts, sources, vectors)

            print(f"成功导入 {count} 个文档从 {input_file}")
            return count
        except Exception as e:
            print(f"导入文档失败: {e}")
            traceback.print_exc()
            return 0
    
    def close(self):
        """Close the Weaviate client connection, if one is open.

        Safe to call multiple times. Shutdown failures are logged rather
        than raised, so cleanup never masks the caller's own exit path.
        """
        if self.client:
            try:
                self.client.close()
                print("已关闭Weaviate连接")
            except Exception as e:
                # Best-effort shutdown: don't propagate, but don't silently
                # swallow either (the original bare `except: pass` hid errors).
                print(f"关闭Weaviate连接失败: {e}")
            self.client = None

def main_document_manager():
    """Interactive console for managing documents in Weaviate.

    Presents a menu loop (add / search / update / delete / count /
    list sources / export / import) and dispatches to the corresponding
    WeaviateDocumentManager methods. The connection is always closed on
    exit, whatever way the loop terminates.
    """
    def _read_int(prompt, default):
        # Robust numeric prompt: fall back to the default on blank or
        # non-numeric input instead of letting ValueError kill the loop.
        raw = input(prompt).strip()
        if not raw:
            return default
        try:
            return int(raw)
        except ValueError:
            print(f"无效的数字，使用默认值 {default}")
            return default

    print("启动Weaviate文档管理器...")

    # 连接Weaviate
    client = connect_to_weaviate(WEAVIATE_URL)

    # 创建文档管理器
    manager = WeaviateDocumentManager(client)

    try:
        while True:
            print("\n=== Weaviate文档管理器 ===")
            print("1. 添加文档")
            print("2. 搜索文档")
            print("3. 更新文档")
            print("4. 删除文档")
            print("5. 统计文档数量")
            print("6. 列出文档来源")
            print("7. 导出文档")
            print("8. 导入文档")
            print("9. 退出")

            choice = input("\n请选择操作 (1-9): ")

            if choice == "1":
                # 添加文档
                text = input("请输入文档内容: ")
                source = input("请输入文档来源: ")
                manager.add_document(text, source)

            elif choice == "2":
                # 搜索文档
                query = input("请输入搜索内容: ")
                search_type = input("请选择搜索类型 (hybrid/semantic/keyword，默认hybrid): ") or "hybrid"
                limit = _read_int("请输入返回结果数量 (默认10): ", 10)

                results = manager.search_documents(query, limit, search_type)

                print(f"\n找到 {len(results)} 个结果:")
                for i, obj in enumerate(results):
                    print(f"{i+1}. ID: {obj.uuid}")
                    print(f"   来源: {obj.properties.get('source', '未知')}")
                    text = obj.properties.get("text", "")
                    print(f"   内容: {text[:100]}..." if len(text) > 100 else f"   内容: {text}")
                    if hasattr(obj.metadata, "score"):
                        print(f"   分数: {obj.metadata.score}")
                    print()

            elif choice == "3":
                # 更新文档 (blank answers mean "leave unchanged")
                doc_id = input("请输入要更新的文档ID: ")
                text = input("请输入新的文档内容 (留空表示不更新): ")
                source = input("请输入新的文档来源 (留空表示不更新): ")

                text = text if text else None
                source = source if source else None

                manager.update_document(doc_id, text, source)

            elif choice == "4":
                # 删除文档
                print("删除选项:")
                print("1. 按ID删除")
                print("2. 按来源删除")

                delete_choice = input("请选择删除方式 (1-2): ")

                if delete_choice == "1":
                    doc_id = input("请输入要删除的文档ID: ")
                    manager.delete_document(doc_id)
                elif delete_choice == "2":
                    # Bulk delete is destructive — require explicit confirmation.
                    source = input("请输入要删除的文档来源: ")
                    confirm = input(f"确定要删除所有来源为 '{source}' 的文档吗? (y/n): ")
                    if confirm.lower() == "y":
                        manager.delete_documents_by_source(source)
                else:
                    print("无效的选择")

            elif choice == "5":
                # 统计文档数量
                source = input("请输入要统计的文档来源 (留空表示统计所有): ")
                count = manager.count_documents(source if source else None)
                if source:
                    print(f"来源为 '{source}' 的文档数量: {count}")
                else:
                    print(f"总文档数量: {count}")

            elif choice == "6":
                # 列出文档来源
                sources = manager.list_sources()
                print(f"\n找到 {len(sources)} 个文档来源:")
                for i, source in enumerate(sources):
                    count = manager.count_documents(source)
                    print(f"{i+1}. {source} ({count} 个文档)")

            elif choice == "7":
                # 导出文档
                output_file = input("请输入导出文件路径: ")
                source = input("请输入要导出的文档来源 (留空表示导出所有): ")
                limit = _read_int("请输入最大导出数量 (默认10000): ", 10000)

                manager.export_documents(output_file, source if source else None, limit)

            elif choice == "8":
                # 导入文档
                input_file = input("请输入导入文件路径: ")
                manager.import_documents(input_file)

            elif choice == "9":
                # 退出
                print("正在退出...")
                break

            else:
                print("无效的选择，请重试")

    finally:
        # 关闭连接 — runs on break, exception, or Ctrl-C alike.
        manager.close()
        print("程序已退出")

if __name__ == "__main__":
    # Top-level menu: choose between the original index/search workflow
    # and the interactive document manager.
    print("=== Weaviate向量数据库工具 ===")
    print("1. 运行原始功能 (文档索引与搜索)")
    print("2. 运行文档管理器 (增删改查)")

    selection = input("\n请选择功能 (1-2): ")

    if selection != "2":
        # Any answer other than "2" falls back to the original main().
        main()
    else:
        main_document_manager()