import torch
import os
import re
from datetime import datetime

# File and path configuration
DATA_DIR = "data"  # directory holding data files
os.makedirs(DATA_DIR, exist_ok=True)  # created eagerly at import time

# File-loading configuration
DEFAULT_FOLDER = "default"  # folder processed by default
PROCESS_ALL_FOLDERS = True  # whether to process every folder
FORCE_REINDEX = False  # keep False to avoid re-indexing already-indexed documents

# Data-processing configuration
VECTORIZE_ON_STARTUP = True  # vectorize documents at startup
VECTORIZE_ON_CHANGE = True    # re-vectorize when a file changes
ASYNC_VECTORIZATION = True    # run vectorization asynchronously
MAX_WORKERS = 4               # maximum worker threads for parallel processing

# File-deletion configuration
AUTO_DELETE_INDEX = True      # drop the corresponding index entry when a file is deleted
CHECK_DELETED_FILES = True    # check for already-deleted files at startup

# File-monitoring configuration
FILE_MONITORING = True        # enable file monitoring
FILE_MONITOR_RECURSIVE = True # monitor subdirectories recursively

# Model-directory configuration
MODELS_DIR = "models"  # local model storage directory
os.makedirs(MODELS_DIR, exist_ok=True)  # created eagerly at import time

# HuggingFace environment configuration.
# Use setdefault so an HF_ENDPOINT / HF_HUB_DISABLE_XET already exported by
# the user (shell, container, CI) is respected instead of being clobbered;
# the module constants reflect the *effective* values either way.
# NOTE: this module must be imported before huggingface_hub/transformers for
# these variables to take effect.
HF_ENDPOINT = os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")  # mirror site by default
HF_HUB_DISABLE_XET = os.environ.setdefault("HF_HUB_DISABLE_XET", "1")  # disable XET by default

# Document-index configuration
DOCUMENT_HASH_FILE = os.path.join(DATA_DIR, "document_hashes.json")  # persisted content hashes
HASH_ALGORITHM = "sha256"  # algorithm used to fingerprint documents
RECURSIVE_DIRECTORY_SCAN = True  # scan directories recursively
SUPPORTED_FILE_EXTENSIONS = [".txt", ".epub"]  # only these extensions are indexed
COLLECTION_PREFIX = "Doc_"  # prefix used when naming collections

# Weaviate connection configuration
WEAVIATE_HOST = "localhost"
WEAVIATE_PORT = 8080  # HTTP/REST port
WEAVIATE_GRPC_PORT = 50051  # gRPC port
WEAVIATE_URL = f"http://{WEAVIATE_HOST}:{WEAVIATE_PORT}"

# Collection name and chunking configuration
COLLECTION_NAME = "Document"
BATCH_SIZE = 100  # objects per batch
CHUNK_SIZE = 1000  # text chunk size — presumably characters; confirm against the splitter
CHUNK_OVERLAP = 200  # overlap between adjacent chunks

# Model configuration.
# `local_model_path` originally hard-coded a machine-specific Windows
# directory; it can now be overridden without editing this file via the
# EMBEDDING_MODEL_PATH / RERANKER_MODEL_PATH environment variables.  The
# defaults preserve the original paths, so behavior is unchanged when the
# variables are not set.
EMBEDDING_CONFIG = {
    "model_id": "Qwen/Qwen3-Embedding-0.6B",
    "local_model_path": os.environ.get(
        "EMBEDDING_MODEL_PATH",
        "D:/ProjectWorkspace/shellMakefileDeploymentProjects/windowsDeploymentProjects/hfd-download-project/models/Qwen3-Embedding-0.6B",
    ),
    "use_flash_attention": False,
    "use_cross_encoder": False,
}

RERANKER_CONFIG = {
    "model_id": "Qwen/Qwen3-Reranker-0.6B",
    "local_model_path": os.environ.get(
        "RERANKER_MODEL_PATH",
        "D:/ProjectWorkspace/shellMakefileDeploymentProjects/windowsDeploymentProjects/hfd-download-project/models/Qwen3-Reranker-0.6B",
    ),
    "use_flash_attention": False,
    "use_cross_encoder": False,
    "use_fp16": False,          # full precision by default
    "max_length": 8192,
}

# Candidate model paths, tried in order.
DEFAULT_MODEL_PATHS = {
    "embedding": [
        EMBEDDING_CONFIG["local_model_path"],  # prefer the local copy
        EMBEDDING_CONFIG["model_id"],          # fall back to the online model
    ],
    "reranker": [
        RERANKER_CONFIG["local_model_path"],   # prefer the local copy
        RERANKER_CONFIG["model_id"],           # fall back to the online model
    ],
    "fallback_embedding": [
        "paraphrase-multilingual-MiniLM-L12-v2",  # small multilingual fallback
        "all-MiniLM-L6-v2",                       # small English fallback
    ],
    "fallback_reranker": [
        "cross-encoder/ms-marco-MiniLM-L-6-v2",   # small reranker fallback
    ],
}

# External models used for vectorization.
# NOTE(review): these duplicate EMBEDDING_CONFIG["model_id"] and
# RERANKER_CONFIG["model_id"] defined earlier in this file — keep in sync.
EMBEDDING_MODEL_NAME = "Qwen/Qwen3-Embedding-0.6B"
RERANKER_MODEL_NAME = "Qwen/Qwen3-Reranker-0.6B"

# Embedding model parameters
EMBEDDING_MODEL_KWARGS = {
    "device_map": "auto"  # let the loader place the model on available devices
}
EMBEDDING_TOKENIZER_KWARGS = {
    "padding_side": "left"  # pad on the left
}

# Reranker model parameters
RERANKER_TOKENIZER_KWARGS = {
    "padding": True,
    "truncation": True,
    "max_length": 512,  # reduced length to avoid OOM
    "padding_side": "right"  # pad on the right
}
RERANKER_MODEL_KWARGS = {
    "device_map": "auto"  # let the loader place the model on available devices
}

# Prefer the GPU when CUDA is available, else fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Hybrid-search parameters
ALPHA = 0.5  # semantic/keyword balance (0 = pure keyword, 1 = pure semantic)
QUERY_LIMIT = 10  # result limit per query
USE_RERANKER = False  # reranking disabled by default
PRELOAD_MODELS = False  # models are not preloaded by default

# Example query ("Where is the capital of China?")
QUERY = "中国的首都是哪里？"
QUERY_INSTRUCTION = "Given a web search query, retrieve relevant passages that answer the query"

# Sample documents (Chinese/English demo content)
SAMPLE_DOCUMENTS = [
    "中国的首都是北京。",
    "重力是两个物体相互吸引的力。",
    "Python 是一种广泛用于 AI 和 Web 开发的编程语言。",
    "北京是中国的文化和政治中心。"
]

# Rerank-tuning parameters — score bonuses (presumably multiplicative
# weights; confirm against the reranking code that consumes them)
EXACT_MATCH_BONUS = 1.5  # bonus for an exact match
KEYWORD_MATCH_BONUS = 1.2  # bonus for a keyword match
FIRST_SENTENCE_WEIGHT = 1.3  # extra weight when the first sentence contains the answer

# Keyword-extraction configuration: maps Chinese question-word regexes to
# the answer categories they ask for
QUESTION_PATTERNS = {
    r'谁是|谁|何人': ['人物'],  # who -> person
    r'哪里|什么地方|何处|在哪': ['地点', '位置'],  # where -> location / position
    r'什么时候|何时|几时': ['时间'],  # when -> time
    r'为什么|为何|何故': ['原因'],  # why -> reason
    r'如何|怎么|怎样': ['方法'],  # how -> method
    r'是什么|什么是': ['定义'],  # what is -> definition
    r'多少|几个': ['数量']  # how many -> quantity
}
