# -*- coding: utf-8 -*-
"""
简单配置模块

提供基本的配置常量，便于新手理解和修改。
"""

import os

# HuggingFace mirror configuration (speeds up downloads from mainland China)
HF_MIRROR_ENDPOINT = "https://hf-mirror.com"
HF_HUB_CACHE = os.path.expanduser("~/.cache/huggingface")
SENTENCE_TRANSFORMERS_CACHE = os.path.expanduser("~/.cache/sentence_transformers")

# Set environment variables so the domestic mirror takes effect immediately.
# NOTE(review): these only help if this module is imported BEFORE any
# transformers / huggingface_hub import — verify import order at call sites.
os.environ["HF_ENDPOINT"] = HF_MIRROR_ENDPOINT
os.environ["HUGGINGFACE_HUB_CACHE"] = HF_HUB_CACHE
os.environ["SENTENCE_TRANSFORMERS_HOME"] = SENTENCE_TRANSFORMERS_CACHE
os.environ["TRANSFORMERS_CACHE"] = HF_HUB_CACHE  # presumably kept for older transformers versions; deprecated in newer ones — TODO confirm
os.environ["HF_HOME"] = HF_HUB_CACHE  # NOTE(review): HF_HOME conventionally points at the *parent* of the hub cache — confirm this overlap is intended

# Must be set before importing any transformers or sentence-transformers
# library; the extra variables below make sure all HF-related libraries
# use the mirror.
os.environ["HUGGINGFACE_CO_URL_HOME"] = HF_MIRROR_ENDPOINT
os.environ["HUGGINGFACE_HUB_URL"] = HF_MIRROR_ENDPOINT
os.environ["HF_HUB_URL"] = HF_MIRROR_ENDPOINT
os.environ["HF_HUB_OFFLINE"] = "0"            # force online mode
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"  # opt out of telemetry

# Set explicitly for sentence-transformers as well
os.environ["SENTENCE_TRANSFORMERS_CACHE"] = SENTENCE_TRANSFORMERS_CACHE

# Basic path configuration
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # parent of this file's directory
DATA_DIR = os.path.join(PROJECT_ROOT, 'questionretrieval', 'io')
QUESTIONS_FILE = os.path.join(DATA_DIR, 'questions.json')

# Model configuration
BERT_CHINESE_MODEL = 'bert-base-chinese'
SENTENCE_TRANSFORMER_MODEL = 'paraphrase-multilingual-MiniLM-L12-v2'

# Cache configuration
CACHE_ENABLED = True
CACHE_DIR = os.path.join(PROJECT_ROOT, '.cache')

# Performance configuration
BATCH_SIZE = 32
TOP_K_DEFAULT = 5

# ==================== Rerank model configuration ====================

# Default device: 'cuda' only when CUDA_VISIBLE_DEVICES is set and non-empty.
# NOTE(review): this does not actually probe for a working GPU — consider
# torch.cuda.is_available() if torch is a guaranteed dependency.
DEFAULT_DEVICE = 'cuda' if os.environ.get('CUDA_VISIBLE_DEVICES') else 'cpu'

# Cache directories for downloaded models and experiment results
MODELS_CACHE_DIR = os.path.join(PROJECT_ROOT, 'models_cache')
RESULTS_CACHE_DIR = os.path.join(PROJECT_ROOT, 'results_cache')

# BGE reranker model configurations, keyed by size/variant.
# Each entry is passed (presumably) to the model loader — verify consumer.
BGE_RERANKER_CONFIG = {
    'base': {
        'model_name': 'BAAI/bge-reranker-base',
        'max_length': 512,
        'batch_size': 32,
        'device': DEFAULT_DEVICE,
        'trust_remote_code': True,
        'cache_dir': os.path.join(MODELS_CACHE_DIR, 'bge_reranker_base')
    },
    'large': {
        'model_name': 'BAAI/bge-reranker-large',
        'max_length': 512,
        'batch_size': 16,  # smaller batch size for the large model
        'device': DEFAULT_DEVICE,
        'trust_remote_code': True,
        'cache_dir': os.path.join(MODELS_CACHE_DIR, 'bge_reranker_large')
    },
    'v2_m3': {
        'model_name': 'BAAI/bge-reranker-v2-m3',
        'max_length': 512,
        'batch_size': 16,  # smaller batch size for the v2-m3 model
        'device': DEFAULT_DEVICE,
        'trust_remote_code': True,
        'cache_dir': os.path.join(MODELS_CACHE_DIR, 'bge_reranker_v2_m3')
    }
}

# BGE-M3 embedding model configuration (used in embedding+rerank experiments)
BGE_M3_CONFIG = {
    'model_name': 'BAAI/bge-m3',
    'max_length': 8192,
    'batch_size': 32,
    'device': DEFAULT_DEVICE,
    'normalize_embeddings': True,
    'cache_dir': os.path.join(MODELS_CACHE_DIR, 'bge_m3')
}

# Rerank experiment configurations, keyed by experiment type.
RERANK_EXPERIMENT_CONFIG = {
    'embedding_rerank': {
        'top_k_embedding': 50,  # number of candidates returned by the embedding stage
        'top_k_rerank': 10,     # number of results returned by the rerank stage
        'similarity_threshold': 0.0,  # similarity cutoff (0.0 = keep all)
        'enable_cache': True,
        'cache_dir': os.path.join(RESULTS_CACHE_DIR, 'reports/rerank/embedding_rerank')
    },
    'rerank_only': {
        'top_k': 10,
        'enable_parallel': True,
        'max_workers': 4,
        'enable_cache': True,
        'cache_dir': os.path.join(RESULTS_CACHE_DIR, 'reports/rerank/rerank_only')
    }
}

# Evaluation metrics configuration: quality metrics (evaluated at each k in
# k_values) plus wall-clock speed metrics.
RERANK_EVALUATION_CONFIG = {
    'metrics': [
        'precision_at_k',
        'recall_at_k',
        'ndcg_at_k',
        'map_at_k',
        'mrr'
    ],
    'k_values': [1, 3, 5, 10],
    'speed_metrics': [
        'total_time',
        'avg_time_per_query',
        'throughput'
    ]
}

# Visualization configuration (presumably consumed by matplotlib/seaborn
# plotting code elsewhere — verify against the plotting module).
RERANK_VISUALIZATION_CONFIG = {
    'figure_size': (12, 8),
    'dpi': 300,
    'style': 'seaborn-v0_8',
    'color_palette': 'Set2',
    'save_formats': ['png', 'pdf'],
    'font_size': 12,
    'title_size': 14,
    'legend_size': 10
}

# Registry of available models: short alias -> HuggingFace repo id.
AVAILABLE_MODELS = {
    'rerankers': {
        'bge_base': 'BAAI/bge-reranker-base',
        'bge_large': 'BAAI/bge-reranker-large',
        'bge_v2_m3': 'BAAI/bge-reranker-v2-m3'
    },
    'embeddings': {
        'bge_m3': 'BAAI/bge-m3',
        'bge_large_zh': 'BAAI/bge-large-zh-v1.5',
        'text2vec': 'shibing624/text2vec-base-chinese'
    }
}

# Recommended configuration presets, from quick smoke test to production.
RERANK_RECOMMENDED_CONFIGS = {
    'quick_test': {
        'models': ['bge_base'],
        'top_k_embedding': 20,
        'top_k_rerank': 5,
        'batch_size': 16
    },
    'full_comparison': {
        'models': ['bge_base', 'bge_large'],
        'top_k_embedding': 50,
        'top_k_rerank': 10,
        'batch_size': 32
    },
    'production': {
        'models': ['bge_base'],
        'top_k_embedding': 100,
        'top_k_rerank': 20,
        'batch_size': 64
    }
}

# ==================== 便捷函数 ====================
def get_questions_file() -> str:
    """Return the absolute path of the questions JSON file."""
    return QUESTIONS_FILE

def get_cache_dir() -> str:
    """Return the project-level cache directory path."""
    return CACHE_DIR

def is_cache_enabled() -> bool:
    """Report whether caching is turned on."""
    return CACHE_ENABLED

def get_huggingface_mirror() -> str:
    """Return the HuggingFace mirror endpoint URL."""
    return HF_MIRROR_ENDPOINT

# Rerank相关便捷函数
def get_rerank_model_config(model_type: str, model_name: str):
    """Look up the configuration dict for a reranker or embedding model.

    Both short aliases (e.g. 'bge_base') and full HuggingFace repo ids
    (e.g. 'BAAI/bge-reranker-base') are accepted as *model_name*.
    A shallow copy is returned so callers may mutate it freely.

    Raises:
        ValueError: if the type/name combination is not recognized.
    """
    # Alias table mapping every accepted reranker name to its config key.
    reranker_aliases = {
        'bge_base': 'base',
        'BAAI/bge-reranker-base': 'base',
        'bge_large': 'large',
        'BAAI/bge-reranker-large': 'large',
        'bge_v2_m3': 'v2_m3',
        'BAAI/bge-reranker-v2-m3': 'v2_m3',
    }
    if model_type == 'reranker' and model_name in reranker_aliases:
        return BGE_RERANKER_CONFIG[reranker_aliases[model_name]].copy()
    if model_type == 'embedding' and model_name in ('bge_m3', 'BAAI/bge-m3'):
        return BGE_M3_CONFIG.copy()

    raise ValueError(f"未知的模型类型或名称: {model_type}/{model_name}")

def get_rerank_experiment_config(experiment_type: str):
    """Return a shallow copy of the named rerank experiment configuration.

    Raises:
        ValueError: if *experiment_type* is not a known experiment.
    """
    if experiment_type not in RERANK_EXPERIMENT_CONFIG:
        raise ValueError(f"未知的实验类型: {experiment_type}")
    return RERANK_EXPERIMENT_CONFIG[experiment_type].copy()

def get_rerank_recommended_config(config_name: str):
    """Return a shallow copy of the named recommended preset.

    Raises:
        ValueError: if *config_name* is not a known preset.
    """
    if config_name not in RERANK_RECOMMENDED_CONFIGS:
        raise ValueError(f"未知的推荐配置: {config_name}")
    return RERANK_RECOMMENDED_CONFIGS[config_name].copy()

def validate_rerank_config(config: dict, config_type: str) -> bool:
    """Check that *config* contains every field required for *config_type*.

    Config types with no entry in the requirements table have no required
    fields and therefore validate as True.
    NOTE(review): unknown types passing validation looks intentional
    (permissive), but worth confirming with the callers.
    """
    requirements = {
        'reranker': ('model_name', 'device'),
        'embedding': ('model_name', 'device'),
        'experiment': ('top_k',),
        'api': ('api_url',),
    }
    return all(field in config for field in requirements.get(config_type, ()))