import logging
import tomli
import json
from pathlib import Path
from typing import Any, Dict, List
from urllib.parse import urlparse

from .utils import save_json_file

GROUPS_CONFIG_DIR = Path("config/groups")
OUTPUT_DIR = Path("data/output")

def _load_group_configs() -> Dict[str, List[str]]:
    """Load all keyword-group config files from GROUPS_CONFIG_DIR.

    Each ``*.toml`` file maps group names to keyword lists; entries whose
    value is not a list are ignored.  A malformed file is logged and
    skipped, so one bad file no longer aborts loading of the remaining
    configs (previously the whole loop sat inside a single try block).

    Returns:
        Mapping of group name -> list of keywords (empty on total failure).
    """
    keyword_groups: Dict[str, List[str]] = {}
    try:
        # sorted() makes the load order (and thus last-writer-wins for
        # duplicate group names) deterministic across filesystems.
        config_files = sorted(GROUPS_CONFIG_DIR.glob("*.toml"))
    except OSError as e:
        logging.error(f"加载分组配置时出错: {e}")
        return keyword_groups

    for config_file in config_files:
        try:
            with open(config_file, "rb") as f:
                data = tomli.load(f)
        except Exception as e:
            # Skip just this file; keep processing the others.
            logging.error(f"加载分组配置时出错 ({config_file}): {e}")
            continue
        for group_name, keywords in data.items():
            if isinstance(keywords, list):
                keyword_groups[group_name] = keywords
    return keyword_groups

def _get_domain(url: str) -> str:
    """从URL中提取域名"""
    try:
        return urlparse(url).netloc
    except Exception:
        return ""

def _evaluate_source_quality(source: Dict[str, Any]) -> int:
    """评估书源规则的完善度"""
    score = 0
    # 基础规则存在性
    for rule in ["ruleSearch", "ruleToc", "ruleContent"]:
        if source.get(rule):
            score += 1
    
    # 关键子字段检查
    if source.get("ruleSearch"):
        score += sum(1 for field in ["bookList", "bookUrl"] if field in source["ruleSearch"])
    
    if source.get("ruleToc"):
        score += sum(1 for field in ["chapterList", "chapterUrl"] if field in source["ruleToc"])
        
    return score

def _is_source_rule_valid(source: Dict[str, Any]) -> bool:
    """校验书源核心规则是否有效"""
    if not source.get("bookSourceName") or not source.get("bookSourceUrl"):
        return False
    return True

def _rule_fingerprint(source: Dict[str, Any]) -> tuple:
    """Canonical content fingerprint of a source's core rule sections.

    Sources with identical fingerprints are considered duplicates within a
    URL group. sort_keys makes the JSON form order-independent.
    """
    return (
        json.dumps(source.get("ruleSearch", {}), sort_keys=True),
        json.dumps(source.get("ruleContent", {}), sort_keys=True),
        json.dumps(source.get("ruleToc", {}), sort_keys=True),
    )


def _prefer(current: Dict[str, Any], candidate: Dict[str, Any]) -> Dict[str, Any]:
    """Pick the better of two fingerprint-identical sources.

    An "官方" (official) name wins outright; with equal official status the
    candidate replaces the incumbent only if it scores strictly higher.
    """
    current_official = "官方" in current.get("bookSourceName", "")
    candidate_official = "官方" in candidate.get("bookSourceName", "")
    if candidate_official != current_official:
        return candidate if candidate_official else current
    if _evaluate_source_quality(candidate) > _evaluate_source_quality(current):
        return candidate
    return current


def _dedupe_and_select(valid_sources: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Group by bookSourceUrl, dedupe by fingerprint, keep best 3 per URL."""
    grouped_by_url: Dict[str, List[Dict[str, Any]]] = {}
    for source in valid_sources:
        grouped_by_url.setdefault(source["bookSourceUrl"], []).append(source)

    final_sources: List[Dict[str, Any]] = []
    for group_sources in grouped_by_url.values():
        unique_in_group: Dict[tuple, Dict[str, Any]] = {}
        for source in group_sources:
            fingerprint = _rule_fingerprint(source)
            if fingerprint in unique_in_group:
                unique_in_group[fingerprint] = _prefer(unique_in_group[fingerprint], source)
            else:
                unique_in_group[fingerprint] = source

        # Official sources first, then by descending quality score.
        deduped_sources = sorted(
            unique_in_group.values(),
            key=lambda s: (
                "官方" in s.get("bookSourceName", ""),
                _evaluate_source_quality(s),
            ),
            reverse=True,
        )
        final_sources.extend(deduped_sources[:3])
    return final_sources


def _reassign_groups(sources_list: List[Dict[str, Any]]) -> None:
    """Rewrite each source's bookSourceGroup from the keyword config.

    The first configured group whose keyword appears in the source name or
    its old group wins; unmatched sources fall back to "其他".
    """
    keyword_groups = _load_group_configs()
    for source in sources_list:
        source_name = source.get("bookSourceName", "")
        old_group = source.get("bookSourceGroup", "")
        new_group = "其他"  # default group
        for group, keywords in keyword_groups.items():
            if any(kw in source_name or kw in old_group for kw in keywords):
                new_group = group
                break
        source["bookSourceGroup"] = new_group


def _split_by_rating(sources_list: List[Dict[str, Any]]):
    """Split sources into (adult, clean) lists based on group markers."""
    sources_adult: List[Dict[str, Any]] = []
    sources_clean: List[Dict[str, Any]] = []
    for source in sources_list:
        group = source.get("bookSourceGroup", "")
        if "18" in group or "禁" in group or "NSFW" in group:
            sources_adult.append(source)
        else:
            sources_clean.append(source)
    return sources_adult, sources_clean


def process_sources(sources: List[Dict[str, Any]]):
    """Process, deduplicate, and classify valid book sources.

    Pipeline:
      1. Drop sources missing core fields (name / URL).
      2. Group by bookSourceUrl; within each group dedupe by rule
         fingerprint (preferring "官方" and higher-quality entries) and
         keep at most the 3 best.
      3. Sort survivors by domain.
      4. Re-assign bookSourceGroup from the keyword config files.
      5. Split into adult / clean lists and save both as JSON.
    """
    logging.info(f"开始处理 {len(sources)} 个书源...")

    # 1. Filter out sources with invalid core rules.
    valid_sources = [s for s in sources if _is_source_rule_valid(s)]
    logging.info(f"过滤无效规则后剩余 {len(valid_sources)} 个书源。")

    # 2. Dedupe within each URL group and keep the best 3 per URL.
    sources_list = _dedupe_and_select(valid_sources)
    logging.info(f"去重和筛选后剩余 {len(sources_list)} 个书源。")

    # 3. Sort by domain for stable, readable output.
    sources_list.sort(key=lambda s: _get_domain(s.get("bookSourceUrl", "")))
    logging.info("书源已按域名排序。")

    # 4. Re-group via keyword config (mutates sources in place).
    _reassign_groups(sources_list)
    logging.info("书源已重新分组。")

    # 5. Classify output (adult vs. clean) and persist.
    sources_18, sources_17 = _split_by_rating(sources_list)
    save_json_file(OUTPUT_DIR / "sources_18_plus.json", sources_18)
    save_json_file(OUTPUT_DIR / "sources_clean.json", sources_17)

    logging.info("处理完成，已将书源分类保存到输出目录。")
