import json
import logging
import os
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from bson import ObjectId
from pymongo import ASCENDING
from tqdm import tqdm
from entity.function_candidate import FunctionCandidate
from entity.function_result import FunctionResult
from config.config import MatchType, Config
from config.log_config import get_logger
from matching.single_match import find_best_match_for_target
from utils.mongo_utils import *

# Initialize the module-level logger
logger = get_logger(logging.INFO)


def save_interim_results(data: dict, path: str) -> None:
    """
    Persist interim matching statistics to *path* as JSON.

    ``libraries_processed`` is kept in memory as a ``set`` (of
    ``(library, version)`` tuples), which JSON cannot encode, so it is
    converted to a list before dumping.  Write failures are logged and
    swallowed so a broken checkpoint write cannot abort a long match run.

    Args:
        data: statistics dict; only shallow-copied, never mutated.
        path: destination JSON file path.
    """
    data_to_save = data.copy()  # shallow copy: don't mutate the caller's dict
    if 'libraries_processed' in data_to_save and isinstance(data_to_save['libraries_processed'], set):
        data_to_save['libraries_processed'] = list(data_to_save['libraries_processed'])
    try:
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(data_to_save, f, indent=2, ensure_ascii=False)
    except Exception as e:
        # Use the module-level logger (see get_logger at top of file) rather
        # than the root logger, for consistent handlers/formatting.
        logger.error(f"保存中间结果到 {path} 失败: {e}")


def load_interim_results(path: str) -> dict:
    """
    Load interim matching statistics from *path*.

    Returns an empty dict when the file is missing or unreadable so the
    caller can fall back to its defaults.

    Bug fix: ``libraries_processed`` is saved as a set of ``(library,
    version)`` tuples, which JSON round-trips as a list of 2-item *lists*.
    The previous ``set(data['libraries_processed'])`` raised ``TypeError:
    unhashable type: 'list'``, which was silently caught and made every
    resume start from scratch.  Entries are now converted back to tuples
    before building the set.
    """
    if not os.path.exists(path):
        # Return an empty dict; the caller supplies default values.
        return {}
    try:
        with open(path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        libs = data.get('libraries_processed')
        if isinstance(libs, list):
            # JSON arrays deserialize as lists; make each entry hashable again.
            data['libraries_processed'] = {
                tuple(entry) if isinstance(entry, list) else entry
                for entry in libs
            }
        return data
    except Exception as e:
        logging.warning(f"加载中间结果失败: {e}，将使用新统计。")
        # Return an empty dict; the caller supplies default values.
        return {}


def get_result_info(arkts_id, arkts_lib, arkts_version, function_candidate: FunctionCandidate):
    """
    Build a FunctionResult for a matched candidate by joining the candidate's
    match metrics with the npm-side function document from MongoDB.

    Args:
        arkts_id: id (string form) of the ArkTS function that was matched.
        arkts_lib: name of the ArkTS library the function belongs to.
        arkts_version: version string of the ArkTS library ('' when unknown).
        function_candidate: best-match candidate carrying the npm-side
            candidate_id plus the similarity metrics produced by the matcher.

    Returns:
        A populated FunctionResult, or None when the npm document is missing
        or any lookup error occurs (errors are logged, never raised).
    """
    npm_functions = get_npm_collection()
    try:
        # Project only the three fields needed for the result record.
        doc = npm_functions.find_one(
            {"_id": ObjectId(function_candidate.candidate_id)},
            {"name": 1, "library": 1, "version": 1}
        )
        if not doc:
            # Use the module logger (not the root logger) like the rest of
            # this module's code paths.
            logger.warning(f"未找到候选函数 {function_candidate.candidate_id} 的详细信息。")
            return None

        result = FunctionResult()
        fc = function_candidate
        result.npm_name = doc['name']
        result.npm_library = doc['library']
        # 'version' may be stored as a scalar or a list of versions;
        # normalize to a single comma-separated string.
        version = doc['version']
        result.npm_version = ', '.join(str(v) for v in version) if isinstance(version, list) else str(version)
        result.processing_time = fc.processing_time
        result.match_method = fc.match_method
        result.distance = fc.distance
        result.isomorphic_ratio = fc.isomorphic_ratio
        result.vector_similarity = fc.vector_similarity
        result.diff_similarity = fc.diff_similarity
        result.arkts_id = arkts_id
        result.arkts_library = arkts_lib
        result.npm_id = fc.candidate_id
        result.arkts_version = arkts_version
    except Exception as e:
        logger.error(f"获取结果信息时出错: {e}")
        return None

    return result


def process_arkts_collection(
        out_dir_base="./matching_results",
        max_workers=Config.MAX_WORKERS,
        batch_size_per_library=200,
):
    """
    Match every pending ArkTS function against the npm corpus and persist the
    best match per function to the result collection.

    The run is resumable: already-matched arkts_ids are read back from the
    result collection and excluded from the scan, and aggregate statistics
    are checkpointed to ``matching_interim_results.json`` after every batch
    and every finished library group.

    Args:
        out_dir_base: directory for interim/final statistics JSON files.
        max_workers: thread-pool size used per batch of match tasks.
        batch_size_per_library: page size when scanning a library's functions.
    """
    arkts_collection = get_arkts_collection()
    result_collection = get_result_collection()

    Path(out_dir_base).mkdir(parents=True, exist_ok=True)
    interim_results_path = f"{out_dir_base}/matching_interim_results.json"

    # === 1. Load already-completed arkts_ids (resume / checkpoint restart) ===
    completed_arkts_ids = set()
    try:
        cursor = result_collection.find({}, {'arkts_id': 1, '_id': 0})
        completed_arkts_ids = {doc['arkts_id'] for doc in cursor}
        logging.info(f"从 MongoDB 检测到 {len(completed_arkts_ids)} 个已完成任务。")
    except Exception as e:
        logging.warning(f"查询已完成任务失败: {e}")

    # === 2. Define the default stats structure, then merge in any
    # checkpointed values loaded from disk ===
    DEFAULT_MATCHING_RESULTS = {
        'total_arkts_functions': 0,
        'processed_count': 0,
        'error_count': 0,
        'libraries_processed': set(),
        'current_library': None,
        'current_library_version': None,
        'current_library_progress': 0,
    }

    loaded_results = load_interim_results(interim_results_path)
    # Merge defaults with loaded values so every expected key exists.
    matching_results = DEFAULT_MATCHING_RESULTS.copy()
    matching_results.update(loaded_results)
    logging.info("中间统计结果已加载或初始化。")

    # === 3. Collect the pending (library, version) groups ===
    # Only documents with an ast_feature and not yet matched are counted;
    # groups are sorted by ascending function count (smallest libraries first).
    pipeline = [
        {"$match": {
            "ast_feature": {"$exists": True},
            "_id": {"$nin": [ObjectId(oid) for oid in completed_arkts_ids]}
        }},
        {"$group": {
            "_id": {
                "library": "$library",
                "version": {"$ifNull": ["$version", ""]}
            },
            "count": {"$sum": 1}
        }},
        {"$sort": {"count": 1}}
    ]

    try:
        library_stats = list(arkts_collection.aggregate(pipeline))
        logging.info(f"共 {len(library_stats)} 个待处理 library。")
    except Exception as e:
        logging.critical(f"获取 library 列表失败: {e}")
        return

    # Total = already completed + still pending, so the bar reflects the
    # whole corpus, not just this run.
    total_pending = sum(item['count'] for item in library_stats)
    total_tasks = len(completed_arkts_ids) + total_pending
    matching_results['total_arkts_functions'] = total_tasks

    logging.info(f"总计需处理函数数: {matching_results['total_arkts_functions']}")

    initial_completed = len(completed_arkts_ids)
    pbar = tqdm(
        total=total_tasks,
        initial=initial_completed,
        desc="三方库比较进度",
        unit="func",
        file=sys.stderr,
        ncols=100,
        bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]",
    )

    total_libs = len(library_stats)
    pbar.set_postfix_str(
        f"Waiting...\n"
        f"Libs: 0/{total_libs}",
        refresh=True
    )

    # NOTE(review): processed_count from the interim file may overlap with
    # rows already counted in `initial` (both can describe the same inserted
    # results), so the bar can over-advance after a resume — confirm.
    if matching_results.get('processed_count', 0) > 0:
        already_processed_in_run = matching_results['processed_count']
        pbar.update(already_processed_in_run)

    # === 4. Process the groups one library/version at a time ===
    for item in library_stats:
        # NOTE(review): if _id were ever falsy, group_key would be the string
        # 'unknown' and the .get() calls below would fail; with the $group
        # pipeline above _id is always a dict, so this fallback looks dead.
        group_key = item['_id'] or 'unknown'
        library_name = group_key.get('library', 'unknown')
        arkts_version = group_key.get('version', '')
        lib_func_count = item['count']
        lib_version_key = (library_name, arkts_version)

        # Skip groups already marked done in a previous (checkpointed) run.
        if lib_version_key in matching_results['libraries_processed']:
            processed_libs = len(matching_results['libraries_processed'])
            pbar.set_postfix_str(
                f"Skipping: {library_name}@{arkts_version}\n"
                f"Groups: {processed_libs}/{total_libs}",
                refresh=True
            )
            logging.info(f"Library '{library_name}@{arkts_version}' 已完成，跳过。")
            continue

        current_processed_libs = len(matching_results['libraries_processed'])
        remaining_libs = total_libs - current_processed_libs
        pbar.set_postfix_str(
            f"Current: {library_name}@{arkts_version} ({lib_func_count} funcs)\n"
            f"Libs: {current_processed_libs}/{total_libs} (Rem: {remaining_libs})",
            refresh=True
        )
        logger.debug(f"开始处理 Library: '{library_name}@{arkts_version}' ({item['count']} 个函数)")
        matching_results['current_library'] = library_name
        matching_results['current_library_version'] = arkts_version
        matching_results['current_library_progress'] = 0

        skip = 0
        total_in_lib = item['count']

        # Page through this group's functions with skip/limit; the $nin filter
        # keeps the scanned set stable because new results go to a different
        # collection and completed_arkts_ids is not updated mid-run.
        while skip < total_in_lib:
            docs = list(arkts_collection.find(
                {
                    "library": library_name,
                    "version": arkts_version,
                    "ast_feature": {"$exists": True},
                    "_id": {"$nin": [ObjectId(oid) for oid in completed_arkts_ids]}
                },
                {"_id": 1},
                skip=skip,
                limit=batch_size_per_library
            ))

            if not docs:
                break

            arkts_ids = [str(doc["_id"]) for doc in docs]

            # Fan out one match task per function id across the thread pool.
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                future_to_id = {
                    executor.submit(find_best_match_for_target, aid, MatchType.TPL_MATCH): aid
                    for aid in arkts_ids
                }

                for future in as_completed(future_to_id):
                    arkts_id = future_to_id[future]
                    try:
                        result = future.result()
                        # None means the matcher produced nothing; neither a
                        # success nor an error is counted.
                        if result is None:
                            continue
                        best_match, all_matches = result
                        if best_match:
                            result_info = get_result_info(arkts_id, library_name, arkts_version, best_match)
                            if result_info:
                                result_collection.insert_one(result_info.to_dict())
                                matching_results['processed_count'] += 1
                            else:
                                matching_results['error_count'] += 1
                        else:
                            matching_results['error_count'] += 1
                    except Exception as exc:
                        matching_results['error_count'] += 1
                        logging.error(f"处理函数 {arkts_id} 时出错: {exc}")
                    pbar.update(1)
                    matching_results['current_library_progress'] += 1

            skip += len(arkts_ids)
            # Checkpoint after every completed batch.
            save_interim_results(matching_results, interim_results_path)

        # Mark the whole (library, version) group done and checkpoint.
        matching_results['libraries_processed'].add(lib_version_key)
        matching_results['current_library'] = None
        matching_results['current_library_version'] = None
        matching_results['current_library_progress'] = 0
        save_interim_results(matching_results, interim_results_path)
        logger.debug(f"✅ Library '{library_name}@{arkts_version}' 处理完成。")

    pbar.set_postfix_str(
        f"All Done!\n"
        f"Libs: {total_libs}/{total_libs} | Total Errors: {matching_results['error_count']}",
        refresh=True
    )
    pbar.close()

    # === 5. Save the final statistics (sets converted for JSON) ===
    final_stats_file = f"{out_dir_base}/final_matching_statistics.json"
    try:
        final_data = matching_results.copy()
        if 'libraries_processed' in final_data and isinstance(final_data['libraries_processed'], set):
            final_data['libraries_processed'] = list(final_data['libraries_processed'])

        Path(final_stats_file).write_text(
            json.dumps(final_data, indent=2, ensure_ascii=False)
        )
        logging.info(f"✅ 最终统计已保存至: {final_stats_file}")
    except Exception as e:
        logging.error(f"保存最终统计失败: {e}")


def add_fixed_indexes(collection):
    """
    Ensure single-field ascending indexes exist on the fields the matching
    queries filter on (name, library, ast_feature, vector, version).

    Existing indexes are inspected once up front; an index is created only
    when no index already keys the field with the same direction.

    Args:
        collection: a pymongo Collection to index.
    """
    indexes_to_add = [
        ("name", ASCENDING),
        ("library", ASCENDING),
        ("ast_feature", ASCENDING),
        ("vector", ASCENDING),
        ("version", ASCENDING),
    ]

    # Snapshot the existing index keys once instead of re-querying the server
    # on every loop iteration; the fields below are distinct, so an index
    # created in this loop can never satisfy a later field's check anyway.
    existing_keys = [dict(index['key']) for index in collection.list_indexes()]

    for field, index_type in indexes_to_add:
        # (Removed the dead `index_type == "text"` branch: every entry above
        # uses ASCENDING, so the text path was unreachable.)
        if any(keys.get(field) == index_type for keys in existing_keys):
            logger.info(f"Index already exists for field '{field}' with type '{index_type}'.")
        else:
            collection.create_index([(field, index_type)])
            logger.info(f"Index created for field '{field}' with type '{index_type}'.")


def process_arkts_libraries():
    """
    Entry point: ensure the npm collection has the indexes the match queries
    need, then run the full ArkTS-vs-npm matching pass.
    """
    npm = get_npm_collection()
    add_fixed_indexes(npm)
    process_arkts_collection(
        out_dir_base="./matching_results",  # was a pointless f-string with no placeholders
        max_workers=8,
        batch_size_per_library=100,
    )
