import csv
import logging
import os
from typing import Dict, List, Optional, Set, Tuple

from pymongo import UpdateOne
from tqdm import tqdm

from config.config import Config
from config.log_config import get_logger
from utils.mongo_utils import get_result_collection, get_library_info_collection, get_lib_res_collection


logger = get_logger()


def load_library_data() -> Dict[str, float]:
    """Map each npm library referenced in the result collection to its average_number.

    Reads the distinct, non-blank ``npm_library`` values from the result
    collection, then looks up each library's ``average_number`` in the
    library-info collection.

    Returns:
        Dict mapping library name to its average_number. Libraries whose
        info document lacks ``average_number`` default to 0 (downstream
        code skips entries with a non-positive average).
    """
    result_collection = get_result_collection()
    npm_libraries = result_collection.distinct("npm_library")
    # Drop empty / whitespace-only library names before querying.
    npm_libraries = [lib for lib in npm_libraries if lib and lib.strip()]
    library_info_collection = get_library_info_collection()
    cursor = library_info_collection.find(
        {"library": {"$in": npm_libraries}},
        {"library": 1, "average_number": 1, "_id": 0}
    )
    # .get() guards against info documents missing the average_number field,
    # which would otherwise raise KeyError here.
    return {doc['library']: doc.get('average_number', 0) for doc in cursor}


def calculate_and_save_usage_rates(
        existing_libraries: Dict[str, float]
) -> None:
    """Compute per-(arkts_library, arkts_version) usage rates and upsert them.

    For every (arkts_library, arkts_version) group in the function-result
    collection, counts how often each npm library appears, divides that count
    by the library's average_number taken from *existing_libraries*, and
    bulk-upserts the resulting rate into the lib-res collection.

    Args:
        existing_libraries: Mapping of npm library name -> average_number,
            as produced by ``load_library_data``. Libraries absent from the
            mapping, or with a non-positive average, are skipped.
    """
    function_result_collection = get_result_collection()
    lib_res_collection = get_lib_res_collection()
    try:
        arkts_libraries = function_result_collection.aggregate([
            {
                "$group": {
                    "_id": {
                        "arkts_library": "$arkts_library",
                        "arkts_version": "$arkts_version"
                    },
                    "libraries": {"$push": "$npm_library"}
                }
            }
        ])

        arkts_lib_data: List[Tuple[str, str, Dict[str, int]]] = []
        for arkts_lib in arkts_libraries:
            arkts_library = arkts_lib['_id']['arkts_library']
            arkts_version = arkts_lib['_id']['arkts_version']

            # Count occurrences of each non-blank npm library in this group.
            library_counts: Dict[str, int] = {}
            for library in arkts_lib['libraries']:
                if library and library.strip():
                    library_counts[library] = library_counts.get(library, 0) + 1

            if library_counts:
                arkts_lib_data.append((arkts_library, arkts_version, library_counts))

        lib_res_updates = []
        for arkts_library, arkts_version, library_counts in tqdm(arkts_lib_data, desc="计算rate"):
            for library_name, res_function_number in library_counts.items():
                # Single dict lookup (the original did an `in` check followed
                # by .get() on the same key).
                used_average = existing_libraries.get(library_name, 0)
                if used_average > 0:
                    rate = res_function_number / used_average
                    lib_res_updates.append(
                        UpdateOne(
                            # Query includes arkts_version so different
                            # versions of the same library stay separate.
                            {
                                "arkts_library": arkts_library,
                                "arkts_version": arkts_version,
                                "library": library_name
                            },
                            {
                                "$set": {
                                    "rate": rate,
                                    "res_function_number": res_function_number,
                                    "used_average": used_average,
                                    "arkts_version": arkts_version  # persist the version on upsert
                                }
                            },
                            upsert=True
                        )
                    )

        if lib_res_updates:
            # Write in batches to keep each bulk_write payload bounded.
            batch_size = 1000
            for i in range(0, len(lib_res_updates), batch_size):
                lib_res_collection.bulk_write(lib_res_updates[i:i + batch_size])
    except Exception as e:
        # Use the module-level logger (the original called logging.error,
        # bypassing the configuration done by get_logger()).
        logger.error(f"计算rate过程中出现错误: {e}")


def process_arkts_libraries_to_csv(output_dir: Optional[str] = None) -> None:
    """Export the best-matching npm library per (arkts_library, arkts_version) to CSV.

    Groups lib-res documents with rate > 0.2 by (arkts_library, arkts_version),
    picks the npm library with the highest rate in each group, and writes one
    CSV row per group with columns arktsLib, arktsVersion, npmLib, npmVersion.

    Args:
        output_dir: Directory for the CSV file. If None, Config.RESULT_DIR
            is used.

    Raises:
        Exception: Any failure during aggregation or file writing is logged
            and re-raised.
    """
    # 1. Resolve the output directory.
    if output_dir is None:
        output_dir = Config.RESULT_DIR

    # 2. Build the full output file path.
    output_file = os.path.join(output_dir, Config.RESULT_CSV_FILE_NAME)
    # 3. Create the output directory up front so the file open cannot fail on it.
    os.makedirs(output_dir, exist_ok=True)
    try:
        lib_res_collection = get_lib_res_collection()
        # Group by both arkts_library and arkts_version; keep only rates > 0.2.
        pipeline = [
            {"$match": {"rate": {"$gt": 0.2}}},
            {
                "$group": {
                    "_id": {
                        "arkts_library": "$arkts_library",
                        "arkts_version": "$arkts_version"
                    },
                    "npm_libraries": {
                        "$push": {
                            "library": "$library",
                            "rate": "$rate",
                            "res_function_number": "$res_function_number",
                            "used_average": "$used_average"
                        }
                    }
                }
            }
        ]
        results = lib_res_collection.aggregate(pipeline)
        csv_data: List[Dict[str, str]] = []
        for result in results:
            npm_libs = result['npm_libraries']
            if not npm_libs:
                continue

            # Keep only the npm library with the highest usage rate.
            best_npm_lib = max(npm_libs, key=lambda x: x['rate'])
            csv_data.append({
                "arktsLib": result['_id']['arkts_library'],
                "arktsVersion": result['_id']['arkts_version'],
                "npmLib": best_npm_lib['library'],
                "npmVersion": ""  # npm version is not tracked in lib-res; left blank
            })

        # 4. Write the CSV file.
        with open(output_file, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=["arktsLib", "arktsVersion", "npmLib", "npmVersion"])
            writer.writeheader()
            writer.writerows(csv_data)

        # Use the module-level logger (the original called logging.info/error,
        # bypassing the configuration done by get_logger()).
        logger.info(f"结果CSV文件已成功生成: Result/{os.path.basename(output_file)}, 共包含 {len(csv_data)} 条记录")
    except Exception as e:
        logger.error(f"处理过程中出现错误: {e}")
        raise


def get_library_result_csv():
    """Run the full pipeline: load library averages, compute rates, export CSV."""
    # Steps 1+2: read per-library average_number values from the database and
    # feed them straight into the rate computation / persistence step.
    calculate_and_save_usage_rates(load_library_data())
    # Step 3: aggregate the stored rates and emit the final CSV file.
    process_arkts_libraries_to_csv()

