from os import path

from tqdm import tqdm

from config.config import Config
from config.log_config import get_logger
from version_match.version_match_corev2 import find_closest_npm_versions

logger = get_logger()


def batch_process_csv_with_progress(input_file: str, output_file: str = None):
    """
    Batch-process a CSV file with a progress bar.

    Reads the 'arktsLib' and 'npmLib' columns, calls the version-matching
    logic for each row, and writes the match result into the 'npmVersion'
    column. Rows with empty/NaN library names or matcher failures get an
    'ERROR: ...' marker instead, and processing continues.

    Args:
        input_file: Path to the input CSV. Must contain 'arktsLib' and
            'npmLib' columns.
        output_file: Path to write the resulting CSV. When None, the input
            file is updated in place.

    Returns:
        dict with 'total_rows', 'processed_count', 'error_count' and
        'output_file'.

    Raises:
        ValueError: if a required column is missing.
        Exception: any other failure is logged and re-raised.
    """
    import pandas as pd

    if output_file is None:
        # Default behavior: overwrite the input file in place.
        # (The original code split and rejoined the extension, which
        # produced exactly the same path.)
        output_file = input_file

    logger.info(f"开始批量处理: {input_file} -> {output_file}")

    try:
        df = pd.read_csv(input_file)

        # Validate that the required input columns exist.
        required_columns = ['arktsLib', 'npmLib']
        missing_columns = [col for col in required_columns if col not in df.columns]
        if missing_columns:
            raise ValueError(f"CSV文件必须包含以下列: {', '.join(missing_columns)}")

        # Create the output column if it is not already present.
        if 'npmVersion' not in df.columns:
            df['npmVersion'] = ''

        # Counters for the summary log.
        total_rows = len(df)
        processed_count = 0
        error_count = 0
        # Force string dtype so both match results and error markers fit.
        df['npmVersion'] = df['npmVersion'].astype(str)
        for idx, row in tqdm(df.iterrows(), total=total_rows, desc="版本比较进度"):
            arkts_lib = str(row['arktsLib']).strip()
            npm_lib = str(row['npmLib']).strip()

            # 'nan' appears when the cell was NaN and got str()-ed above.
            if arkts_lib == 'nan' or npm_lib == 'nan' or not arkts_lib or not npm_lib:
                df.at[idx, 'npmVersion'] = 'ERROR: Empty library name'
                error_count += 1
                continue

            try:
                result = find_closest_npm_versions(npm_lib, arkts_lib)
                # The matcher may return a single value or a list of
                # candidate versions; flatten a one-element list.
                if isinstance(result, list):
                    if len(result) == 1:
                        df.at[idx, 'npmVersion'] = result[0]
                    else:
                        df.at[idx, 'npmVersion'] = f"[{', '.join(result)}]"
                else:
                    df.at[idx, 'npmVersion'] = str(result)

                processed_count += 1

            except Exception as e:
                # Record the failure in the row and keep going; one bad row
                # must not abort the whole batch.
                df.at[idx, 'npmVersion'] = f"ERROR: {str(e)}"
                error_count += 1
                logger.warning(f"处理第{idx + 1}行时出错 - arktsLib: {arkts_lib}, npmLib: {npm_lib}, 错误: {str(e)}")

        # Persist the results.
        df.to_csv(output_file, index=False)

        # Summary. The original logged a hard-coded "Result/<basename>"
        # path that is NOT where to_csv wrote the file, and used quote
        # nesting that is a SyntaxError before Python 3.12 — log the real
        # destination instead.
        logger.info("比较完成！")
        logger.info(f"成功处理: {processed_count}")
        logger.info(f"处理出错: {error_count}")
        logger.info(f"结果已保存到: {output_file}")

        return {
            'total_rows': total_rows,
            'processed_count': processed_count,
            'error_count': error_count,
            'output_file': output_file
        }

    except Exception as e:
        logger.error(f"批量处理失败: {str(e)}")
        raise


def batch_process_csv_directory(directory_path: str, output_suffix: str = "_with_versions"):
    """
    Batch-process every CSV file in a directory (non-recursive).

    Args:
        directory_path: Directory to scan for '*.csv' files.
        output_suffix: Suffix inserted before each file's extension for the
            output, e.g. 'data.csv' -> 'data_with_versions.csv'. (This
            parameter was previously accepted but ignored, which silently
            overwrote every input file.)

    Returns:
        A list with one entry per file: the result dict from
        batch_process_csv_with_progress on success, or
        {'file': ..., 'error': ...} on failure. Empty list when the
        directory contains no CSV files.

    Raises:
        ValueError: if directory_path does not exist.
    """
    import os
    import glob

    if not os.path.exists(directory_path):
        raise ValueError(f"目录不存在: {directory_path}")

    # Find all CSV files directly inside the directory.
    csv_files = glob.glob(os.path.join(directory_path, "*.csv"))

    if not csv_files:
        logger.warning(f"目录 {directory_path} 中没有找到CSV文件")
        # Return an empty list (not None) so callers can iterate safely.
        return []

    logger.info(f"在目录 {directory_path} 中找到 {len(csv_files)} 个CSV文件")

    results = []
    for csv_file in csv_files:
        try:
            logger.info(f"开始处理文件: {csv_file}")
            # Honor output_suffix: write results alongside the input
            # instead of overwriting it.
            name, ext = os.path.splitext(csv_file)
            result = batch_process_csv_with_progress(csv_file, f"{name}{output_suffix}{ext}")
            results.append(result)
        except Exception as e:
            # One failing file must not stop the batch; record and continue.
            logger.error(f"处理文件 {csv_file} 时出错: {str(e)}")
            results.append({'file': csv_file, 'error': str(e)})

    return results


# 使用示例
def process_versions():
    """Run the batch version match on the configured result CSV file."""
    input_csv = Config.RESULT_CSV_FILE
    batch_process_csv_with_progress(input_csv)
