import pandas as pd
import json
import hashlib


def generate_md5_from_list(ids_string):
    """Generate a 32-character MD5 hex digest from a comma-separated id string.

    The ids are split on ',', stripped of surrounding whitespace, and
    concatenated WITHOUT a separator before hashing.

    NOTE(review): because fragments are joined with no delimiter,
    "1,23" and "12,3" hash to the same value — confirm this is intended
    before relying on the code as a unique key.

    Args:
        ids_string: comma-separated id string, e.g. "1,2,3".

    Returns:
        str: 32-character lowercase MD5 hex string, or None when the
        input is empty/blank or hashing fails.
    """
    if not ids_string or not ids_string.strip():
        return None

    try:
        # Concatenate the stripped id fragments and hash the result.
        input_string = ''.join(part.strip() for part in ids_string.split(','))

        # hexdigest() already returns exactly 32 lowercase hex characters,
        # so no extra zero-padding/formatting is needed.
        return hashlib.md5(input_string.encode('utf-8')).hexdigest()

    except Exception as e:
        # Best-effort: report (e.g. non-string input without split()) and
        # signal failure with None instead of crashing the pipeline.
        print(f"生成MD5时出错: {e}")
        return None


def determine_plan_type(name):
    """Map a plan description to its plan-type label.

    Keywords are checked in order and the first hit wins:
    "预习" -> "新知预习", "提升" -> "专项提升", "基础" -> "基础夯实".
    Anything else falls back to "要点学习".

    Args:
        name: plan description (coerced to str, so any value is accepted).

    Returns:
        str: the plan-type label.
    """
    text = str(name)

    # Ordered (keyword, label) pairs; order matters because the first
    # matching keyword determines the result.
    keyword_labels = (
        ("预习", "新知预习"),
        ("提升", "专项提升"),
        ("基础", "基础夯实"),
    )
    for keyword, label in keyword_labels:
        if keyword in text:
            return label
    return "要点学习"


def process_csv_excel_data(csv_file_path, excel_file_path):
    """Match plan descriptions from a CSV file against template rows in Excel.

    For each distinct `planDescriptions` value in the CSV, the value is
    split on the full-width comma '，', each fragment is looked up in the
    Excel `name` column, and the matching `id`s are collected, joined with
    ',' and hashed into an MD5 code.

    Args:
        csv_file_path: path to the CSV file; must contain `subject` and
            `planDescriptions` columns.
        excel_file_path: path to the Excel file; must contain `name` and
            `id` columns.

    Returns:
        list[dict]: one entry per matched planDescription with keys
        name / subject / planType / planName / id / md5Code.
    """
    # 1. Read the CSV file.
    print("正在读取CSV文件...")
    df_csv = pd.read_csv(csv_file_path)

    # 2. Keep every record (no subject filtering).
    print(f"CSV文件共有 {len(df_csv)} 条记录")

    # 3. Deduplicate on the whole planDescriptions field (no splitting),
    #    keeping the associated subject, and drop missing descriptions.
    print("对planDescriptions字段进行去重...")
    df_unique = (
        df_csv[['subject', 'planDescriptions']]
        .drop_duplicates(subset=['planDescriptions'])
        .dropna(subset=['planDescriptions'])
    )
    print(f"去重后共有 {len(df_unique)} 个不同的planDescription")

    # 4. Read the Excel file.
    print("正在读取Excel文件...")
    df_excel = pd.read_excel(excel_file_path)

    # 5. Match each deduplicated planDescription against the Excel names.
    print("进行数据匹配...")
    matched_results = []

    # Use a distinct loop variable (`unique_row`) so the inner iteration
    # below cannot shadow it (the original reused `row` for both loops).
    for _, unique_row in df_unique.iterrows():
        plan_desc = unique_row['planDescriptions']
        subject = unique_row['subject']

        # Split on the full-width comma '，' (note: NOT the enumeration
        # comma '、' — the original comment was misleading).
        individual_plans = [p.strip() for p in str(plan_desc).split('，') if p.strip()]

        # Collect the id of every Excel row whose name equals a fragment.
        all_matched_ids = []
        for plan in individual_plans:
            matching_rows = df_excel[df_excel['name'] == plan]
            if not matching_rows.empty:
                all_matched_ids.extend(str(matched_id) for matched_id in matching_rows['id'])

        if all_matched_ids:
            # Join all ids with commas, then derive MD5 / type / name.
            id_string = ','.join(all_matched_ids)
            md5_code = generate_md5_from_list(id_string)
            plan_type = determine_plan_type(plan_desc)
            plan_name = f"{subject}{plan_type}计划"

            matched_results.append({
                "name": plan_desc,      # original full planDescription
                "subject": subject,     # subject/discipline
                "planType": plan_type,  # plan type label
                "planName": plan_name,  # generated plan name
                "id": id_string,        # comma-separated matched ids
                "md5Code": md5_code     # MD5 of the concatenated ids
            })
        else:
            # No Excel row matched any fragment of this description.
            print(f"未匹配到任何id: {plan_desc}")

    print(f"总共处理了 {len(matched_results)} 个planDescription")

    return matched_results


def generate_sql(results):
    """Build INSERT statements for the `diagnostic_template_plan` table.

    A statement is emitted only for entries whose `md5Code` is truthy,
    but the numeric id counts EVERY entry, so skipped entries leave a
    gap — this keeps each entry's id tied to its input position.

    Args:
        results: list of dicts with keys planName / md5Code / id.

    Returns:
        str: newline-joined INSERT statements ('' when results is empty
        or nothing qualifies).
    """
    if not results:
        return ""

    def _quote(value):
        # Escape single quotes ('' is the SQL escape) so a value that
        # contains an apostrophe cannot break out of the literal.
        return str(value).replace("'", "''")

    sql_statements = []
    for i, result in enumerate(results, start=1):
        if not result['md5Code']:  # skip entries without an MD5 code
            continue
        sql_statements.append(
            f"""INSERT INTO `diagnostic_template_plan` 
(`id`, `name`, `code`, `desk_template`, `biz_code`, `deleted`) 
VALUES 
({i}, '{_quote(result['planName'])}', '{_quote(result['md5Code'])}', '{_quote(result['id'])}', 'desk', 0);"""
        )

    return '\n'.join(sql_statements)


def main():
    """Entry point: run the matching pipeline, then write JSON and SQL outputs."""
    # Hard-coded Windows input paths.
    csv_file_path = "D:\\project_file\\诊断式学习\\enhanced_dialog_paths_1(1).csv"
    excel_file_path = "D:\\project_file\\诊断式学习\\最新完整生产模板计划\\生产模板计划062615点完整.xlsx"

    try:
        # Match the CSV plan descriptions against the Excel template rows.
        results = process_csv_excel_data(csv_file_path, excel_file_path)

        print("\n匹配结果:")

        # Save the matched results as JSON.
        with open('matched_results_v2.json', 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2)

        # BUGFIX: the message previously said 'matched_results.json' while
        # the file actually written is 'matched_results_v2.json'.
        print(f"\n结果已保存到 matched_results_v2.json 文件")

        # Generate and save the INSERT statements.
        sql_content = generate_sql(results)

        if sql_content:
            with open('insert_statements.sql', 'w', encoding='utf-8') as f:
                f.write(sql_content)

            print(f"\nSQL语句已保存到 insert_statements.sql 文件")
            print(f"生成了 {len([r for r in results if r['md5Code']])} 条INSERT语句")
        else:
            print("\n没有生成有效的SQL语句")

    except FileNotFoundError as e:
        print(f"文件未找到: {e}")
    except Exception as e:
        # Broad top-level catch keeps the script from crashing with a
        # raw traceback; the error is still reported.
        print(f"处理过程中出现错误: {e}")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()