import os
import re
import pandas as pd

from datetime import datetime

# Configuration parameters
DUMP_DIR_FILE = "dump_dir.txt"  # file list produced by `tree` (see __main__ note)
EXCEL_ROOT = "excel_data"  # root directory of the Excel workbooks
OUTPUT_ROOT = "output_script"  # directory holding the generated SQL scripts
STORE_DIR = "offline_dump"  # backup storage directory
LOG_FILE = f"validation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"  # timestamped log file name


def parse_dump_dir(tree_file):
    """Parse the multi-level `tree` output and return the set of backup paths.

    Args:
        tree_file: path to the listing produced by
            ``tree -N --noreport -i -f`` (one path per line).

    Returns:
        set[str]: every line whose basename ends with ``.sql.gz``.  Lines
        are fully stripped (the old ``line.replace('\\n', '')`` left a
        trailing ``\\r`` behind on CRLF files, and any trailing spaces)
        and path separators are normalised to ``/`` so entries compare
        equal to the expected paths built in ``collect_expected_data``.
    """
    dump_files = set()

    with open(tree_file, 'r', encoding='utf-8') as f:
        for raw_line in f:
            # Strip the newline (plus any stray \r or spaces) exactly once.
            path = raw_line.strip()
            if os.path.basename(path).endswith('.sql.gz'):
                dump_files.add(path.replace("\\", "/"))

    return dump_files


def extract_database_name(file_name):
    """Derive the database name from an Excel file name.

    Drops the extension, then removes a trailing 8-digit date suffix
    such as ``_20240101`` if one is present
    (e.g. ``sales_20240101.xlsx`` -> ``sales``).
    """
    stem = os.path.splitext(file_name)[0]
    date_suffix = re.search(r'_\d{8}$', stem)
    return stem[:date_suffix.start()] if date_suffix else stem


def generate_expected_path(db_name, table_name):
    """Build the expected backup file path for one table.

    Mirrors the dump command's naming scheme:
    ``STORE_DIR/<db>/<base_prefix>/<base_prefix>[_year[_month]].sql.gz``.
    """
    # Split the table name into its base prefix and optional year/month.
    prefix, year, month = extract_date_info(table_name)
    base_prefix = re.sub(r'(_\d+)+$', '', prefix)

    # Assemble the file name from whichever date parts are present
    # (a month without a year is ignored, as before).
    name_parts = [base_prefix]
    if year:
        name_parts.append(year)
        if month:
            name_parts.append(month)
    filename = "_".join(name_parts) + ".sql.gz"

    return os.path.normpath(os.path.join(STORE_DIR, db_name, base_prefix, filename))


def _table_names_from_sheet(df):
    """Yield the non-empty table names from one sheet's DataFrame.

    The table-name column is column 1 when the sheet has at least two
    columns, otherwise column 0 (same rule as process_sheet).
    """
    col_index = 1 if df.shape[1] >= 2 else 0
    for _, row in df.iterrows():
        cell = row[col_index]
        # Look the cell up once instead of twice per row.
        if pd.notna(cell):
            table = str(cell).strip()
            if table:
                yield table


def _expected_dump_path(store_dir, db_name, table_name):
    """Build the backup path the dump command would produce for *table_name*."""
    prefix, year, month = extract_date_info(table_name)
    base_prefix = re.sub(r'(_\d+)+$', '', prefix)

    # The file name carries whichever date parts the table name encodes.
    if year and month:
        filename = f"{base_prefix}_{year}_{month}.sql.gz"
    elif year:
        filename = f"{base_prefix}_{year}.sql.gz"
    else:
        filename = f"{base_prefix}.sql.gz"

    # Use "/" so entries compare equal to the paths parsed from dump_dir.txt.
    return os.path.join(store_dir, db_name, base_prefix, filename).replace("\\", "/")


def collect_expected_data(excel_root, store_dir):
    """Collect the expected backup-file paths and the full set of table names.

    Walks *excel_root* for ``.xlsx`` workbooks.  The database name comes
    from the workbook file name (minus an optional ``_YYYYMMDD`` suffix)
    and every sheet row contributes one table.

    Args:
        excel_root: directory tree containing the Excel workbooks.
        store_dir: root of the backup store used when building paths.

    Returns:
        tuple[set[str], set[str]]: ``(expected_files, all_tables)`` where
        table names are recorded as ``database.table``.
    """
    expected_files = set()
    all_tables = set()

    for root, _, files in os.walk(excel_root):
        for file in files:
            if not file.endswith('.xlsx'):
                continue

            excel_path = os.path.join(root, file)
            database_name = extract_database_name(file)

            try:
                xls = pd.ExcelFile(excel_path)
                # Every sheet in the workbook is scanned.
                for sheet_name in xls.sheet_names:
                    df = pd.read_excel(xls, sheet_name=sheet_name, header=None)
                    for table in _table_names_from_sheet(df):
                        all_tables.add(f"{database_name}.{table}")
                        expected_files.add(
                            _expected_dump_path(store_dir, database_name, table))
            except Exception as e:
                # Best effort: a broken workbook must not abort the whole scan.
                print(f"处理文件 {excel_path} 失败: {str(e)}")
                continue

    return expected_files, all_tables


def collect_dropped_tables():
    """Collect every ``database.table`` named in a generated drop script.

    Recursively scans OUTPUT_ROOT for ``drop_*.sql`` files and extracts
    the targets of ``drop table if exists db.table;`` statements.
    """
    drop_stmt = re.compile(r'drop table if exists (\w+)\.(\w+);')
    dropped_tables = set()

    for root, _, files in os.walk(OUTPUT_ROOT):
        for file_name in files:
            if not (file_name.startswith('drop_') and file_name.endswith('.sql')):
                continue
            with open(os.path.join(root, file_name), 'r', encoding='utf-8') as fh:
                for db, table in drop_stmt.findall(fh.read()):
                    dropped_tables.add(f"{db}.{table}")

    return dropped_tables


def validate_and_log():
    """Run the backup validation and write the results to LOG_FILE."""
    # Gather the three data sets to compare.
    actual_files = parse_dump_dir(DUMP_DIR_FILE)
    expected_files, all_tables = collect_expected_data(EXCEL_ROOT, STORE_DIR)
    dropped_tables = collect_dropped_tables()

    # Set differences yield the three classes of problems.
    missing_files = expected_files - actual_files
    extra_files = actual_files - expected_files
    missing_drop = all_tables - dropped_tables

    # Assemble the report: summary header, then one section per problem class.
    report = [
        f"校验时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        f"总表数量: {len(all_tables)}",
        f"预期备份文件数: {len(expected_files)}",
        f"实际备份文件数: {len(actual_files)}",
    ]
    report.append("\n[缺失备份文件]")
    report.extend(sorted(missing_files))
    report.append("\n[多余备份文件]")
    report.extend(sorted(extra_files))
    report.append("\n[缺失drop语句的表]")
    report.extend(sorted(missing_drop))

    # Persist the report.
    with open(LOG_FILE, 'w', encoding='utf-8') as f:
        f.write("\n".join(report))

    print(f"校验完成，结果已保存到: {LOG_FILE}")


# 需要从原始代码复用的函数
def extract_date_info(table_name):
    """Split a table name into ``(prefix, year, month)``.

    Reused from the original dump script — keep in lockstep with it.
    Returns ``(table_name, None, None)`` when the pattern does not match.

    NOTE(review): the ``len(year) == 6`` branch looks intended for names
    like ``t_202401`` where year and month are fused, but group 2 of the
    pattern captures exactly four digits — confirm against the original
    script whether that branch can ever fire.
    """
    pattern = r'^(.*?)(?:_|$)(20\d{2})?(_?(\d{1,2}))?(_\d{1,2})*$'
    matched = re.match(pattern, table_name)
    if not matched:
        return table_name, None, None

    prefix = matched.group(1).rstrip('_')
    year, month = matched.group(2), matched.group(4)

    # Fallback for a fused YYYYMM value captured as "year".
    if year and not month and len(year) == 6:
        month = year[4:6].lstrip('0')
        year = year[:4]

    return prefix, year, month


if __name__ == "__main__":
    # Validate that every expected dump/backup file actually exists.
    # Generate the backup directory listing first with:
    #   tree -N --noreport -i -f --charset=ascii offline_dump > dump_dir.txt
    validate_and_log()
