import pandas as pd
import pyodbc
import os
import re
import numpy as np
from pathlib import Path
from datetime import datetime
import traceback

from numpy.ma.extras import column_stack


def load_field_mapping_config(file_path, sheet_name='Sheet1'):
    """Load the header-mapping configuration from an Excel mapping workbook.

    Row 1 of the sheet holds the Access field names; every subsequent row is
    one candidate Excel-header layout mapped onto those fields.  A cell that
    contains arithmetic characters (+ - * / ( ) [ ]) is treated as a computed
    expression whose [bracketed] tokens name the required source columns.

    Args:
        file_path: path to the mapping workbook (e.g. "表头映射.xlsx").
        sheet_name: worksheet to read; defaults to 'Sheet1' for backward
            compatibility with the original hard-coded value.

    Returns:
        (header_configs, access_field_order) where header_configs is a list
        of dicts with keys 'row_index', 'field_mapping', 'header',
        'expression_mapping' and 'required_columns', and access_field_order
        is the ordered list of non-empty Access field names.  Both are empty
        lists on any read/parse error.
    """
    try:
        df = pd.read_excel(file_path, sheet_name=sheet_name, header=None)
        access_field_names = df.iloc[0].tolist()  # first row: Access field names

        # One entry per usable header-layout row.
        header_configs = []

        # Every row after the first is one candidate header layout.
        for row_idx in range(1, len(df)):
            excel_header_row = df.iloc[row_idx].tolist()

            row_field_mapping = {}
            row_expression_mapping = {}
            row_required_columns = set()

            for col_idx, excel_header in enumerate(excel_header_row):
                # Skip columns that have no corresponding Access field.
                if col_idx >= len(access_field_names) or pd.isna(access_field_names[col_idx]):
                    continue

                access_field = access_field_names[col_idx]

                if pd.notna(excel_header) and pd.notna(access_field):
                    excel_header_str = str(excel_header).strip()
                    access_field_str = str(access_field).strip()

                    if re.search(r'[+\-*/()\[\]]', excel_header_str):
                        # Computed field: remember the expression and the
                        # [bracketed] source columns it requires.
                        row_expression_mapping[access_field_str] = excel_header_str
                        expr_columns = re.findall(r'\[([^\]]+)\]', excel_header_str)
                        row_required_columns.update(expr_columns)
                    else:
                        # Plain rename: Excel header -> Access field.
                        row_field_mapping[excel_header_str] = access_field_str
                        row_required_columns.add(excel_header_str)

            # Keep only rows that produced at least one usable mapping.
            if row_field_mapping or row_expression_mapping:
                header_configs.append({
                    'row_index': row_idx + 1,  # 1-based Excel row number
                    'field_mapping': row_field_mapping,
                    'header': df.iloc[row_idx].tolist(),
                    'expression_mapping': row_expression_mapping,
                    'required_columns': list(row_required_columns),
                })

        access_field_order = [str(field).strip()
                              for field in access_field_names if pd.notna(field)]

        return header_configs, access_field_order

    except Exception as e:
        print(f"读取映射配置文件时出错: {str(e)}")
        return [], []


def find_matching_header_row(file_path, header_configs, max_rows_to_check=20):
    """Scan the first rows of every sheet in an Excel file for a header row
    that satisfies one of the mapping configurations.

    Returns a (config_index, header_row_index, dataframe) triple for the
    first match found, or (None, None, None) when nothing matches.
    """
    def _normalize(name):
        # Ignore '*', parentheses (ASCII and full-width) and whitespace,
        # and compare case-insensitively.
        return re.sub(r'[*()（）\s]', '', name).lower()

    try:
        for candidate_row in range(max_rows_to_check):
            try:
                # Sample one data row per sheet with this candidate header row.
                sheets = pd.read_excel(file_path, header=candidate_row, nrows=1, sheet_name=None)
                for sheet_name, sample in sheets.items():
                    print(f"try find row on sheet :{sheet_name}")

                    # A single-column "header" is never a real table header.
                    if len(sample.columns) <= 1:
                        continue

                    normalized_cols = {_normalize(str(col).strip())
                                       for col in sample.columns if pd.notna(col)}

                    for config_idx, config in enumerate(header_configs):
                        missing = [req for req in config['required_columns']
                                   if _normalize(req) not in normalized_cols]
                        if missing:
                            continue

                        # All required columns present: this configuration matches.
                        print(f"找到匹配的表头在第 {candidate_row + 1} 行，使用配置第 {config['row_index']} 行")
                        print(f"匹配的配置列: {list(config['field_mapping'].keys())}")

                        # Re-read the full sheet with the matched header row.
                        df = pd.read_excel(file_path, header=candidate_row, sheet_name=sheet_name)
                        return config_idx, candidate_row, df

            except Exception as e:
                print(f"查找表头时出错: {str(e)}")
                continue

        return None, None, None

    except Exception as e:
        print(f"查找表头时出错: {str(e)}")
        return None, None, None


def map_columns_using_config(df_columns, config):
    """Build a rename map from DataFrame column names to Access field names.

    Exact matches against the configured field mapping win; otherwise a
    relaxed comparison is used that ignores '*', parentheses (ASCII and
    full-width), whitespace and letter case.
    """
    field_mapping = config['field_mapping']
    rename_map = {}

    def _fold(name):
        # Canonical form for the relaxed comparison.
        return re.sub(r'[*()（）\s]', '', name).lower()

    for raw_col in df_columns:
        col = str(raw_col).strip()

        if col in field_mapping:
            # Exact header match.
            rename_map[col] = field_mapping[col]
            continue

        # Relaxed match: first configured header that folds to the same key.
        folded = _fold(col)
        for header, access_field in field_mapping.items():
            if _fold(header) == folded:
                rename_map[col] = access_field
                break

    return rename_map


def parse_expression(expression, df_columns):
    """Translate a mapping expression into a pandas-eval string.

    Column references are written as [name]; each becomes df['name'].
    When the expression contains no bracketed references the whole text is
    treated as a single literal column name.  df_columns is accepted for
    interface compatibility but is not consulted.

    Returns (referenced_columns, eval_string).
    """
    referenced = re.findall(r'\[([^\]]+)\]', expression)

    if not referenced:
        # No [column] tokens: treat the text itself as one column lookup.
        return referenced, f"df['{expression}']"

    translated = expression
    for name in referenced:
        translated = translated.replace(f'[{name}]', f"df['{name}']")

    return referenced, translated


def evaluate_expression(df, expression, access_field_name):
    """Evaluate a mapping expression (e.g. "[数量]*[单价]") against df.

    Args:
        df: source DataFrame whose columns the expression references.
        expression: expression containing [bracketed] column references.
        access_field_name: target Access field name (kept for interface
            compatibility; used for diagnostics only).

    Returns:
        The evaluated result (a Series for column arithmetic), or an
        all-NaN Series when required columns are missing or evaluation
        fails.  The fallback Series is built on df.index — the original
        pd.Series([np.nan] * len(df)) got a fresh RangeIndex and could
        silently misalign rows when assigned to a frame whose index is
        not the default.
    """
    try:
        cols = df.columns.astype(str).tolist()
        required_columns, eval_expr = parse_expression(expression, cols)

        missing_columns = [col for col in required_columns if col not in cols]
        if missing_columns:
            print(f"警告: 表达式 '{expression}' 需要列 {missing_columns} 但不存在")
            return pd.Series(np.nan, index=df.index)

        # Ensure column labels are strings so the df['name'] lookups in
        # eval_expr resolve even for numeric header cells.
        df = df.rename(columns=lambda x: str(x))
        result = pd.eval(eval_expr, local_dict={'df': df, 'np': np})

        return result

    except Exception as e:
        print(f"计算表达式 '{expression}' 时出错: {str(e)}")
        return pd.Series(np.nan, index=df.index)


def get_all_excel_files(root_dir):
    """Recursively collect every Excel workbook under root_dir.

    Hidden directories (leading '.') and backup directories (leading '~')
    are pruned from the walk, and Office lock files (leading '~$') are
    skipped.  Returns the list of matching paths, or [] on any error.
    """
    wanted_suffixes = {'.xlsx', '.xls', '.xlsm'}
    found = []

    print(f"开始遍历目录: {root_dir}")

    try:
        for current_dir, subdirs, filenames in os.walk(root_dir):
            # Prune in place so os.walk never descends into these.
            subdirs[:] = [d for d in subdirs if not d.startswith(('.', '~'))]

            for filename in filenames:
                if filename.startswith('~$'):
                    continue  # Office lock file left by an open workbook

                if os.path.splitext(filename)[1].lower() in wanted_suffixes:
                    found.append(os.path.join(current_dir, filename))

        print(f"找到 {len(found)} 个Excel文件")
        return found

    except Exception as e:
        print(f"遍历目录时出错: {str(e)}")
        return []


def import_excel_to_access(access_db_path, root_dir, target_table, mapping_file="表头映射.xlsx"):
    """Bulk-import Excel workbooks under root_dir into an Access table.

    For every Excel file found recursively below root_dir, locate a header
    row matching one of the layouts in mapping_file, rename/compute the
    configured columns, clean the data and insert it into target_table.
    Work is committed per file so one bad workbook cannot roll back
    earlier successes.

    Args:
        access_db_path: path to the .accdb/.mdb database file.
        root_dir: directory tree to scan for Excel workbooks.
        target_table: destination table name in Access.
        mapping_file: header-mapping workbook; defaults to the historical
            hard-coded "表头映射.xlsx" for backward compatibility.
    """
    # ODBC connection string for the Access driver.
    conn_str = (
        r"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};"
        f"DBQ={access_db_path};"
    )

    # Load the header-mapping configuration.
    header_configs, access_field_order = load_field_mapping_config(mapping_file)

    if not header_configs:
        print("错误: 无法加载映射配置，程序退出")
        return

    print("\n映射配置加载成功:")
    print(f"找到 {len(header_configs)} 个表头配置")
    for i, config in enumerate(header_configs):
        print(f"配置 {i + 1} (第{config['row_index']}行):")
        print(f"  字段映射: {len(config['field_mapping'])} 个")
        print(f"  表达式映射: {len(config['expression_mapping'])} 个")
        print(f"  必需列: {config['required_columns']}")

    # Provenance fields stamped onto every imported row.
    access_field_order.extend(['导入时间', '文件路径'])

    # Collect every Excel workbook below root_dir.
    all_excel_files = get_all_excel_files(root_dir)

    if not all_excel_files:
        print("未找到任何Excel文件，程序退出")
        return

    try:
        conn = pyodbc.connect(conn_str)
        cursor = conn.cursor()

        processed_count = 0
        total_rows = 0
        skipped_files = []
        failed_files = []

        # Process each workbook independently; one failure must not stop the run.
        for i, file_path in enumerate(all_excel_files, 1):
            print(f"\n{'=' * 60}")
            print(f"处理文件 [{i}/{len(all_excel_files)}]: {file_path}")

            try:
                # Find the header row and the mapping configuration it satisfies.
                config_idx, header_row, df = find_matching_header_row(file_path, header_configs)

                if config_idx is None or header_row is None or df is None:
                    print("未找到匹配的表头配置，跳过文件")
                    skipped_files.append((file_path, "未找到匹配的表头配置"))
                    continue

                matched_config = header_configs[config_idx]
                print(f"使用配置: 第{matched_config['row_index']}行")
                print(f"原始列名: {df.columns.tolist()}")

                # Rename Excel headers to Access field names.
                column_mapping = map_columns_using_config(df.columns, matched_config)
                print(f"列映射关系: {column_mapping}")
                df.rename(columns=column_mapping, inplace=True)

                # Evaluate computed (expression) fields.
                if matched_config['expression_mapping']:
                    print("处理表达式计算字段...")
                    for access_field, expression in matched_config['expression_mapping'].items():
                        try:
                            result = evaluate_expression(df, expression, access_field)
                            df[access_field] = result
                            print(f"  计算 {access_field} = {expression}")
                        except Exception as e:
                            print(f"  计算字段 {access_field} 时出错: {str(e)}")
                            df[access_field] = np.nan

                # Stamp provenance fields.
                current_time = datetime.now()
                df['导入时间'] = current_time
                df['文件路径'] = file_path

                # Keep only mapped fields, in the configured Access order.
                valid_columns = [col for col in access_field_order if col in df.columns]

                # Ensure the system fields survive even if the configured
                # order somehow lost them.
                for sys_col in ('导入时间', '文件路径'):
                    if sys_col not in valid_columns and sys_col in df.columns:
                        valid_columns.append(sys_col)

                df = df[valid_columns]

                # Normalize values (strip strings, coerce numerics, NULL blanks).
                df = clean_dataframe(df)

                if len(df) == 0:
                    print("警告: 无有效数据行")
                    skipped_files.append((file_path, "无有效数据行"))
                    continue

                print(f"处理后数据: {len(df)}行 x {len(df.columns)}列")
                print(f"最终列名: {df.columns.tolist()}")

                # Insert, then commit per file.
                rows_inserted = batch_insert(cursor, target_table, df)
                total_rows += rows_inserted

                conn.commit()
                processed_count += 1
                print(f"✓ 成功导入: {file_path} (需导入数据: {len(df)}行 ,成功导入 {rows_inserted} 行数据)")

            except Exception as e:
                print(f"✗ 处理文件 {file_path} 时出错: {str(e)}")
                traceback.print_exc()
                failed_files.append((file_path, str(e)))
                conn.rollback()

        # Summary statistics for the whole run.
        print(f"\n{'=' * 60}")
        print("处理结果统计:")
        print(f"总文件数: {len(all_excel_files)}")
        print(f"成功导入: {processed_count}")
        print(f"跳过文件: {len(skipped_files)}")
        print(f"失败文件: {len(failed_files)}")
        print(f"总导入行数: {total_rows}")

        if skipped_files:
            print(f"\n跳过文件列表 (前10个):")
            for file_path, reason in skipped_files[:10]:
                print(f"  {file_path} - 原因: {reason}")
            if len(skipped_files) > 10:
                print(f"  ... 还有{len(skipped_files) - 10}个跳过文件")

        if failed_files:
            print(f"\n失败文件列表:")
            for file_path, error in failed_files:
                print(f"  {file_path} - 错误: {error}")

    except Exception as e:
        print(f"数据库连接错误: {str(e)}")
        traceback.print_exc()
    finally:
        # Close the cursor before the connection; either may be unbound
        # when pyodbc.connect itself failed.
        if 'cursor' in locals():
            cursor.close()
        if 'conn' in locals():
            conn.close()


def clean_dataframe(df):
    """Normalize a mapped DataFrame before database insertion.

    Drops all-empty rows and columns and any row whose first column is
    blank, then cleans each column (except the system fields
    导入时间/文件路径): object columns are stripped with '' mapped to
    None, datetime columns are left as-is, and everything else is
    coerced to numeric with NaN replaced by 0.

    Returns the cleaned DataFrame.  A frame whose columns were all
    dropped is returned as-is — the original code indexed
    df.columns[0] unconditionally and raised IndexError on a fully
    empty sheet.
    """
    df = df.dropna(how='all')
    df = df.dropna(axis=1, how='all')

    # Guard against a frame left with no columns at all.
    if len(df.columns) == 0:
        return df

    # A blank first column marks a non-data row (totals, footers, ...).
    df = df.dropna(subset=[df.columns[0]])

    for col in df.columns:
        if col in ('导入时间', '文件路径'):
            continue  # system fields are set by the importer; leave untouched
        if df[col].dtype == 'object':
            df[col] = df[col].fillna('').astype(str).str.strip()
            df[col] = df[col].replace({'': None})
        elif pd.api.types.is_datetime64_any_dtype(df[col]):
            df[col] = df[col].replace({'': None})
        else:
            df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0)

    return df


def batch_insert(cursor, table_name, df, batch_size=50):
    """Insert df into table_name in batches via the pyodbc cursor.

    Each batch is attempted with executemany; on failure the batch is
    retried row by row so a single bad row does not discard its whole
    batch.  NaN/empty values are sent as SQL NULL.

    Args:
        cursor: an open pyodbc cursor.
        table_name: destination table (from trusted configuration; column
            and table names are bracket-quoted for Access, values go
            through ? placeholders).
        df: the cleaned DataFrame to insert.
        batch_size: rows per executemany call.

    Returns:
        The number of rows actually inserted.
    """
    if len(df) == 0:
        return 0

    inserted_rows = 0
    columns = ', '.join(f'[{col}]' for col in df.columns)
    placeholders = ', '.join('?' * len(df.columns))
    sql = f"INSERT INTO {table_name} ({columns}) VALUES ({placeholders})"

    def _to_param(val):
        # Map the various missing-value markers to SQL NULL.
        if pd.isna(val) or val == '' or val == 'NaN':
            return None
        return val

    for start in range(0, len(df), batch_size):
        batch = df.iloc[start:start + batch_size]
        values = [tuple(_to_param(v) for v in row.values) for _, row in batch.iterrows()]

        try:
            cursor.executemany(sql, values)
            inserted_rows += len(batch)
        except Exception as e:
            print(f"批量插入时出错: {str(e)},\n SQL:{sql}\nvalue:{values[0]}")
            # Fall back to row-at-a-time so only the bad rows are lost.
            # The original bare `except:` here also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            for value_tuple in values:
                try:
                    cursor.execute(sql, value_tuple)
                    inserted_rows += 1
                except Exception:
                    continue

    return inserted_rows


if __name__ == "__main__":
    # Deployment-specific settings — adjust paths and table name as needed.
    root_directory = r"D:\存档"
    access_db_path = r"D:\programData\access\test.accdb"
    target_table = "清关资料"

    if os.path.exists(root_directory):
        import_excel_to_access(access_db_path, root_directory, target_table)
    else:
        print(f"错误: 目录 {root_directory} 不存在")