import csv
import logging

import pandas as pd  # 新增：导入pandas处理Excel文件

logger = logging.getLogger(__name__)


def merge_table_files(files, output_file):
    """
    Merge multiple CSV/Excel files with deduplication and business rules.

    A row is *rejected* when both status columns (2 and 3) are "否" (no),
    otherwise it is *accepted*. Accepted rows take precedence over rejected
    rows for the same student ID; conflicting duplicate accepted rows are
    skipped with a warning. The merged, sorted rows are written to
    ``output_file``.

    Args:
        files: List of input file paths (supports .csv, .xlsx, .xls).
        output_file: Output file path (supports .csv or .xlsx).

    Returns:
        List of warning/error messages collected during processing
        (the same strings that were logged).

    Raises:
        ValueError: If no input file yielded a usable header row.
    """
    accepted_rows = {}
    rejected_rows = {}
    headers = None  # Unified header for all files, taken from first valid file
    info_list = []

    # Generic reader supporting both CSV and Excel inputs.
    def read_table_file(file_path):
        # dtype=str prevents pandas type inference from corrupting data:
        # IDs like "001" would otherwise lose their leading zeros, and
        # numeric columns containing a missing value would become floats
        # ("12345" -> "12345.0" after str()).
        if file_path.lower().endswith(".csv"):
            return pd.read_csv(file_path, encoding="utf-8-sig", dtype=str)
        elif file_path.lower().endswith(".xlsx"):
            return pd.read_excel(file_path, engine="openpyxl", dtype=str)
        elif file_path.lower().endswith(".xls"):
            # openpyxl cannot read legacy .xls; let pandas pick the engine.
            return pd.read_excel(file_path, dtype=str)
        else:
            raise ValueError(f"不支持的文件格式: {file_path}")

    for file in files:
        try:
            df = read_table_file(file)
            current_header = df.columns.tolist()

            # Establish or validate the unified header.
            if headers is None:
                # BUGFIX: validate BEFORE assigning `headers`. Previously a
                # bad first file installed its header and then raised, the
                # per-file except swallowed the error, and every later valid
                # file was rejected as "inconsistent".
                if len(current_header) < 4:
                    raise ValueError("表格必须包含至少4列数据")
                headers = current_header
            elif current_header != headers:
                warning_info = f"文件 {file} 标题头与其他文件不一致，已跳过"
                info_list.append(warning_info)
                logger.warning(warning_info)
                continue

            # Process data rows with business rules.
            for _, row in df.iterrows():
                row_data = [str(item).strip() for item in row.tolist()]

                # Skip malformed rows.
                if len(row_data) < 4:
                    warning_info = f"文件 {file} 存在数据异常行'{row_data}'，已跳过"
                    info_list.append(warning_info)
                    logger.warning(warning_info)
                    continue

                student_id = row_data[0]  # Column 0: Student ID
                archive_received = row_data[2]  # Column 2: league-member archive received status
                record_received = row_data[3]  # Column 3: student archive received status

                # A row is rejected only when BOTH statuses are "否" (no).
                condition_met = not (
                    archive_received == "否" and record_received == "否"
                )

                if condition_met:
                    if student_id in accepted_rows:
                        # Identical duplicates are fine; conflicting ones are skipped.
                        if accepted_rows[student_id] != tuple(row_data):
                            warning_info = (
                                f"文件 {file} 存在冲突的重复学生ID {student_id}，已跳过"
                            )
                            info_list.append(warning_info)
                            logger.warning(warning_info)
                            continue
                    else:
                        accepted_rows[student_id] = tuple(row_data)
                    # An accepted row supersedes any earlier rejected one.
                    rejected_rows.pop(student_id, None)
                elif student_id not in accepted_rows:
                    # BUGFIX: skip students already accepted; previously the
                    # same ID could land in both dicts, duplicating the row
                    # in the merged output.
                    rejected_rows[student_id] = tuple(row_data)

        except Exception as e:
            warning_info = f"文件 {file} 处理时发生错误：{str(e)}，已跳过该文件"
            info_list.append(warning_info)
            logger.error(warning_info)
            continue

    if headers is None:
        # BUGFIX: no file produced a usable header; fail with a clear error
        # instead of crashing while writing None as the header row.
        raise ValueError("没有可用的输入文件，无法生成合并结果")

    # Combine and sort the merged rows (accepted + rejected).
    combined_rows = sorted(list(accepted_rows.values()) + list(rejected_rows.values()))

    # Lazy %-style args avoid formatting when the level is disabled.
    logger.info(
        "合并 %d 个文件， %d 条数据已收到， %d 条数据未收到",
        len(files),
        len(accepted_rows),
        len(rejected_rows),
    )

    # Write the result in the format implied by the output extension.
    if output_file.lower().endswith(".xlsx"):
        pd.DataFrame(combined_rows, columns=headers).to_excel(
            output_file, index=False, engine="openpyxl"
        )
    else:  # Default to CSV
        with open(output_file, "w", newline="", encoding="utf-8-sig") as outfile:
            writer = csv.writer(outfile)
            writer.writerow(headers)
            writer.writerows(combined_rows)

    return info_list
