import pandas as pd
from datetime import datetime, timedelta
import match

import logging

from utils.verifyParameters import verify

# 配置日志
logging.basicConfig(level=logging.INFO)


def get_closest_sheet(file_path, station_config, recon_end_time):
    """Select the sheet(s) of an Excel workbook that match the station config.

    Preferred path: every sheet whose name contains 明细（出） (east zone) or
    明细（进） (west zone) and whose header matches the configured columns is
    read and merged into a single DataFrame, with a ``station_name`` column
    tagging each row's zone.

    Fallback path (no such sheets): among all sheets whose header matches,
    return the one whose newest ``create_time`` value is closest — measured in
    whole days — to ``recon_end_time``.

    Args:
        file_path: path to the uploaded Excel workbook.
        station_config: station configuration dict; only ``columns`` is used.
        recon_end_time: reconciliation end time, "YYYY-MM-DD HH:MM:SS".

    Returns:
        tuple: ``(DataFrame, sheet label)`` on success, ``(None, None)`` when
        the merged data cannot be cleaned or no sheet matches.
    """
    required_cols = list(station_config["columns"].values())
    time_col = station_config["columns"]["create_time"]
    gas_col = station_config["columns"]["gas_num"]

    excel_file = pd.ExcelFile(file_path)
    sheet_names = excel_file.sheet_names

    # Preferred path: collect all 明细（出）/明细（进） detail sheets.
    sheets_to_merge = []
    for sheet in sheet_names:
        header_row = _find_header_row(file_path, sheet, required_cols)
        if ('明细（出）' in sheet or '明细（进）' in sheet) and header_row is not None:
            df = pd.read_excel(file_path, sheet_name=sheet, skiprows=header_row + 1)
            # 明细（出） → 东区; otherwise the guard guarantees 明细（进） → 西区.
            # (The former extra elif branches for bare 东区/西区/未知 were
            # unreachable: the guard above already requires one of the two
            # 明细 markers, which the first two branches consumed.)
            df["station_name"] = "东区" if '明细（出）' in sheet else "西区"
            sheets_to_merge.append(df)
            logging.info(f"找到匹配的sheet: {sheet}, 数据行数: {len(df)}")

    if sheets_to_merge:
        merged_sheet = pd.concat(sheets_to_merge, ignore_index=True)
        logging.info(f"合并了{len(sheets_to_merge)}个sheet，总数据行数: {len(merged_sheet)}")
        try:
            # Coerce the date column, then drop rows missing either the date
            # or the gas amount.
            merged_sheet[time_col] = pd.to_datetime(merged_sheet[time_col], errors="coerce")
            merged_sheet = merged_sheet.dropna(subset=[time_col])
            merged_sheet = merged_sheet.dropna(subset=[gas_col])
            return merged_sheet, "合并sheet(东区+西区)"
        except Exception as e:
            logging.error(f"处理合并数据时出错: {e}")
            return None, None

    # Fallback: pick the matching sheet whose latest create_time is closest
    # (by calendar-day difference) to the reconciliation end time.
    closest_sheet = None
    min_diff = None
    closest_sheet_name = None
    recon_end_date = datetime.strptime(recon_end_time, "%Y-%m-%d %H:%M:%S").date()
    for sheet in sheet_names:
        header_row = _find_header_row(file_path, sheet, required_cols)
        if header_row is None:
            continue
        df = pd.read_excel(file_path, sheet_name=sheet, skiprows=header_row + 1)
        try:
            df[time_col] = pd.to_datetime(df[time_col], errors="coerce")
            df = df.dropna(subset=[time_col])
            df = df.dropna(subset=[gas_col])
            latest_time = df[time_col].max()
            if pd.notnull(latest_time):
                time_diff = abs((latest_time.date() - recon_end_date).days)
                if min_diff is None or time_diff < min_diff:
                    min_diff = time_diff
                    closest_sheet = df
                    closest_sheet_name = sheet
        except Exception as e:
            logging.error(f"处理{sheet}时出错: {e}")
    return closest_sheet, closest_sheet_name


def _find_header_row(file_path, sheet, required_cols):
    """Locate the header row of *sheet*.

    Returns -1 when the sheet's first row already is the header (all required
    column names present), the 0-based index of a row within the first two
    data rows that contains every required column name, or None when no
    header can be found.
    """
    df_tmp = pd.read_excel(file_path, sheet_name=sheet)
    if all(col in df_tmp.columns.tolist() for col in required_cols):
        return -1
    for row in df_tmp.head(2).itertuples():
        if all(col in list(row[1:]) for col in required_cols):
            return row.Index
    return None


def process_guanyindingsheng_reconciliation(recon_start_time, recon_end_time, file_path, fault_tolerant, station_id, ignore_time):
    """Run the special reconciliation flow for the Guanying Dingsheng (冠英鼎胜) station.

    Args:
        recon_start_time (str): reconciliation start time, "YYYY-MM-DD HH:MM:SS".
        recon_end_time (str): reconciliation end time, "YYYY-MM-DD HH:MM:SS".
        file_path (str): path to the uploaded Excel statement.
        fault_tolerant: fault-tolerance options, forwarded to ``verify``.
        station_id: station identifier, forwarded to ``verify``.
        ignore_time: time-ignore flag, forwarded to ``verify``.

    Returns:
        dict: JSON-ready matching result produced by ``match.create_json_result_v2``.

    Raises:
        Exception: any failure is wrapped with a station-specific message,
            chained to the original cause.
    """
    try:
        # region Configuration
        # Station config: name, ids, filename keyword, column mapping and the
        # difference threshold used for "suspected match" classification.
        station_config = {
            "name": "冠英鼎胜",
            "ids": [222, 223],
            "main_body_gas_station": 130,
            "nms": ["(.*)西区(.*)", "(.*)东区(.*)"],
            "file_keyword": "(.*)冠英鼎胜(.*)",
            "columns": {
                "car_number": "车牌号",
                "create_time": "日期",
                "gas_num": "加气量（Kg）",
                "money": "单价（元/公斤）",
            },
            "diff_num": 1,  # differences within this many kg are "suspected matches"
        }

        verify(fault_tolerant, station_id, ignore_time, station_config)

        # Widen the reconciliation window by one hour on each side so records
        # near the boundaries still get a chance to match.
        start_time = datetime.strptime(recon_start_time, "%Y-%m-%d %H:%M:%S")
        start_time = (start_time - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")
        end_time = datetime.strptime(recon_end_time, "%Y-%m-%d %H:%M:%S")
        end_time = (end_time + timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")

        # Pick the best-matching sheet(s) from the uploaded workbook.
        closest_sheet, closest_sheet_name = get_closest_sheet(file_path, station_config, recon_end_time)
        if closest_sheet is None:
            # Previously this fell through to len(None) and raised a bare
            # TypeError; fail fast with an explicit message instead.
            raise RuntimeError('未在Excel中找到匹配的sheet')
        logging.info(f"{len(closest_sheet)} {closest_sheet_name}")

        try:
            # Keep only the configured columns and drop rows missing the gas
            # amount or the creation time.
            station_data = closest_sheet[list(station_config["columns"].values())]
            station_data = station_data[station_data[station_config["columns"]["gas_num"]].notnull() & station_data[
                station_config["columns"]["create_time"]].notnull()]
            station_data = match.set_station_id_column(station_data, station_config)
        except Exception as e:
            # Column mapping in the config does not line up with the Excel file.
            logging.error('配置文件与excel不一致')
            raise RuntimeError('配置文件与excel不一致') from e

        logging.info(f"数据量: {len(station_data)}, sheet名: {closest_sheet_name}")

        station_dfs_dict = {}

        # Step 2: with multiple stations and a station_name column mapping,
        # split the Excel data per station before matching.
        # NOTE(review): station_config["columns"] above has no "station_name"
        # key, so this branch never runs for this config — confirm intended.
        if "station_name" in station_config["columns"] and len(station_config["ids"]) > 1 and "nms" in station_config:
            station_names = station_config["nms"]
            station_name_col = station_config["columns"]["station_name"]
            for idx, pattern in enumerate(station_names):
                matched_df = station_data[station_data[station_name_col].astype(str).str.match(pattern, na=False)]
                # If most rows carry a real time-of-day, sort by it.
                non_midnight_count = (
                        matched_df[station_config["columns"]["create_time"]].dt.strftime("%H:%M:%S") != "00:00:00"
                ).sum()
                if len(matched_df) > 0 and (non_midnight_count / len(matched_df)) > 0.5:
                    # More than 50% counts as "the vast majority".
                    matched_df = matched_df.sort_values(by=station_config["columns"]["create_time"])
                station_dfs_dict[station_config["ids"][idx]] = matched_df

        match_result = []

        # Step 3: if per-station frames exist, fetch and match each station
        # separately; otherwise fetch data for all configured stations at once.
        if len(station_dfs_dict) > 0:
            has_data_to_process = False
            for i in station_dfs_dict:
                # Match only stations that actually have Excel rows.
                if not station_dfs_dict[i].empty and len(station_dfs_dict[i]) > 0:
                    logging.info(f"处理站点 {i}，数据量: {len(station_dfs_dict[i])}")
                    # Fetch online refueling data for this station and window.
                    online_data = match.get_online_data([i], start_time, end_time)
                    # Compare Excel rows against online data.
                    match_result.append(match.match_data_v1(station_dfs_dict[i], online_data, station_config))
                    has_data_to_process = True
                else:
                    online_data = match.get_online_data([i], start_time, end_time)
                    match_result = process_unmatched_online_data(match_result, online_data)
                    logging.info(f"站点 {i} 没有匹配的数据，跳过处理")

            # If no station had Excel data, fall back to the unsplit frame.
            if not has_data_to_process:
                logging.info("所有站点都没有匹配的数据，使用原始数据进行处理")
                # Optionally restrict Excel rows to the reconciliation window.
                if station_config.get("excel_get_time_range", False):
                    station_data = station_data[
                        station_data[station_config["columns"]["create_time"]].between(start_time, end_time)]
                # Fetch online data for all configured stations.
                online_data = match.get_online_data(station_config['ids'], start_time, end_time)
                match_result.append(match.match_data_v1(station_data, online_data, station_config))
        else:
            # Optionally restrict Excel rows to the reconciliation window.
            if station_config.get("excel_get_time_range", False):
                station_data = station_data[
                    station_data[station_config["columns"]["create_time"]].between(start_time, end_time)]
            # Fetch online data for all configured stations.
            online_data = match.get_online_data(station_config['ids'], start_time, end_time)
            match_result.append(match.match_data_v1(station_data, online_data, station_config))

        # Build the final JSON result for the frontend / downstream consumers.
        json_result = match.create_json_result_v2(match_result, station_config, start_time, end_time)
        return json_result

    except Exception as e:
        # Bug fix: the message previously named the wrong station ("山西安洁",
        # copy-pasted from another handler); chain the cause for debuggability.
        raise Exception(f"处理冠英鼎胜对账单时出错: {str(e)}") from e


# 处理匹配结果，确保未匹配的在线数据也被包含
def process_unmatched_online_data(match_result, online_data):
    """Append online records not yet present in match_result as a new group.

    Scans existing groups for online ids already consumed, then collects the
    remaining online records, sorts them by ``create_time`` and appends them
    as one extra group of ``{"df": None, "online": record}`` entries.
    Mutates and returns ``match_result``.
    """
    # Online ids already consumed by earlier matching.
    matched_ids = {
        entry["online"]["id"]
        for group in match_result
        for entry in group
        if entry["online"] is not None
    }

    # Remaining online records, ordered chronologically.
    leftovers = sorted(
        (rec for rec in online_data if rec["id"] not in matched_ids),
        key=lambda rec: rec["create_time"],
    )

    if leftovers:
        match_result.append([{"df": None, "online": rec} for rec in leftovers])

    return match_result