from datetime import datetime, timedelta
import pandas as pd
import match as match
import time
import logging

from utils.verifyParameters import verify

# Configure logging (INFO level for the step-timing messages below)
logging.basicConfig(level=logging.INFO)


def process_guizhoukeyu_reconciliation(recon_start_time, recon_end_time, file_path, fault_tolerant, station_id, ignore_time):
    total_start_time = time.time()
    # region 配置
    station_config = {
        "name": "贵州科宇",
        "ids": [1002],
        "main_body_gas_station": 147,
        "file_keyword": "(.*)贵州科宇(.*)",
        "columns": {
            "create_time": "加气时间",
            "car_number": "车牌号",
            "gas_num": "充装量",
            "gas_price": "单价/千克",
        },
        "is_time_shuffled": True,
        "diff_num": 1,
    }

    verify(fault_tolerant, station_id, ignore_time, station_config)

    # 时间预处理
    time_process_start = time.time()
    start_dt = datetime.strptime(recon_start_time, "%Y-%m-%d %H:%M:%S")
    start_time = (start_dt - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")
    end_dt = datetime.strptime(recon_end_time, "%Y-%m-%d %H:%M:%S")
    end_time = (end_dt + timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")
    time_process_duration = time.time() - time_process_start
    logging.info(f"时间预处理耗时: {time_process_duration:.2f}秒")

    # step 1 获取Excel中的可用sheet
    step1_start_time = time.time()
    closest_sheet = None
    min_diff = None
    closest_sheet_name = None

    excel_file = pd.ExcelFile(file_path)
    sheet_names = excel_file.sheet_names

    for sheet in sheet_names:
        header_row = None
        df_tmp = pd.read_excel(file_path, sheet_name=sheet)
        if all(item in df_tmp.columns.tolist() for item in list(station_config["columns"].values())):
            header_row = -1
        else:
            for row in df_tmp.head(3).itertuples():
                if all(item in list(row[1:]) for item in list(station_config["columns"].values())):
                    header_row = row.Index
                    break
        if header_row is not None:
            df = pd.read_excel(file_path, sheet_name=sheet, skiprows=header_row + 1)
            try:
                df[station_config["columns"]["create_time"]] = pd.to_datetime(
                    df[station_config["columns"]["create_time"]], errors="coerce"
                )
                df = df.dropna(subset=[station_config["columns"]["create_time"]])
                df = df.dropna(subset=[station_config["columns"]["gas_num"]])
                latest_time = df[station_config["columns"]["create_time"]].max()
                if pd.notnull(latest_time):
                    # 计算与当天的差值
                    time_diff = abs(
                        (latest_time.date() - datetime.strptime(recon_end_time, "%Y-%m-%d %H:%M:%S").date()).days
                    )
                    # 更新最接近的sheet
                    if min_diff is None or time_diff < min_diff:
                        min_diff = time_diff
                        closest_sheet = df
                        closest_sheet_name = sheet
            except Exception as e:
                print(f"处理{sheet}时出错: {e}")

    if closest_sheet is None:
        raise ValueError("未找到有效数据表")

    try:
        station_data = closest_sheet[list(station_config["columns"].values())]
        station_data = match.set_station_id_column(station_data, station_config)
    except KeyError:
        raise ValueError("配置文件与Excel列名不一致")

    step1_duration = time.time() - step1_start_time
    logging.info(f"步骤1(获取Excel中的可用sheet)耗时: {step1_duration:.2f}秒")

    print(len(station_data), closest_sheet_name)
    station_dfs_dict = {}

    # step 2 处理Excel
    step2_start_time = time.time()
    if "station_name" in station_config.get("columns", {}) and len(
            station_config["ids"]) > 1 and "nms" in station_config:
        station_names = station_config["nms"]
        station_name_col = station_config["columns"]["station_name"]
        for idx, pattern in enumerate(station_names):
            matched_df = station_data[station_data[station_name_col].astype(str).str.match(pattern, na=False)]
            non_midnight_count = (
                    matched_df[station_config["columns"]["create_time"]].dt.strftime("%H:%M:%S") != "00:00:00"
            ).sum()
            if len(matched_df) > 0 and (non_midnight_count / len(matched_df)) > 0.5:
                matched_df = matched_df.sort_values(by=station_config["columns"]["create_time"])
            station_dfs_dict[station_config["ids"][idx]] = matched_df

    step2_duration = time.time() - step2_start_time
    logging.info(f"步骤2(处理Excel数据)耗时: {step2_duration:.2f}秒")

    match_result = []

    # step 3 匹配逻辑
    step3_start_time = time.time()
    if station_dfs_dict:
        for station_id, df in station_dfs_dict.items():
            online_data = match.get_online_data([station_id], start_time, end_time)
            match_result.append(match.match_data_v1(df, online_data, station_config))
    else:
        if station_config.get("excel_get_time_range", False):
            time_col = station_config["columns"]["create_time"]
            station_data = station_data[station_data[time_col].between(start_time, end_time)]
        online_data = match.get_online_data(station_config['ids'], start_time, end_time)
        match_result.append(match.match_data_v1(station_data, online_data, station_config))

    step3_duration = time.time() - step3_start_time
    logging.info(f"步骤3(数据匹配)耗时: {step3_duration:.2f}秒")

    # 生成JSON结果
    step4_start_time = time.time()
    json_result = match.create_json_result_v2(match_result, station_config, start_time, end_time)
    step4_duration = time.time() - step4_start_time
    logging.info(f"步骤4(生成JSON结果)耗时: {step4_duration:.2f}秒")

    total_duration = time.time() - total_start_time
    logging.info(f"贵州科宇对账单处理总耗时: {total_duration:.2f}秒")

    return json_result
