import os
import pandas as pd
import numpy as np
import time
import logging
from pathlib import Path
from datetime import datetime, timedelta
import multiprocessing as mp
from functools import partial
from math import radians, sin, cos, sqrt, atan2
import itertools

# Logging: mirror every message to a log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("fire_lightning_matching.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Mean Earth radius in kilometres (used by haversine_distance).
EARTH_RADIUS = 6371.0


def check_paths():
    """Validate the input locations and prepare the output directory.

    Returns:
        (fire_file, lightning_dir, output_dir) as Path objects on success,
        or (None, None, None) when any required input is missing.
    """
    fire_file = Path("data/fire/2017-2024_indexed-3.xlsx")
    lightning_dir = Path("results/lightning-for-year-CG")
    output_dir = Path("results/fire_lightning-matching")

    # Required inputs, paired with the error to emit when absent.
    required = (
        (fire_file, f"火点数据文件不存在: {fire_file}"),
        (lightning_dir, f"闪电数据目录不存在: {lightning_dir}"),
    )
    for path, message in required:
        if not path.exists():
            logger.error(message)
            return None, None, None

    # Creating the output directory is idempotent.
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info(f"输出目录已创建: {output_dir}")

    return fire_file, lightning_dir, output_dir


def haversine_distance(lon1, lat1, lon2, lat2, radius=6371.0):
    """Great-circle distance between two lon/lat points, in kilometres.

    Bug fix: the original tried to clamp latitudes with
    ``'lat' in str(x).lower()`` — but ``str()`` of a numeric coordinate can
    never contain "lat", so the clamp was dead code. Latitudes are now
    clamped to [-90, 90] explicitly; longitudes are left as-is because the
    formula is periodic in longitude.

    Args:
        lon1, lat1: First point, decimal degrees.
        lon2, lat2: Second point, decimal degrees.
        radius: Sphere radius in km; default matches the module-level
            EARTH_RADIUS constant (mean Earth radius).

    Returns:
        Non-negative distance in kilometres.
    """
    # Clamp latitudes to their valid range before converting to radians.
    lat1 = max(-90.0, min(90.0, lat1))
    lat2 = max(-90.0, min(90.0, lat2))
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))

    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    # Guard against tiny floating-point drift pushing a outside [0, 1].
    a = max(0.0, min(1.0, a))
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    # radius and c are both non-negative, so the result already is too.
    return radius * c


def load_fire_data(fire_file):
    """Load and validate the fire-point spreadsheet.

    Args:
        fire_file: Path to the Excel file of fire records.

    Returns:
        Dict mapping year -> DataFrame of that year's fires (with an added
        'date' datetime column and a 1-based 'fire_id'), or {} when no
        record survives filtering, or None on load/validation failure.
    """
    start_time = time.time()
    logger.info(f"开始加载火点数据: {fire_file}")

    try:
        df = pd.read_excel(fire_file)

        # Validate that every column needed downstream is present.
        required_columns = ['longitude', 'latitude', 'year', 'month', 'day', 'hour']
        missing = [col for col in required_columns if col not in df.columns]
        if missing:
            logger.error(f"缺失必要列: {missing}")
            return None

        # Assemble datetimes; coercion turns invalid combinations into NaT,
        # which the dropna removes.
        df['date'] = pd.to_datetime(df[['year', 'month', 'day', 'hour']], errors='coerce')
        df = df.dropna(subset=['date'])

        # Keep 2018-2023 only; the redundant 'date'-year check guards
        # against year/date mismatches introduced by coercion.
        df = df[(df['year'] >= 2018) & (df['year'] <= 2023) & (df['date'].dt.year.between(2018, 2023))]

        if df.empty:
            logger.warning("火点数据为空")
            return {}

        # Sequential 1-based ID so matches can be traced back to a record.
        df['fire_id'] = range(1, len(df) + 1)
        fire_by_year = {year: df[df['year'] == year] for year in df['year'].unique()}

        # Fix: elapsed time was measured but never reported
        # (now consistent with load_lightning_data).
        elapsed = time.time() - start_time
        logger.info(f"加载完成: {len(df)}条记录，{len(fire_by_year)}个年份，耗时{elapsed:.2f}秒")
        return fire_by_year
    except Exception as e:
        logger.error(f"加载失败: {str(e)}")
        return None


def load_lightning_data(lightning_dir):
    """Load per-year lightning CSVs into a nested year/month mapping.

    Args:
        lightning_dir: Directory containing per-year lightning CSV files.

    Returns:
        Dict of {year_str: {month_int: DataFrame}} for years 2018-2023,
        where each DataFrame carries parsed DATETIME components and a
        1-based 'lightning_id'. Years with no valid rows are omitted.
    """
    start_time = time.time()
    logger.info(f"开始加载闪电数据: {lightning_dir}")

    all_data = {}
    csv_files = list(lightning_dir.glob("*.csv"))
    logger.info(f"找到 {len(csv_files)} 个CSV文件")

    for file in csv_files:
        try:
            # NOTE(review): assumes filenames look like "prefix_YYYY_*.csv"
            # so that the second '_'-separated token is the bare year. If a
            # file is named "prefix_YYYY.csv" this token would carry the
            # ".csv" suffix and the 2017/2024 skip below would silently
            # miss — TODO confirm against the actual file naming.
            year = os.path.basename(file).split('_')[1]
            if year in ['2017', '2024']:
                logger.info(f"跳过{year}年数据")
                continue

            df = pd.read_csv(file)
            if 'DATETIME' not in df.columns:
                logger.warning(f"文件{file}缺少DATETIME列")
                continue

            # Strict timestamp validation: unparseable values become NaT
            # and are dropped.
            df['DATETIME'] = pd.to_datetime(df['DATETIME'], errors='coerce')
            df = df.dropna(subset=['DATETIME'])

            if df.empty:
                logger.warning(f"文件{file}无有效数据")
                continue

            # Pre-split time components used by the matching stage.
            df['YEAR'] = df['DATETIME'].dt.year
            df['MONTH'] = df['DATETIME'].dt.month
            df['DAY'] = df['DATETIME'].dt.day
            df['HOUR'] = df['DATETIME'].dt.hour

            # Keep 2018-2023 regardless of what the filename claimed.
            df = df[df['YEAR'].between(2018, 2023)]
            if df.empty:
                logger.info(f"{year}年无有效数据")
                continue

            # Sequential 1-based ID, unique within this file's year.
            df['lightning_id'] = range(1, len(df) + 1)

            # Bucket by month; the emptiness test is defensive (groupby
            # does not normally yield empty groups).
            month_groups = {month: grp for month, grp in df.groupby('MONTH') if not grp.empty}
            if month_groups:
                all_data[year] = month_groups
                logger.info(f"加载{year}年: {len(df)}条记录，{len(month_groups)}个月份")
        except Exception as e:
            logger.error(f"处理{file}失败: {str(e)}")

    elapsed = time.time() - start_time
    logger.info(f"加载完成，耗时{elapsed:.2f}秒，{len(all_data)}个年份数据")
    return all_data


def calculate_spatiotemporal_index(time_diff_days, distance_km, t_max, s_max):
    """Spatiotemporal index A = (1 - t/T) * (1 - d/S), clamped to [0, 1].

    Bug fix: the original mapped an *infinite* threshold to a ratio of 1.0,
    which forced the index to 0 — i.e. an unlimited search window rejected
    every candidate. The mathematical limit of t/T as T -> inf is 0, so an
    infinite threshold now contributes no penalty.

    Args:
        time_diff_days: Days from the lightning strike to the fire
            (negative means the strike happened after the fire).
        distance_km: Great-circle distance between strike and fire, km.
        t_max: Time threshold T in days (may be float('inf')).
        s_max: Distance threshold S in km (may be float('inf')).

    Returns:
        Index in [0, 1]; 0 when the strike postdates the fire or either
        difference reaches its threshold, 1 for a perfect coincidence.
    """
    if time_diff_days < 0:
        return 0  # lightning after the fire cannot be its cause

    # Infinite threshold => no penalty for that dimension.
    time_ratio = 0.0 if t_max == float('inf') else time_diff_days / t_max
    distance_ratio = 0.0 if s_max == float('inf') else distance_km / s_max

    # Clamp each ratio into [0, 1] so the product stays well-behaved.
    time_ratio = max(0, min(time_ratio, 1))
    distance_ratio = max(0, min(distance_ratio, 1))

    return max(0, (1 - time_ratio) * (1 - distance_ratio))


def match_fire_lightning_chunk(fire_chunk, lightning_data, t_max, s_max):
    """Match each fire in a chunk to its best preceding lightning strike.

    For every fire, candidate strikes are those from the same year/month
    that occurred strictly before the fire; the strike maximising the
    spatiotemporal index A is kept when A > 0.

    Args:
        fire_chunk: DataFrame slice of fire records (needs 'year', 'month',
            'date', 'longitude', 'latitude', 'fire_id').
        lightning_data: {year_str: {month_int: DataFrame}} as produced by
            load_lightning_data.
        t_max: Time threshold in days for the index.
        s_max: Distance threshold in km for the index.

    Returns:
        (matched_fire_ids, matched_lightnings_df) — a de-duplicated list of
        fire IDs and a DataFrame of the winning strikes annotated with
        fire_id, time_diff_days, distance_km, spatiotemporal_index.
    """
    matched_fires = []
    matched_lightnings = []

    for _, fire in fire_chunk.iterrows():
        # Lightning data is keyed by year as a *string* (from filenames).
        fire_year, fire_month = str(fire['year']), fire['month']
        fire_date = fire['date']

        # Skip fires with no lightning data for their year/month.
        if fire_year not in lightning_data or fire_month not in lightning_data[fire_year]:
            continue

        month_lightnings = lightning_data[fire_year][fire_month].copy()
        if month_lightnings.empty:
            continue

        # Reset to a contiguous index, then keep only strikes strictly
        # before the fire. valid_lightnings keeps row *order*, so
        # positional access (.iloc) below lines up with the lists built
        # from it.
        month_lightnings = month_lightnings.reset_index(drop=True)
        valid_lightnings = month_lightnings[month_lightnings['DATETIME'] < fire_date]

        if valid_lightnings.empty:
            continue

        # Time differences in days; reset_index so positions (not labels)
        # align with the distances/a_values lists computed next.
        time_diffs = (fire_date - valid_lightnings['DATETIME']).dt.total_seconds() / (24 * 3600)
        time_diffs = time_diffs.reset_index(drop=True)

        # Great-circle distance to each candidate strike, in row order.
        distances = [
            haversine_distance(
                fire['longitude'], fire['latitude'],
                lightning['LONGITUDE'], lightning['LATITUDE']
            ) for _, lightning in valid_lightnings.iterrows()
        ]

        # Spatiotemporal index per candidate (same positional order).
        a_values = [
            calculate_spatiotemporal_index(td, d, t_max, s_max)
            for td, d in zip(time_diffs, distances)
        ]

        if a_values:
            # argmax gives a *position*, consistent with .iloc access.
            max_a_index = np.argmax(a_values)
            max_a = a_values[max_a_index]

            if max_a > 0:  # only an index > 0 counts as a valid match
                matched_lt = valid_lightnings.iloc[max_a_index].copy()
                matched_lt['fire_id'] = fire['fire_id']
                matched_lt['time_diff_days'] = time_diffs.iloc[max_a_index]
                matched_lt['distance_km'] = distances[max_a_index]
                matched_lt['spatiotemporal_index'] = max_a

                matched_fires.append(fire['fire_id'])
                matched_lightnings.append(matched_lt)

    return list(set(matched_fires)), pd.DataFrame(matched_lightnings) if matched_lightnings else pd.DataFrame()


def match_fire_lightning(fire_data, lightning_data, t_max, s_max, num_processes=4):
    """Run the fire/lightning matching for one threshold pair, in parallel.

    Args:
        fire_data: {year: DataFrame} from load_fire_data.
        lightning_data: {year_str: {month_int: DataFrame}} from
            load_lightning_data.
        t_max: Time threshold in days.
        s_max: Distance threshold in km.
        num_processes: Worker-process count for the multiprocessing pool.

    Returns:
        (stats_dict, matched_fire_ids_set, matched_lightnings_df).
    """
    start_time = time.time()
    logger.info(f"开始匹配: T={t_max}天, S={s_max}公里")

    # Fix: early returns now carry the same stats keys as the success path
    # (so the aggregated statistics CSV keeps one uniform schema) and
    # return a set like the success path does, instead of a list.
    empty_stats = {
        'time_threshold': t_max,
        'distance_threshold': s_max,
        'total_fires': 0,
        'matched_fires_count': 0,
        'match_rate': 0,
    }

    if not fire_data or not lightning_data:
        logger.error("数据为空，无法匹配")
        return empty_stats, set(), pd.DataFrame()

    # Flatten the per-year fire dict into one frame for chunking.
    all_fire_data = pd.concat(fire_data.values(), ignore_index=True)
    if all_fire_data.empty:
        logger.warning("无有效火点数据")
        return empty_stats, set(), pd.DataFrame()

    # Chunk the fires so each worker gets a reasonable slice
    # (at least 100 rows per chunk).
    chunk_size = max(100, len(all_fire_data) // max(num_processes, 1))
    fire_chunks = [all_fire_data[i:i + chunk_size] for i in range(0, len(all_fire_data), chunk_size)]
    logger.info(f"分{len(fire_chunks)}块，每块约{chunk_size}条记录")

    # Fan the chunks out over a process pool; thresholds are frozen into
    # the worker function via partial.
    with mp.Pool(processes=num_processes) as pool:
        match_func = partial(
            match_fire_lightning_chunk,
            lightning_data=lightning_data,
            t_max=t_max,
            s_max=s_max
        )
        results = pool.map(match_func, fire_chunks)

    # Merge per-chunk results.
    all_matched_fires = []
    all_matched_lightnings = []
    for fires, lts in results:
        all_matched_fires.extend(fires)
        if not lts.empty:
            all_matched_lightnings.append(lts)

    # Summary statistics; fire IDs are de-duplicated across chunks.
    total_fires = len(all_fire_data)
    matched_count = len(set(all_matched_fires))
    match_rate = matched_count / total_fires if total_fires > 0 else 0

    matched_lt_data = pd.concat(all_matched_lightnings, ignore_index=True) if all_matched_lightnings else pd.DataFrame()

    logger.info(
        f"匹配完成: 总火灾{total_fires}，匹配{matched_count}，匹配率{match_rate:.2%}，耗时{time.time() - start_time:.2f}秒")
    return {
        'time_threshold': t_max,
        'distance_threshold': s_max,
        'total_fires': total_fires,
        'matched_fires_count': matched_count,
        'match_rate': match_rate
    }, set(all_matched_fires), matched_lt_data


def main():
    """Drive the full pipeline: load data, sweep thresholds, save results."""
    overall_start = time.time()
    logger.info("=== 开始时空匹配程序 ===")

    # Abort early if any required path is missing.
    fire_file, lightning_dir, output_dir = check_paths()
    if not all([fire_file, lightning_dir, output_dir]):
        logger.error("路径检查失败，程序终止")
        return

    # Load both datasets; either returning empty/None aborts the run.
    fire_data = load_fire_data(fire_file)
    if not fire_data:
        logger.error("火点数据加载失败")
        return

    lightning_data = load_lightning_data(lightning_dir)
    if not lightning_data:
        logger.error("闪电数据加载失败")
        return

    # Threshold grid: 1-14 days x 1-50 km (idiom: list(range(...))
    # instead of a redundant comprehension).
    time_thresholds = list(range(1, 15))
    distance_thresholds = list(range(1, 51))
    all_stats = []

    # Sweep every (T, S) combination; one failure does not stop the sweep.
    for t, s in itertools.product(time_thresholds, distance_thresholds):
        try:
            logger.info(f"开始处理阈值组合: T={t}天, S={s}公里")
            stats, _, matched_lt = match_fire_lightning(fire_data, lightning_data, t, s)
            all_stats.append(stats)

            # Persist the matched strikes for this threshold pair.
            if not matched_lt.empty:
                t_label = f"{t}d" if t != float('inf') else "inf"
                s_label = f"{s}km" if s != float('inf') else "inf"
                lt_file = output_dir / f"maxa_matched_lightnings_{t_label}_{s_label}.csv"
                matched_lt.to_csv(lt_file, index=False)
                logger.info(f"保存{len(matched_lt)}条匹配闪电到{lt_file}")

        except Exception as e:
            logger.error(f"处理阈值{t, s}失败: {str(e)}")

    # One summary CSV covering the whole threshold sweep.
    if all_stats:
        stats_df = pd.DataFrame(all_stats)
        stats_file = output_dir / "maxa_matching_statistics.csv"
        stats_df.to_csv(stats_file, index=False)
        logger.info(f"保存统计结果到{stats_file}")

    total_time = time.time() - overall_start
    logger.info(f"=== 程序完成，总耗时{total_time / 60:.2f}分钟 ===")


if __name__ == "__main__":
    main()