import os
import time
from collections import deque
from datetime import timedelta, datetime
from math import radians, sin, cos, sqrt, asin

import numpy as np
import pandas as pd
from scipy.spatial import cKDTree
from sklearn.cluster import DBSCAN


def haversine_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two lat/lon points (haversine formula)."""
    EARTH_RADIUS_KM = 6371
    phi1, lam1, phi2, lam2 = (radians(v) for v in (lat1, lon1, lat2, lon2))

    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    h = sin(half_dphi) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlam) ** 2
    return 2 * EARTH_RADIUS_KM * asin(sqrt(h))


def calculate_bearing(lat1, lon1, lat2, lon2):
    """Direction (degrees, 0-360) that point 2 lies FROM point 1, flipped by
    180 degrees so the result reads as the direction of origin."""
    phi1, lam1, phi2, lam2 = (radians(v) for v in (lat1, lon1, lat2, lon2))

    dlam = lam2 - lam1
    east = cos(phi2) * sin(dlam)
    north = cos(phi1) * sin(phi2) - sin(phi1) * cos(phi2) * cos(dlam)

    # Forward azimuth normalised to 0-360, then reversed to "came from".
    forward = (np.degrees(np.arctan2(east, north)) + 360) % 360
    return (forward + 180) % 360


def load_and_preprocess_data(file_path):
    """Read the lightning CSV, parse the DATETIME column, and return the
    records in chronological order with a fresh 0..n-1 index."""
    print("加载数据中...")
    data = pd.read_csv(file_path)

    print("转换日期时间格式...")
    data['DATETIME'] = pd.to_datetime(data['DATETIME'])

    # Chronological order plus a clean RangeIndex for positional access downstream.
    return data.sort_values('DATETIME').reset_index(drop=True)


def lightning_clustering(df, temporal_threshold=2280, spatial_threshold=25):
    """
    KD-tree accelerated spatio-temporal clustering of lightning strikes.

    Clusters are grown by breadth-first expansion: a strike joins a cluster
    when it lies within `spatial_threshold` km AND `temporal_threshold`
    seconds of any strike already in the cluster.

    Parameters:
    df: DataFrame with LATITUDE, LONGITUDE and DATETIME columns
    temporal_threshold: time threshold in seconds
    spatial_threshold: spatial threshold in km

    Returns:
    (df, cluster_centroids): df gains a LIGHTNING_CLUSTER column (labels start
    at 1); cluster_centroids holds each cluster's mean position and its first
    and last strike times.
    """
    print("开始闪电聚类...")
    start_time = time.time()

    # Coordinates and POSIX-second timestamps (datetime64[ns] -> s).
    coords = df[['LATITUDE', 'LONGITUDE']].values
    timestamps = df['DATETIME'].values.astype(np.int64) // 10 ** 9

    cluster_labels = np.full(len(df), -1, dtype=np.int32)
    current_cluster_id = 0

    # Convert the km radius to degrees for the KD tree.
    # NOTE: this is approximate for small regions; a precise conversion would
    # depend on latitude (1 degree of longitude shrinks toward the poles).
    spatial_threshold_deg = spatial_threshold / 111.32  # 1 degree ~ 111.32 km
    tree = cKDTree(coords)

    # Indices already assigned to some cluster.
    processed = set()

    total_points = len(df)
    for i in range(total_points):
        if i in processed:
            continue

        # Seed a new cluster at this unassigned strike.
        current_cluster_id += 1
        cluster_labels[i] = current_cluster_id
        processed.add(i)

        # BFS frontier. deque.popleft() is O(1); the previous list.pop(0)
        # was O(k) per pop, making large clusters quadratic to expand.
        frontier = deque([i])

        while frontier:
            idx = frontier.popleft()

            # Candidate neighbours within the spatial radius.
            spatial_neighbors = tree.query_ball_point(coords[idx], spatial_threshold_deg)

            # Keep those also within the temporal window and not yet assigned.
            for j in spatial_neighbors:
                if j not in processed and abs(timestamps[j] - timestamps[idx]) <= temporal_threshold:
                    cluster_labels[j] = current_cluster_id
                    processed.add(j)
                    frontier.append(j)

        # Periodic progress report with a naive linear time estimate.
        if (i + 1) % 1000 == 0:
            elapsed_time = time.time() - start_time
            progress = (i + 1) / total_points * 100
            estimated_total_time = elapsed_time / progress * 100
            remaining_time = estimated_total_time - elapsed_time
            print(f"已处理 {i + 1}/{total_points} 条记录 ({progress:.2f}%) - "
                  f"耗时: {elapsed_time:.2f}秒 - 预计剩余时间: {remaining_time:.2f}秒")

    # Attach cluster labels to the input frame.
    df['LIGHTNING_CLUSTER'] = cluster_labels

    # Per-cluster centroid position and time span.
    print("计算闪电聚类质心...")
    cluster_centroids = df.groupby('LIGHTNING_CLUSTER').agg({
        'LATITUDE': 'mean',
        'LONGITUDE': 'mean',
        'DATETIME': ['min', 'max']
    })
    cluster_centroids.columns = ['CENTROID_LAT', 'CENTROID_LON', 'START_TIME', 'END_TIME']
    cluster_centroids.reset_index(inplace=True)

    elapsed_time = time.time() - start_time
    print(f"闪电聚类完成! 共识别出 {current_cluster_id} 个聚类，耗时: {elapsed_time:.2f}秒")

    return df, cluster_centroids


def thunderstorm_grouping(cluster_centroids, t2_threshold=2520, d2_threshold=60):
    """
    Group lightning-cluster centroids into thunderstorm cells.

    Single-pass assignment: each still-unassigned centroid seeds a new storm
    and absorbs later unassigned centroids within `d2_threshold` km and
    `t2_threshold` seconds of it (no transitive expansion, unlike
    lightning_clustering).

    Parameters:
    cluster_centroids: DataFrame with CENTROID_LAT, CENTROID_LON, START_TIME
    t2_threshold: time threshold in seconds
    d2_threshold: spatial threshold in km

    Returns:
    cluster_centroids with a THUNDERSTORM_ID column added (ids start at 1).
    """
    print("开始雷暴云团分组...")
    start_time = time.time()

    # Centroid coordinates and start times as POSIX seconds.
    coords = cluster_centroids[['CENTROID_LAT', 'CENTROID_LON']].values
    timestamps = cluster_centroids['START_TIME'].values.astype(np.int64) // 10 ** 9

    thunderstorm_id = 0

    # Positional label array, -1 = not yet assigned. Working through a plain
    # array replaces the per-row chained indexing (`.iloc[i][col]` reads and
    # `.loc[i, col]` writes) that was both slow and silently dependent on the
    # frame having a default RangeIndex.
    storm_labels = np.full(len(cluster_centroids), -1, dtype=np.int64)

    # KD tree on centroid positions (same approximate km->degree conversion
    # as in lightning_clustering).
    spatial_threshold_deg = d2_threshold / 111.32
    tree = cKDTree(coords)

    total_clusters = len(cluster_centroids)
    for i in range(total_clusters):
        if storm_labels[i] == -1:
            # Seed a new thunderstorm cell.
            thunderstorm_id += 1
            storm_labels[i] = thunderstorm_id

            # All centroids within the spatial radius of the seed.
            spatial_neighbors = tree.query_ball_point(coords[i], spatial_threshold_deg)

            # Absorb later, unassigned neighbours inside the time window.
            for j in spatial_neighbors:
                if j > i and storm_labels[j] == -1:
                    time_diff = abs(timestamps[j] - timestamps[i])

                    if time_diff <= t2_threshold:
                        storm_labels[j] = thunderstorm_id

        # Periodic progress report with a naive linear time estimate.
        if (i + 1) % 100 == 0:
            elapsed_time = time.time() - start_time
            progress = (i + 1) / total_clusters * 100
            estimated_total_time = elapsed_time / progress * 100
            remaining_time = estimated_total_time - elapsed_time
            print(f"已处理 {i + 1}/{total_clusters} 个聚类 ({progress:.2f}%) - "
                  f"耗时: {elapsed_time:.2f}秒 - 预计剩余时间: {remaining_time:.2f}秒")

    # Write the labels back in one vectorised assignment.
    cluster_centroids['THUNDERSTORM_ID'] = storm_labels

    elapsed_time = time.time() - start_time
    print(f"雷暴云团分组完成! 共识别出 {thunderstorm_id} 个雷暴云团，耗时: {elapsed_time:.2f}秒")

    return cluster_centroids


def calculate_thunderstorm_metrics(lightning_df, cluster_centroids, output_dir='results'):
    """
    Compute summary statistics for every thunderstorm cell and save the
    results as CSV files under `output_dir`.

    Parameters:
    lightning_df: DataFrame of strikes carrying a LIGHTNING_CLUSTER label
    cluster_centroids: DataFrame mapping LIGHTNING_CLUSTER -> THUNDERSTORM_ID
    output_dir: directory the two result CSV files are written into

    Returns:
    thunderstorm_metrics: DataFrame with one row of metrics per thunderstorm
    """
    print("计算雷暴云团统计指标...")
    start_time = time.time()

    # Ensure the output directory exists.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Attach each strike's thunderstorm id via its lightning cluster.
    lightning_with_clusters = pd.merge(
        lightning_df,
        cluster_centroids[['LIGHTNING_CLUSTER', 'THUNDERSTORM_ID']],
        on='LIGHTNING_CLUSTER',
        how='left'
    )

    thunderstorm_metrics = []

    # Group once instead of re-filtering the whole frame per storm id (the
    # previous boolean-mask-per-id loop was O(storms x strikes)).
    # sort=False keeps first-appearance order, matching unique(); rows whose
    # THUNDERSTORM_ID is NaN are skipped by groupby, matching dropna().
    grouped = lightning_with_clusters.groupby('THUNDERSTORM_ID', sort=False)
    total_storms = grouped.ngroups

    for i, (thunderstorm_id, group) in enumerate(grouped):
        # 1. total flash count
        total_flashes = len(group)

        # 2. storm initiation time
        initiation_time = group['DATETIME'].min()

        # 3. storm cessation time
        cessation_time = group['DATETIME'].max()

        # 4. storm duration
        duration = cessation_time - initiation_time
        duration_minutes = duration.total_seconds() / 60

        # 5. flash rate per minute (0 for a single-instant storm)
        flashes_per_minute = total_flashes / duration_minutes if duration_minutes > 0 else 0

        # 6. travel distance: compare the mean positions of the earliest
        # and latest 10% of strikes (at least one strike each).
        sorted_group = group.sort_values('DATETIME')

        ten_percent = max(1, int(len(sorted_group) * 0.1))
        first_ten_percent = sorted_group.head(ten_percent)
        last_ten_percent = sorted_group.tail(ten_percent)

        first_mean_lat = first_ten_percent['LATITUDE'].mean()
        first_mean_lon = first_ten_percent['LONGITUDE'].mean()
        last_mean_lat = last_ten_percent['LATITUDE'].mean()
        last_mean_lon = last_ten_percent['LONGITUDE'].mean()

        distance_travelled = haversine_distance(
            first_mean_lat, first_mean_lon,
            last_mean_lat, last_mean_lon
        )

        # 7. direction the storm came from (see calculate_bearing)
        bearing = calculate_bearing(
            first_mean_lat, first_mean_lon,
            last_mean_lat, last_mean_lon
        )

        # 8. storm area, approximated by the bounding box.
        # NOTE: a kernel-density footprint would be more faithful; the box is
        # a deliberate simplification.
        lat_min = group['LATITUDE'].min()
        lat_max = group['LATITUDE'].max()
        lon_min = group['LONGITUDE'].min()
        lon_max = group['LONGITUDE'].max()

        # Box edge lengths in km.
        lat_diff = haversine_distance(lat_min, lon_min, lat_max, lon_min)
        lon_diff = haversine_distance(lat_min, lon_min, lat_min, lon_max)
        area = lat_diff * lon_diff

        # 9. flash density per square km
        flashes_per_km2 = total_flashes / area if area > 0 else 0

        # 10. storm speed over a duration shortened by 10%, compensating for
        # the head/tail averaging used for the travel distance.
        adjusted_duration_hours = (duration.total_seconds() / 3600) * 0.9
        speed = distance_travelled / adjusted_duration_hours if adjusted_duration_hours > 0 else 0

        # Collect this storm's metrics row.
        thunderstorm_metrics.append({
            'THUNDERSTORM_ID': thunderstorm_id,
            'TOTAL_FLASHES': total_flashes,
            'INITIATION_TIME': initiation_time,
            'CESSATION_TIME': cessation_time,
            'DURATION_MINUTES': duration_minutes,
            'FLASHES_PER_MINUTE': flashes_per_minute,
            'DISTANCE_TRAVELLED_KM': distance_travelled,
            'BEARING_DEGREES': bearing,
            'AREA_KM2': area,
            'FLASHES_PER_KM2': flashes_per_km2,
            'SPEED_KM_H': speed,
            'MIN_LONGITUDE': lon_min,
            'MAX_LONGITUDE': lon_max,
            'MIN_LATITUDE': lat_min,
            'MAX_LATITUDE': lat_max
        })

        # Periodic progress report with a naive linear time estimate.
        if (i + 1) % 100 == 0:
            elapsed_time = time.time() - start_time
            progress = (i + 1) / total_storms * 100
            estimated_total_time = elapsed_time / progress * 100
            remaining_time = estimated_total_time - elapsed_time
            print(f"已计算 {i + 1}/{total_storms} 个雷暴云团指标 ({progress:.2f}%) - "
                  f"耗时: {elapsed_time:.2f}秒 - 预计剩余时间: {remaining_time:.2f}秒")

    # Assemble the result frame.
    thunderstorm_metrics = pd.DataFrame(thunderstorm_metrics)

    # Persist both the labelled strikes and the per-storm metrics.
    print("保存分析结果...")
    lightning_with_clusters.to_csv(f"{output_dir}/filtered_黑龙江2021-2023_lightning_with_clusters.csv", index=False)
    thunderstorm_metrics.to_csv(f"{output_dir}/filtered_黑龙江2021-2023_thunderstorm_metrics.csv", index=False)

    elapsed_time = time.time() - start_time
    print(f"雷暴云团指标计算完成! 共计算 {len(thunderstorm_metrics)} 个指标，耗时: {elapsed_time:.2f}秒")

    return thunderstorm_metrics


def main():
    """Run the full pipeline: load data, cluster strikes, group storms, compute metrics."""
    # Input/output locations. os.path.join yields the correct separator on
    # every platform; the previous hard-coded "results\\Galanaki_1" produced
    # a literal backslash in the directory name on POSIX systems.
    file_path = "results/lightning-hulunbeier-daxinganling/filtered_黑龙江2021-2023.csv"  # replace with the actual data file path
    output_dir = os.path.join("results", "Galanaki_1")

    print("===== 开始雷电数据聚类分析 =====")
    start_time = time.time()

    # Stage 1: load and preprocess the raw strike data.
    df = load_and_preprocess_data(file_path)
    elapsed_time = time.time() - start_time
    print(f"数据加载和预处理完成! 共 {len(df)} 条记录，耗时: {elapsed_time:.2f}秒")

    # Stage 2: spatio-temporal clustering of individual strikes.
    lightning_df, cluster_centroids = lightning_clustering(df)
    elapsed_time = time.time() - start_time
    print(f"闪电聚类阶段完成! 总耗时: {elapsed_time:.2f}秒")

    # Stage 3: group the cluster centroids into thunderstorm cells.
    cluster_centroids = thunderstorm_grouping(cluster_centroids)
    elapsed_time = time.time() - start_time
    print(f"雷暴云团分组阶段完成! 总耗时: {elapsed_time:.2f}秒")

    # Stage 4: per-storm summary metrics, saved to output_dir.
    thunderstorm_metrics = calculate_thunderstorm_metrics(lightning_df, cluster_centroids, output_dir)
    elapsed_time = time.time() - start_time
    print(f"雷暴云团指标计算阶段完成! 总耗时: {elapsed_time:.2f}秒")

    print("===== 分析完成! =====")
    print(f"结果已保存至 {output_dir} 目录")
    print(f"总耗时: {elapsed_time:.2f}秒")

    # Final summary statistics.
    print(f"\n共分析 {len(df)} 条闪电数据")
    print(f"识别出 {len(cluster_centroids['LIGHTNING_CLUSTER'].unique())} 个闪电聚类")
    print(f"识别出 {len(thunderstorm_metrics)} 个雷暴云团")


if __name__ == "__main__":
    main()