import pandas as pd
import numpy as np
from datetime import datetime
import dask.dataframe as dd
from sklearn.cluster import DBSCAN
from haversine import haversine, Unit
import os


# 1. Load raw data (Dask handles files too large for memory)
def load_data(file_path, *, count_rows=True):
    """Lazily read a large CSV with Dask to avoid memory overflow.

    Parameters
    ----------
    file_path : str
        Path of the CSV file to read.
    count_rows : bool, keyword-only, default True
        When True (original behavior) compute and print the row count.
        This forces a full scan of the file just for logging, so callers
        with very large inputs can pass False to keep the load fully lazy.

    Returns
    -------
    dask.dataframe.DataFrame or None
        The lazy dataframe, or None if anything went wrong (the error is
        printed, not raised).
    """
    try:
        # Explicit dtypes avoid Dask's per-partition dtype-inference errors.
        custom_dtypes = {
            'ERROR': 'float64',    # reported as float64 in the original inference error
            'LOCATION': 'object',  # contains non-integer values, so keep as object
            'SLOPE': 'float64'     # reported as float64 in the original inference error
        }

        df = dd.read_csv(file_path, dtype=custom_dtypes)
        if count_rows:
            # df.shape[0].compute() scans the entire file just to log a
            # count -- skip via count_rows=False for huge inputs.
            print(f"成功加载数据，行数: {df.shape[0].compute()}, 列数: {df.shape[1]}")
        return df
    except Exception as e:
        # Broad catch is deliberate: this is a top-level load boundary that
        # logs and signals failure with None instead of raising.
        print(f"数据加载错误: {e}")
        return None


# 2. Preprocess: filter CG (cloud-to-ground) records and save them
def preprocess_data(df, output_path):
    """Filter cloud-to-ground lightning rows and save them as one CSV.

    Parameters
    ----------
    df : dask.dataframe.DataFrame
        Raw lightning data; must contain a 'CG_IC' column.
    output_path : str
        Destination path for the single output CSV file.

    Returns
    -------
    dask.dataframe.DataFrame or None
        The filtered (still lazy) dataframe, or None on failure (the error
        is printed, not raised).
    """
    try:
        # Keep only cloud-to-ground ('CG') records.
        cg_df = df[df['CG_IC'] == 'CG']
        # NOTE: .compute() here forces one full pass over the data purely
        # for logging; to_csv below triggers a second pass.
        print(f"筛选后CG数据行数: {cg_df.shape[0].compute()}")

        # single_file=True makes Dask write exactly one CSV at output_path.
        # Without it, Dask's default writes one part-file per partition,
        # which is not the single file the success message promises.
        cg_df.to_csv(output_path, index=False, single_file=True)
        print(f"CG数据已保存至: {output_path}")
        return cg_df
    except Exception as e:
        # Log-and-return-None error contract, consistent with load_data.
        print(f"数据预处理错误: {e}")
        return None


# Main preprocessing pipeline
def preprocess_main(input_file, output_file):
    """Run the preprocessing pipeline: load the raw CSV, then filter
    CG records and save them. Returns the filtered dataframe, or None
    if either stage failed."""
    raw = load_data(input_file)
    if raw is None:
        return None
    return preprocess_data(raw, output_file)


# Example invocation
if __name__ == "__main__":
    input_file = "2018-1.csv"
    preprocessed_file = "2018_cg_lightning_data.csv"
    # NOTE(review): clustered_file is never used in this script -- presumably
    # intended for a later storm-clustering step; confirm before removing.
    clustered_file = "2018_clustered_storms.csv"

    preprocess_main(input_file, preprocessed_file)