#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
干旱指标计算模块

本模块实现了两个干旱指标的计算：
1. 长周期旱涝急转指数 (LDFAI)
2. 短周期旱涝急转指数 (DWAAI)

作者: AI Assistant
日期: 2024
"""
import os
import glob
import argparse

import numpy as np
import pandas as pd
import xarray as xr

from datetime import datetime
from typing import Dict, List, Tuple, Optional, Set

# Import DailyParser for parsing the ERA5 daily precipitation files; when the
# helper module is unavailable, fall back to a simplified date-handling path
# (DailyParser stays None and callers must check for that).
try:
    from parse_era5_data import DailyParser
except ImportError:
    print("警告: 无法导入DailyParser，将使用简化的日期处理方法")
    DailyParser = None

# Cache and output directories (relative to the script's working directory)
CACHE_DIR = "../cache_data"
OUTPUT_DIR = "../output_data"



# Northern provinces targeted by the analysis
TARGET_PROVINCES = [
    '黑龙江', '新疆', '山西', '宁夏', '山东', '河南', '吉林', '辽宁',
    '天津', '青海', '陕西', '内蒙古', '河北', '北京', '甘肃'
]

# Province bounding boxes in degrees (extracted from province_bounds_table.txt);
# used as a fallback when no exact grid-point file exists for a province
PROVINCE_BOUNDS = {
    '黑龙江': {'min_lon': 121.188532, 'min_lat': 43.419300, 'max_lon': 134.981033, 'max_lat': 53.513562},
    '新疆': {'min_lon': 73.530848, 'min_lat': 34.396244, 'max_lon': 96.375838, 'max_lat': 49.156872},
    '山西': {'min_lon': 110.281372, 'min_lat': 34.572533, 'max_lon': 114.523865, 'max_lat': 40.784574},
    '宁夏': {'min_lon': 104.268991, 'min_lat': 35.240039, 'max_lon': 107.664898, 'max_lat': 39.370838},
    '山东': {'min_lon': 114.840635, 'min_lat': 34.392154, 'max_lon': 122.675788, 'max_lat': 38.414819},
    '河南': {'min_lon': 110.352192, 'min_lat': 31.391876, 'max_lon': 116.630567, 'max_lat': 36.356128},
    '吉林': {'min_lon': 121.632720, 'min_lat': 40.861617, 'max_lon': 131.230238, 'max_lat': 46.285084},
    '辽宁': {'min_lon': 118.801131, 'min_lat': 38.724161, 'max_lon': 125.716590, 'max_lat': 43.465273},
    '天津': {'min_lon': 116.668669, 'min_lat': 38.571271, 'max_lon': 118.019208, 'max_lat': 40.237236},
    '青海': {'min_lon': 89.387704, 'min_lat': 31.623272, 'max_lon': 103.021554, 'max_lat': 39.208221},
    '陕西': {'min_lon': 105.502845, 'min_lat': 31.699524, 'max_lon': 111.274666, 'max_lat': 39.612638},
    '内蒙古': {'min_lon': 97.150654, 'min_lat': 37.388945, 'max_lon': 125.999845, 'max_lat': 53.296875},
    '河北': {'min_lon': 113.457664, 'min_lat': 36.070422, 'max_lon': 119.816951, 'max_lat': 42.589842},
    '北京': {'min_lon': 115.416166, 'min_lat': 39.440279, 'max_lon': 117.497682, 'max_lat': 41.056872},
    '甘肃': {'min_lon': 92.361523, 'min_lat': 32.638758, 'max_lon': 108.700159, 'max_lat': 42.792213}
}

# Directory holding the per-province grid-point CSV files
GRID_POINTS_DIR = "../province_grid_points"


class DroughtIndicesCalculator:
    """
    Drought index calculator.

    Loads ERA5 daily precipitation data, extracts per-province subsets
    (exact grid points when available, bounding boxes otherwise), computes
    standardized monthly precipitation over all target regions, and derives
    the long-cycle drought-flood abrupt alternation index (LDFAI).
    """

    def __init__(self, data_path: str = None, target_provinces: List[str] = None):
        """
        Initialize the calculator.

        Args:
            data_path: Directory containing the ERA5 NetCDF files; None uses
                the default relative path.
            target_provinces: Provinces to process; None means use the global
                TARGET_PROVINCES list.
        """
        self.data_path = data_path
        self.precipitation_data = None  # most recently loaded dataset
        self.parser = None  # DailyParser for bday->date conversion; may stay None
        self.main_var_name = "P"  # precipitation variable name in the ERA5 files
        self.lat_dim, self.lon_dim = None, None  # cached spatial dimension names
        self.target_provinces = target_provinces if target_provinces else TARGET_PROVINCES

    def load_era5_data(self, year: int) -> "xr.Dataset":
        """
        Load the ERA5 daily precipitation data for one year.

        Args:
            year: Year to load.

        Returns:
            xarray.Dataset: Precipitation dataset for the year.

        Raises:
            FileNotFoundError: If the NetCDF file for the year is missing.
        """
        if self.data_path is None:
            # Default relative location of the ERA5 archive.
            file_path = f"Daily_ERA5(1960-2024)/ERA5.P.daily.{year}.nc"
        else:
            file_path = f"{self.data_path}/ERA5.P.daily.{year}.nc"

        try:
            ds = xr.open_dataset(file_path)
            # DailyParser is None when parse_era5_data failed to import; skip
            # it so the simplified month-estimation fallback is used instead
            # of crashing with a TypeError here (fixes the unguarded call).
            if DailyParser is not None:
                self.parser = DailyParser(ds, file_path)
            self.precipitation_data = ds
            return ds
        except FileNotFoundError:
            raise FileNotFoundError(f"找不到文件: {file_path}")

    def extract_province_data(self, ds: "xr.Dataset", province: str) -> "xr.Dataset":
        """
        Extract the data subset covering one province.

        Prefers the exact per-province grid-point list; falls back to the
        province bounding box when no grid-point data is available.

        Args:
            ds: Source dataset.
            province: Province name.

        Returns:
            xarray.Dataset: Subset of the data covering the province.

        Raises:
            ValueError: If the province is unsupported or has no bounds data.
        """
        # Only provinces in the configured target list are supported.
        if province not in self.target_provinces:
            raise ValueError(f"不支持的省份: {province}，支持的省份有: {', '.join(self.target_provinces)}")
        # Try the precise grid-point data first.
        grid_points = self.load_province_grid_points(province)
        if grid_points is not None and not grid_points.empty:
            print(f"使用精确网格点数据提取{province}省份数据，共{len(grid_points)}个点位")
            lat_dim, lon_dim = self.get_spatial_dimensions(ds)
            # Longitude/latitude lists of the province's grid points.
            lons = grid_points['经度'].tolist()
            lats = grid_points['纬度'].tolist()
            # Boolean mask marking the selected grid cells.
            mask = xr.DataArray(
                np.zeros((len(ds[lat_dim]), len(ds[lon_dim])), dtype=bool),
                dims=(lat_dim, lon_dim),
                coords={lat_dim: ds[lat_dim], lon_dim: ds[lon_dim]}
            )
            for lon, lat in zip(lons, lats):
                # Snap each point to the nearest grid cell.
                lat_idx = abs(ds[lat_dim] - lat).argmin()
                lon_idx = abs(ds[lon_dim] - lon).argmin()
                mask[lat_idx, lon_idx] = True
            # NOTE: where(..., drop=True) trims to the bounding box of the
            # selected cells; unselected cells inside that box become NaN and
            # are filtered out downstream when sums turn out NaN.
            province_data = ds.where(mask, drop=True)
            return province_data
        else:
            # Fallback: crop by the province bounding box.
            print(f"使用边界框方法提取{province}省份数据")
            if province not in PROVINCE_BOUNDS:
                raise ValueError(f"找不到省份边界数据: {province}")

            bounds = PROVINCE_BOUNDS[province]
            min_lon, max_lon = bounds['min_lon'], bounds['max_lon']
            min_lat, max_lat = bounds['min_lat'], bounds['max_lat']

            lat_dim, lon_dim = self.get_spatial_dimensions(ds)

            # NOTE(review): slice(min, max) assumes ascending coordinate
            # order in the files — TODO confirm against the ERA5 data.
            province_data = ds.sel(
                {lon_dim: slice(min_lon, max_lon), lat_dim: slice(min_lat, max_lat)}
            )
            return province_data

    def load_province_grid_points(self, province: str) -> pd.DataFrame:
        """
        Load the grid-point table for a province.

        Args:
            province: Province name.

        Returns:
            pandas.DataFrame: Grid points with latitude/longitude columns, or
            None when no data could be loaded.
        """
        try:
            # Per-province CSV first.
            file_path = os.path.join(GRID_POINTS_DIR, f"{province}_grid_points.csv")
            if os.path.exists(file_path):
                df = pd.read_csv(file_path)
                print(f"成功加载{province}网格点数据，共{len(df)}个点位")
                return df

            # Otherwise extract the province's rows from the combined file.
            all_provinces_file = os.path.join(GRID_POINTS_DIR, "all_provinces_grid_points.csv")
            if os.path.exists(all_provinces_file):
                all_df = pd.read_csv(all_provinces_file)
                province_df = all_df[all_df['省份'] == province]

                if not province_df.empty:
                    print(f"从汇总文件中提取{province}网格点数据，共{len(province_df)}个点位")
                    return province_df

            print(f"找不到{province}的网格点数据文件")
            return None

        except Exception as e:
            # Best-effort loader: report and let the caller fall back to the
            # bounding-box extraction.
            print(f"加载{province}网格点数据时出错: {e}")
            return None

    def get_spatial_dimensions(self, ds: "xr.Dataset") -> Tuple[str, str]:
        """
        Resolve (and cache) the names of the spatial dimensions.

        Args:
            ds: Dataset to inspect.

        Returns:
            Tuple[str, str]: (latitude dim name, longitude dim name).

        Raises:
            ValueError: If no recognizable spatial dimensions are found.
        """
        # Reuse cached names once resolved; assumes all datasets processed by
        # one calculator instance share the same dimension names.
        if self.lat_dim is not None and self.lon_dim is not None:
            return self.lat_dim, self.lon_dim
        # Probe the common dimension names.
        lat_candidates = ['lat', 'latitude', 'y']
        lon_candidates = ['lon', 'longitude', 'x']

        lat_dim = None
        for lat_name in lat_candidates:
            if lat_name in ds.dims:
                lat_dim = lat_name
                break

        lon_dim = None
        for lon_name in lon_candidates:
            if lon_name in ds.dims:
                lon_dim = lon_name
                break

        if lat_dim is None or lon_dim is None:
            raise ValueError(f"无法识别数据集的空间维度，可用维度: {list(ds.dims)}")
        self.lat_dim, self.lon_dim = lat_dim, lon_dim
        return lat_dim, lon_dim

    def save_results(self, results: Dict, output_file: str = "drought_indices_results.csv"):
        """
        Save per-province results to a CSV file.

        Args:
            results: Mapping of province name -> dict of index values.
            output_file: Output CSV file name.
        """
        # Flatten the nested dict into one row per province.
        df_data = []
        for province, indices in results.items():
            row = {'Province': province}
            row.update(indices)
            df_data.append(row)

        df = pd.DataFrame(df_data)
        # utf-8-sig keeps Chinese headers readable when opened in Excel.
        df.to_csv(output_file, index=False, encoding='utf-8-sig')
        print(f"结果已保存到: {output_file}")

    def _collect_all_regions_data(self, start_year: int, end_year: int, months: List[int], months_str: str, cache_file: str) -> Tuple[Dict, Set]:
        """
        Collect the selected-month precipitation of every region and cache it.

        Args:
            start_year: First year (inclusive).
            end_year: Last year (inclusive).
            months: Months to extract.
            months_str: Display string for the months, e.g. "5-6月".
            cache_file: Path of the .npy cache file to write.

        Returns:
            Tuple[Dict, Set]: Per-province data keyed by year, and the set of
            processed years.
        """
        print(f"开始收集{start_year}-{end_year}年所有区域{months_str}数据...")

        province_data = {}
        all_years = set()

        for province in self.target_provinces:
            province_data[province] = {}

        # One file load per year; every province is extracted from it.
        for year in range(start_year, end_year + 1):
            print(f"处理 {year}年所有区域数据...")
            ds = self.load_era5_data(year)

            for province in self.target_provinces:

                province_data_year = self.extract_province_data(ds, province)

                lat_dim, lon_dim = self.get_spatial_dimensions(province_data_year)
                # Full lat x lon product of the extracted subset.
                # NOTE(review): could be vectorized instead of per-point loops.
                grid_points = []
                for lat in province_data_year[lat_dim].values:
                    for lon in province_data_year[lon_dim].values:
                        grid_points.append((lat, lon))

                # Raw precipitation field (no spatial averaging).
                precip_data = province_data_year.P

                # 'bday' is the time dimension of the daily files.
                bday_values = precip_data.bday.values

                if self.parser:
                    # Convert bday values to dates to get exact months.
                    dates = [self.parser.format_date(bday) for bday in bday_values]
                    data_months = [int(date.split('-')[1]) for date in dates]
                else:
                    # Rough fallback: roughly 30 days per month.
                    data_months = [min(12, max(1, int(bday / 30) + 1)) for bday in bday_values]

                # Indices of all days that fall in the requested months.
                month_indices = []
                for month in months:
                    month_indices.extend([i for i, m in enumerate(data_months) if m == month])

                month_precip_data = precip_data.isel(bday=month_indices)

                # Per-grid-point daily series for the selected months.
                grid_point_data = []
                for i, (lat, lon) in enumerate(grid_points):
                    # Nearest-cell indices for this point.
                    lat_idx = np.argmin(np.abs(month_precip_data[lat_dim].values - lat))
                    lon_idx = np.argmin(np.abs(month_precip_data[lon_dim].values - lon))

                    point_data = month_precip_data.isel({lat_dim: lat_idx, lon_dim: lon_idx}).values.tolist()
                    grid_point_data.append({
                        'lat': lat,
                        'lon': lon,
                        'precip_values': point_data
                    })

                province_data[province][year] = {
                    "grid_point_data": grid_point_data,
                    "grid_points": grid_points,
                }
                if self.parser:
                    # Keep the matching dates for traceability.
                    date_info = [self.parser.format_date(bday_values[idx]) for idx in month_indices]
                    province_data[province][year]['date_values'] = date_info
                all_years.add(year)

        # Persist everything so later runs can skip the expensive collection;
        # caching is best-effort and never fails the computation.
        try:
            cache_data = {
                'province_data': province_data,
                'all_years': all_years
            }
            np.save(cache_file, cache_data)
            print(f"已将{months_str}数据保存到缓存文件: {cache_file}")
        except Exception as e:
            print(f"保存缓存数据失败: {e}")

        return province_data, all_years

    def calculate_standardized_precipitation_all_regions(self, months: Optional[List[int]] = None) -> Dict[str, float]:
        """
        Compute standardized precipitation over all regions for given months.

        Args:
            months: Months to include; None means [5, 6] (May-June). The
                mutable-list default of the previous version is replaced by
                None for safety; call behavior is unchanged.

        Returns:
            Dict: 'mean' and 'std' of the per-grid-point seasonal totals,
            'yearly_data' with per-year standardized values, and the raw
            'province_data'.
        """
        if months is None:
            months = [5, 6]

        start_year = 1960
        end_year = 2024

        # Display string such as "5-6月".
        months_str = "-".join([str(m) for m in months]) + "月"

        # province_data[province][year] = {"grid_point_data": [...], ...}
        province_data = {}
        all_years = set()

        # Reuse cached data when available.
        cache_filename = f"cache_all_regions_{months_str}_{start_year}_{end_year}.npy"
        cache_file = os.path.join(CACHE_DIR, cache_filename)

        if os.path.exists(cache_file):
            try:
                print(f"从缓存文件加载{months_str}数据...")
                cached_data = np.load(cache_file, allow_pickle=True).item()
                province_data = cached_data['province_data']
                all_years = cached_data['all_years']
                print(f"成功从缓存加载{len(all_years)}年的数据")
            except Exception as e:
                # Corrupt/unreadable cache: fall back to recomputation.
                print(f"加载缓存数据失败: {e}，将重新计算")
                province_data = {}
                all_years = set()

        # Recompute when nothing usable was cached.
        if not province_data:
            province_data, all_years = self._collect_all_regions_data(start_year, end_year, months, months_str, cache_file)

        years_list = sorted(list(all_years))

        # Seasonal totals of every grid point, pooled for global statistics.
        all_precip_values = []
        yearly_means = {}

        for year in years_list:
            year_all_values = []
            for province in self.target_provinces:
                if year not in province_data[province]:
                    continue
                grid_point_data = province_data[province][year].get('grid_point_data', [])
                for point_data in grid_point_data:
                    precip_values = point_data.get('precip_values', [])
                    # NaN sums come from masked-out grid cells; skip them.
                    point_sum = np.sum(precip_values)
                    if np.isnan(point_sum) or not np.isfinite(point_sum):
                        continue
                    year_all_values.append(point_sum)
                    all_precip_values.append(point_sum)

            # Mean over all valid grid points of the year.
            if year_all_values:
                yearly_means[year] = np.mean(year_all_values)
            else:
                yearly_means[year] = np.nan

        # Global mean and sample standard deviation over every point-year.
        if all_precip_values:
            mean_precip = np.mean(all_precip_values)
            std_precip = np.std(all_precip_values, ddof=1)
        else:
            mean_precip = np.nan
            std_precip = np.nan

        print(f"所有区域 {months_str} 降水量平均值: {mean_precip:.4f}, 标准差: {std_precip:.4f}")
        print(f"总数据点数量: {len(all_precip_values)}")

        # Standardize each yearly mean against the global statistics.
        results = {}
        for year in years_list:
            if not np.isnan(yearly_means[year]) and not np.isnan(std_precip) and std_precip > 0:
                standardized_precip = (yearly_means[year] - mean_precip) / std_precip
                results[year] = {
                    f'R{months_str}': standardized_precip,
                    f'Precip_{months_str}': yearly_means[year]
                }

        # Result dict: global stats, per-year standardized values, raw data.
        return {
            'mean': mean_precip,
            'std': std_precip,
            'yearly_data': results,
            'province_data': province_data
        }

    def calculate_ldfai_all_years(self, results_1: Dict, results_2: Dict) -> Dict[int, float]:
        """
        Compute the long-cycle drought-flood abrupt alternation index (LDFAI)
        for every year present in either period.

        LDFAI = (R2 - R1) * (|R1| + |R2|) * 1.8^-(R1 + R2), where R1/R2 are
        the standardized precipitation of the two periods; a period missing
        for a year contributes 0.

        Args:
            results_1: Result dict of the first period.
            results_2: Result dict of the second period.

        Returns:
            Dict[int, float]: Year -> LDFAI value.
        """
        # Union of years across both periods.
        all_years = sorted(set(
            list(results_1['yearly_data'].keys()) +
            list(results_2['yearly_data'].keys())
        ))

        # Discover the 'R<months>' key names from the first available year.
        r_key_1 = None
        r_key_2 = None

        first_year_1 = next(iter(results_1['yearly_data'])) if results_1['yearly_data'] else None
        first_year_2 = next(iter(results_2['yearly_data'])) if results_2['yearly_data'] else None

        if first_year_1:
            r_key_1 = next((k for k in results_1['yearly_data'][first_year_1].keys() if k.startswith('R')), None)

        if first_year_2:
            r_key_2 = next((k for k in results_2['yearly_data'][first_year_2].keys() if k.startswith('R')), None)

        ldfai_results = {}
        for year in all_years:
            # Standardized precipitation of each period (0 when absent).
            r1 = 0
            r2 = 0

            if year in results_1['yearly_data'] and r_key_1:
                r1 = results_1['yearly_data'][year].get(r_key_1, 0)

            if year in results_2['yearly_data'] and r_key_2:
                r2 = results_2['yearly_data'][year].get(r_key_2, 0)

            if abs(r1) + abs(r2) > 0:
                ldfai = (r2 - r1) * (abs(r1) + abs(r2)) * (1.8 ** (-(r1 + r2)))
            else:
                # Both values missing/zero: define the index as 0.
                ldfai = 0

            ldfai_results[year] = ldfai

        return ldfai_results

    def save_all_regions_precipitation_and_ldfai_to_excel(self, results_1: Dict, results_2: Dict, ldfai_results: Dict[int, float], output_file: str):
        """
        Write the standardized precipitation of both periods and the LDFAI
        values to an Excel workbook (sheets: 统计信息, 年度数据).

        Args:
            results_1: Result dict of the first period.
            results_2: Result dict of the second period.
            ldfai_results: Year -> LDFAI mapping.
            output_file: Output .xlsx path.
        """
        # Recover the month labels from the first year of each result dict.
        first_year_1 = next(iter(results_1['yearly_data'])) if results_1['yearly_data'] else None
        first_year_2 = next(iter(results_2['yearly_data'])) if results_2['yearly_data'] else None

        months_str_1 = None
        months_str_2 = None

        if first_year_1:
            # The 'R<months>' key carries the month label.
            for key in results_1['yearly_data'][first_year_1].keys():
                if key.startswith('R'):
                    months_str_1 = key[1:]
                    break

        if first_year_2:
            for key in results_2['yearly_data'][first_year_2].keys():
                if key.startswith('R'):
                    months_str_2 = key[1:]
                    break

        # Placeholders when the labels cannot be recovered.
        months_str_1 = months_str_1 or '未知月份1'
        months_str_2 = months_str_2 or '未知月份2'

        # Context manager guarantees the workbook is closed even on error.
        with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
            # Summary-statistics sheet.
            stats_data = [
                {'月份': months_str_1, '平均值': results_1['mean'], '标准差': results_1['std']},
                {'月份': months_str_2, '平均值': results_2['mean'], '标准差': results_2['std']}
            ]
            stats_df = pd.DataFrame(stats_data)
            stats_df.to_excel(writer, sheet_name='统计信息', index=False)

            # Union of years across both periods.
            all_years = sorted(set(
                list(results_1['yearly_data'].keys()) +
                list(results_2['yearly_data'].keys())
            ))

            yearly_data = []
            for year in all_years:
                row = {'年份': year}
                # A year may exist in only one of the two periods, so use an
                # empty-dict fallback instead of indexing directly (the old
                # code raised KeyError for such years).
                year_entry_1 = results_1['yearly_data'].get(year, {})
                r_key_1 = next((k for k in year_entry_1.keys() if k.startswith('R')), None)
                precip_key_1 = next((k for k in year_entry_1.keys() if k.startswith('Precip_')), None)

                if r_key_1 and precip_key_1:
                    row[f'R {months_str_1}'] = year_entry_1[r_key_1]
                    row[f'Precip Annual {months_str_1}'] = year_entry_1[precip_key_1]
                else:
                    row[f'R {months_str_1}'] = np.nan
                    row[f'Precip Annual {months_str_1}'] = np.nan

                # Second-period columns, with the same guard.
                year_entry_2 = results_2['yearly_data'].get(year, {})
                r_key_2 = next((k for k in year_entry_2.keys() if k.startswith('R')), None)
                precip_key_2 = next((k for k in year_entry_2.keys() if k.startswith('Precip_')), None)

                if r_key_2 and precip_key_2:
                    row[f'R {months_str_2}'] = year_entry_2[r_key_2]
                    row[f'Precip Annual {months_str_2}'] = year_entry_2[precip_key_2]
                else:
                    row[f'R {months_str_2}'] = np.nan
                    row[f'Precip Annual {months_str_2}'] = np.nan

                # LDFAI column (NaN when the year has no computed index).
                row['LDFAI'] = ldfai_results.get(year, np.nan)

                yearly_data.append(row)

            yearly_df = pd.DataFrame(yearly_data)
            yearly_df.to_excel(writer, sheet_name='年度数据', index=False)

        print(f"所有区域标准化降水量和LDFAI结果已保存到: {output_file}")


def convert_cache_to_excel(cache_file_path, output_excel_path=None, province=None):
    """
    Convert an .npy cache file to an Excel file in a simplified layout:
    one row per grid point (province, latitude, longitude) with one column
    per year holding that point's summed precipitation.

    Args:
        cache_file_path: Path of the .npy cache file.
        output_excel_path: Output Excel path; auto-generated when None.
        province: Restrict output to one province; None exports all provinces.

    Returns:
        str: Path of the written Excel file.

    Raises:
        FileNotFoundError: If the cache file does not exist.
    """
    if not os.path.exists(cache_file_path):
        raise FileNotFoundError(f"缓存文件不存在: {cache_file_path}")

    # Load the pickled cache payload ({'province_data': ..., 'all_years': ...}).
    print(f"加载缓存文件: {cache_file_path}")
    cached_data = np.load(cache_file_path, allow_pickle=True).item()
    province_data = cached_data['province_data']
    all_years = cached_data['all_years']

    # Auto-generate an output path when none was supplied.
    if output_excel_path is None:
        import re

        # Recover the month range (e.g. "5-6月") from the cache file name.
        cache_filename = os.path.basename(cache_file_path)
        months_match = re.search(r'(\d+(?:-\d+)*)月', cache_filename)
        months_str = months_match.group(0) if months_match else "未知月份"

        # Recover the year range (e.g. "1960_2024").
        years_match = re.search(r'(\d{4})_(\d{4})', cache_filename)
        years_str = f"{years_match.group(1)}-{years_match.group(2)}" if years_match else "未知年份"

        # Timestamped output file name.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        province_str = f"{province}_" if province else ""
        output_filename = f"缓存数据_{province_str}{months_str}_{years_str}_{timestamp}.xlsx"
        output_excel_path = os.path.join(OUTPUT_DIR, output_filename)

    # Collect one record per (province, grid point, year).
    all_grid_data = []
    all_years_sorted = sorted(list(all_years))

    # Restrict to one province when requested.
    provinces_to_process = [province] if province else province_data.keys()

    for prov in provinces_to_process:
        if prov not in province_data.keys():
            print(f"警告：省份 {prov} 在缓存数据中不存在")
            continue

        for year in all_years_sorted:
            if year not in province_data[prov]:
                continue

            grid_point_data = province_data[prov][year].get('grid_point_data', [])

            for grid_point in grid_point_data:
                lat = grid_point['lat']
                lon = grid_point['lon']
                precip_values = grid_point['precip_values']

                # One row per grid point per year; the year column holds the
                # precipitation summed over the cached months.
                row = {'省份': prov, '纬度': lat, '经度': lon, f'{year}': np.sum(precip_values)}
                all_grid_data.append(row)

    if not all_grid_data:
        # Nothing matched: emit an empty sheet with the expected columns.
        print("警告：没有找到网格点数据")
        df = pd.DataFrame(columns=['省份', '纬度', '经度'] + [str(year) for year in all_years_sorted])
    else:
        df = pd.DataFrame(all_grid_data)

        # Merge the per-year rows of each grid point into a single row.
        grouped_data = []
        unique_coords = df[['省份', '纬度', '经度']].drop_duplicates()

        for _, coord in unique_coords.iterrows():
            prov, lat, lon = coord['省份'], coord['纬度'], coord['经度']

            # All rows belonging to this grid point.
            coord_data = df[(df['省份'] == prov) & (df['纬度'] == lat) & (df['经度'] == lon)]

            row = {'省份': prov, '纬度': lat, '经度': lon}

            for year in all_years_sorted:
                col = str(year)
                # A year column can be absent entirely when no province had
                # data for that year; the previous df.get(col).notna() call
                # crashed with AttributeError on the resulting None.
                if col in coord_data.columns:
                    year_rows = coord_data[coord_data[col].notna()]
                    row[col] = year_rows[col].iloc[0] if not year_rows.empty else np.nan
                else:
                    row[col] = np.nan

            grouped_data.append(row)

        df = pd.DataFrame(grouped_data)

    # Write the final table.
    df.to_excel(output_excel_path, index=False, engine='openpyxl')
    print(f"缓存数据已转换为Excel文件: {output_excel_path}")

    return output_excel_path


def list_cache_files():
    """
    List every cached all-regions data file.

    Returns:
        List[str]: Sorted paths of the matching .npy cache files.
    """
    pattern = os.path.join(CACHE_DIR, "cache_all_regions_*.npy")
    return sorted(glob.glob(pattern))


def parse_args():
    """
    Parse the command-line arguments.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    cli = argparse.ArgumentParser(description='北方地区降水标准化计算工具')

    # Which operation to run.
    cli.add_argument('--action', type=str, choices=['calculate', 'clear_cache', 'convert_cache', 'calculate_ldfai'],
                     default='calculate',
                     help='操作类型: calculate(计算标准化降水量), clear_cache(清除缓存), convert_cache(转换缓存文件), calculate_ldfai(计算LDFAI)')

    # Optional subset of provinces to process.
    cli.add_argument('--provinces', type=str, nargs='+',
                     help='要计算的省份列表，以空格分隔，默认为所有北方省份')

    # Cache-conversion options (meaningful with --action convert_cache).
    cli.add_argument('--cache-file', type=str,
                     help='要转换的缓存文件路径，仅在action=convert_cache时使用')
    cli.add_argument('--convert-all', action='store_true',
                     help='转换所有缓存文件，仅在action=convert_cache时使用')
    cli.add_argument('--province', type=str,
                     help='指定要输出的省份数据，仅在action=convert_cache时使用')

    # Where to write the results.
    cli.add_argument('--output-file', type=str,
                     help='输出文件路径，默认自动生成')

    return cli.parse_args()


def main():
    """
    Entry point: compute standardized precipitation for all regions 1960-2024.

    Dispatches on --action: clear the cache (then recompute), convert cache
    files to Excel, or compute standardized precipitation for May-June and
    July-August plus the LDFAI derived from them.
    """
    # Ensure the cache and output directories exist before any file I/O.
    os.makedirs(CACHE_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    
    # Parse the command-line arguments.
    args = parse_args()
    
    # Work out which provinces to process.
    if args.provinces:
        # Keep only provinces that appear in the supported northern list.
        selected_provinces = []
        for province in args.provinces:
            if province in TARGET_PROVINCES:
                selected_provinces.append(province)
            else:
                print(f"警告: 省份 '{province}' 不在支持的北方省份列表中，将被忽略")
        
        # No valid province left: fall back to the full default list.
        if not selected_provinces:
            print("警告: 没有指定有效的省份，将使用默认的北方省份列表")
            selected_provinces = TARGET_PROVINCES.copy()
    else:
        # Default: process every target province.
        selected_provinces = TARGET_PROVINCES.copy()
    
    # Build the calculator for the chosen provinces.
    calculator = DroughtIndicesCalculator(target_provinces=selected_provinces)
    
    try:
        # Dispatch on the requested action.
        if args.action == 'clear_cache':
            # Remove the two known cache files (best-effort per file).
            cache_files = [
                os.path.join(CACHE_DIR, "cache_all_regions_5-6月_1960_2024.npy"),
                os.path.join(CACHE_DIR, "cache_all_regions_7-8月_1960_2024.npy")
            ]
            for file in cache_files:
                if os.path.exists(file):
                    try:
                        os.remove(file)
                        print(f"已删除缓存文件: {file}")
                    except Exception as e:
                        print(f"删除缓存文件{file}失败: {e}")
            
            # After clearing, fall through to a fresh calculation.
            args.action = 'calculate'
        
        if args.action == 'convert_cache':
            # Convert cached .npy files to Excel.
            cache_files = list_cache_files()
            
            if not cache_files:
                print("\n没有找到缓存文件！请先运行计算生成缓存文件。")
                return
            
            if args.convert_all:
                # Convert every cache file found.
                for file in cache_files:
                    output_file = convert_cache_to_excel(file)
                    print(f"已将缓存文件转换为Excel: {output_file}")
            elif args.cache_file:
                # Convert the single file the user specified.
                if not os.path.exists(args.cache_file):
                    print(f"错误: 指定的缓存文件不存在: {args.cache_file}")
                    exit(1)
                output_file = convert_cache_to_excel(args.cache_file, province=args.province)
                print(f"已将缓存文件转换为Excel: {output_file}")
            else:
                # Nothing specified: just list what is available.
                print("\n找到以下缓存文件:")
                for i, file in enumerate(cache_files):
                    print(f"{i+1}. {os.path.basename(file)}")
                print("\n请使用 --cache-file 参数指定要转换的文件，或使用 --convert-all 转换所有文件")
            
            return
        
        # Standardized precipitation and LDFAI.
        if args.action in ['calculate', 'calculate_ldfai']:
            print(f"\n开始计算{selected_provinces}1960-2024年的标准化降水量...")
            # May-June standardized precipitation.
            print("\n计算5-6月标准化降水量...")
            results_56 = calculator.calculate_standardized_precipitation_all_regions(months=[5, 6])
            # July-August standardized precipitation.
            print("\n计算7-8月标准化降水量...")
            results_78 = calculator.calculate_standardized_precipitation_all_regions(months=[7, 8])
            # Timestamp for the default output file name.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            
            # Long-cycle drought-flood abrupt alternation index.
            print("\n计算长周期旱涝急转指数(LDFAI)...")
            ldfai_results = calculator.calculate_ldfai_all_years(results_56, results_78)

            # Use the requested output path or build a default one.
            if args.output_file:
                output_file = args.output_file
            else:
                region_desc = "北方地区" if len(selected_provinces) == len(TARGET_PROVINCES) else f"{len(selected_provinces)}个选定省份"
                output_filename = f"{region_desc}长周期旱涝急转指数_1960-2024_{timestamp}.xlsx"
                output_file = os.path.join(OUTPUT_DIR, output_filename)

            # Write both periods' results and the LDFAI to one workbook.
            calculator.save_all_regions_precipitation_and_ldfai_to_excel(
                results_1=results_56,
                results_2=results_78,
                ldfai_results=ldfai_results,
                output_file=output_file
            )
            print(f"\n计算完成！LDFAI结果已保存到: {output_file}")

    except Exception as e:
        # Top-level boundary: report the full traceback instead of dying silently.
        print(f"计算过程中出错: {e}")
        import traceback
        traceback.print_exc()


# Allow importing this module without side effects; run only as a script.
if __name__ == "__main__":
    main()
