#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
网格点级别干旱指标计算模块

本模块实现了两个干旱指标在网格点级别的计算：
1. 长周期旱涝急转指数 (LDFAI)
2. 短周期旱涝急转指数 (DWAAI)

每个网格点都有独立的指标数据，而不是聚合计算。

作者: AI Assistant
日期: 2024
"""
import os
import glob
import argparse
import numpy as np
import pandas as pd
import xarray as xr

from datetime import datetime
from typing import Dict, List, Tuple, Optional, Set, Any, Union

# 导入DailyParser类用于解析ERA5日降水数据
try:
    from parse_era5_data import DailyParser
except ImportError:
    # Graceful fallback when the project-local parser module is absent.
    # Downstream code must treat DailyParser as optional (check for None)
    # and fall back to approximate month estimation from day-of-year.
    print("警告: 无法导入DailyParser，将使用简化的日期处理方法")
    DailyParser = None

# Cache and output directories (relative to the script's working directory)
CACHE_DIR = "../cache_data"
OUTPUT_DIR = "../output_data"

# Northern provinces processed by default
TARGET_PROVINCES = [
    '黑龙江', '新疆', '山西', '宁夏', '山东', '河南', '吉林', '辽宁',
    '天津', '青海', '陕西', '内蒙古', '河北', '北京', '甘肃'
]

# Province bounding boxes in degrees (taken from province_bounds_table.txt);
# used only as a fallback when no per-province grid-point file exists.
PROVINCE_BOUNDS = {
    '黑龙江': {'min_lon': 121.188532, 'min_lat': 43.419300, 'max_lon': 134.981033, 'max_lat': 53.513562},
    '新疆': {'min_lon': 73.530848, 'min_lat': 34.396244, 'max_lon': 96.375838, 'max_lat': 49.156872},
    '山西': {'min_lon': 110.281372, 'min_lat': 34.572533, 'max_lon': 114.523865, 'max_lat': 40.784574},
    '宁夏': {'min_lon': 104.268991, 'min_lat': 35.240039, 'max_lon': 107.664898, 'max_lat': 39.370838},
    '山东': {'min_lon': 114.840635, 'min_lat': 34.392154, 'max_lon': 122.675788, 'max_lat': 38.414819},
    '河南': {'min_lon': 110.352192, 'min_lat': 31.391876, 'max_lon': 116.630567, 'max_lat': 36.356128},
    '吉林': {'min_lon': 121.632720, 'min_lat': 40.861617, 'max_lon': 131.230238, 'max_lat': 46.285084},
    '辽宁': {'min_lon': 118.801131, 'min_lat': 38.724161, 'max_lon': 125.716590, 'max_lat': 43.465273},
    '天津': {'min_lon': 116.668669, 'min_lat': 38.571271, 'max_lon': 118.019208, 'max_lat': 40.237236},
    '青海': {'min_lon': 89.387704, 'min_lat': 31.623272, 'max_lon': 103.021554, 'max_lat': 39.208221},
    '陕西': {'min_lon': 105.502845, 'min_lat': 31.699524, 'max_lon': 111.274666, 'max_lat': 39.612638},
    '内蒙古': {'min_lon': 97.150654, 'min_lat': 37.388945, 'max_lon': 125.999845, 'max_lat': 53.296875},
    '河北': {'min_lon': 113.457664, 'min_lat': 36.070422, 'max_lon': 119.816951, 'max_lat': 42.589842},
    '北京': {'min_lon': 115.416166, 'min_lat': 39.440279, 'max_lon': 117.497682, 'max_lat': 41.056872},
    '甘肃': {'min_lon': 92.361523, 'min_lat': 32.638758, 'max_lon': 108.700159, 'max_lat': 42.792213}
}

# Directory containing the per-province grid-point CSV files
GRID_POINTS_DIR = "../data/province_grid_points"


class GridDroughtIndicesCalculator:
    """
    Grid-point-level drought index calculator.

    Unlike an area-aggregating calculator, this class computes each drought
    index independently for every grid point instead of collapsing a whole
    region into a single series.

    Indices:
        * LDFAI -- long-cycle drought-flood abrupt alternation index.
        * DWAAI -- short-cycle drought-flood abrupt alternation index
          (formula not implemented yet; see calculate_dwaai_for_grid_point).

    Note: ``xr.*`` annotations are written as forward-reference strings so
    the class definition does not require xarray to be importable (e.g. for
    unit-testing the pure-math methods).
    """

    def __init__(self, data_path: str = None, target_provinces: List[str] = None):
        """
        Initialize the calculator.

        Args:
            data_path: Directory containing the ERA5 NetCDF files; None
                selects the built-in default path.
            target_provinces: Provinces to process; None means the global
                TARGET_PROVINCES list.
        """
        self.data_path = data_path
        self.precipitation_data = None            # most recently loaded dataset
        self.parser = None                        # DailyParser for date decoding (may stay None)
        self.main_var_name = "P"                  # precipitation variable name in the ERA5 files
        self.lat_dim, self.lon_dim = None, None   # cached spatial dimension names
        self.target_provinces = target_provinces if target_provinces else TARGET_PROVINCES

    def load_era5_data(self, year: int) -> "xr.Dataset":
        """
        Load the ERA5 daily precipitation data for one year.

        Args:
            year: Year to load.

        Returns:
            xarray.Dataset: The precipitation dataset.

        Raises:
            FileNotFoundError: If the yearly NetCDF file does not exist.
        """
        if self.data_path is None:
            # Default relative location of the ERA5 archive.
            file_path = f"Daily_ERA5(1960-2024)/ERA5.P.daily.{year}.nc"
        else:
            file_path = f"{self.data_path}/ERA5.P.daily.{year}.nc"

        try:
            ds = xr.open_dataset(file_path)
            # DailyParser may be unavailable (see the module-level import
            # fallback). Guard so data loading still works without date
            # support instead of crashing with "NoneType is not callable".
            self.parser = DailyParser(ds, file_path) if DailyParser is not None else None
            self.precipitation_data = ds
            return ds
        except FileNotFoundError:
            raise FileNotFoundError(f"找不到文件: {file_path}")

    def load_province_grid_points(self, province: str) -> pd.DataFrame:
        """
        Load the grid-point table of one province.

        Args:
            province: Province name.

        Returns:
            pandas.DataFrame with the lon/lat points, or None on failure.
        """
        try:
            # Preferred source: a dedicated per-province CSV file.
            file_path = os.path.join(GRID_POINTS_DIR, f"{province}_grid_points.csv")
            if os.path.exists(file_path):
                df = pd.read_csv(file_path)
                print(f"成功加载{province}网格点数据，共{len(df)}个点位")
                return df

            # Fallback: extract the province rows from the combined file.
            all_provinces_file = os.path.join(GRID_POINTS_DIR, "all_provinces_grid_points.csv")
            if os.path.exists(all_provinces_file):
                all_df = pd.read_csv(all_provinces_file)
                province_df = all_df[all_df['省份'] == province]

                if not province_df.empty:
                    print(f"从汇总文件中提取{province}网格点数据，共{len(province_df)}个点位")
                    return province_df

            print(f"找不到{province}的网格点数据文件")
            return None

        except Exception as e:
            # Best-effort loader: report and let the caller skip the province.
            print(f"加载{province}网格点数据时出错: {e}")
            return None

    def get_spatial_dimensions(self, ds: "xr.Dataset") -> Tuple[str, str]:
        """
        Detect (and cache) the latitude/longitude dimension names.

        Args:
            ds: Dataset to inspect.

        Returns:
            Tuple[str, str]: (latitude dimension, longitude dimension).

        Raises:
            ValueError: If no known spatial dimension name is found.
        """
        if self.lat_dim is not None and self.lon_dim is not None:
            return self.lat_dim, self.lon_dim

        # Common dimension spellings, in order of preference.
        lat_candidates = ['lat', 'latitude', 'y']
        lon_candidates = ['lon', 'longitude', 'x']

        lat_dim = next((name for name in lat_candidates if name in ds.dims), None)
        lon_dim = next((name for name in lon_candidates if name in ds.dims), None)

        if lat_dim is None or lon_dim is None:
            raise ValueError(f"无法识别数据集的空间维度，可用维度: {list(ds.dims)}")

        self.lat_dim, self.lon_dim = lat_dim, lon_dim
        return lat_dim, lon_dim

    def extract_province_data(self, ds: "xr.Dataset", province: str = None,
                              grid_points: pd.DataFrame = None) -> "Tuple[xr.Dataset, Optional[pd.DataFrame]]":
        """
        Extract data for a province or for an explicit set of lon/lat points.

        Args:
            ds: Source dataset.
            province: Province name; may be None when grid_points is given.
            grid_points: DataFrame of points with '经度' (lon) and '纬度'
                (lat) columns; takes precedence over the province name.

        Returns:
            Tuple of the extracted dataset (time x point) and the grid-point
            table used (None when the bounding-box fallback was taken; the
            fallback returns the raw cropped dataset instead).

        Raises:
            ValueError: If neither province nor grid_points is supplied, the
                province is unsupported, or no bounds are known for it.
        """
        lat_dim, lon_dim = self.get_spatial_dimensions(ds)

        # Prefer an explicitly supplied point set.
        if grid_points is not None and not grid_points.empty:
            point_source = "提供的点位集合"
            if province:
                point_source = f"{province}的{point_source}"
            print(f"使用{point_source}提取数据，共{len(grid_points)}个点位")
        # Otherwise derive the points from the province name.
        elif province is not None:
            if province not in self.target_provinces:
                raise ValueError(f"不支持的省份: {province}，支持的省份有: {', '.join(self.target_provinces)}")

            grid_points = self.load_province_grid_points(province)
            if grid_points is not None and not grid_points.empty:
                print(f"使用精确网格点数据提取{province}省份数据，共{len(grid_points)}个点位")
            else:
                # Fallback: crop by the province bounding box.
                print(f"使用边界框方法提取{province}省份数据")
                if province not in PROVINCE_BOUNDS:
                    raise ValueError(f"找不到省份边界数据: {province}")

                bounds = PROVINCE_BOUNDS[province]
                min_lon, max_lon = bounds['min_lon'], bounds['max_lon']
                min_lat, max_lat = bounds['min_lat'], bounds['max_lat']

                province_data = ds.sel(
                    {lon_dim: slice(min_lon, max_lon), lat_dim: slice(min_lat, max_lat)}
                )
                return province_data, None
        else:
            raise ValueError("必须提供province或grid_points参数之一")

        # Point-wise extraction from the requested coordinates.
        lats = grid_points['纬度'].values
        lons = grid_points['经度'].values

        # NOTE: passing plain arrays to .sel() selects the lat x lon
        # cross-product subgrid; the exact point pairs are picked out below
        # with nearest-neighbour index lookups.
        province_ds = ds.sel(
            {lat_dim: lats, lon_dim: lons},
            method='nearest'
        )

        # Identify the time dimension (fall back to the first dimension).
        time_dims = ['bday', 'time', 'day']
        time_dim = next((dim for dim in province_ds.dims if dim in time_dims), list(province_ds.dims)[0])
        if time_dim not in time_dims:
            print(f"警告: 未找到标准时间维度名称，使用第一个维度'{time_dim}'作为时间维度")

        data_vars = {}

        # Re-shape every (time, lat, lon) variable to (time, point).
        for var_name, var_data in province_ds.data_vars.items():
            if {time_dim, lat_dim, lon_dim}.issubset(set(var_data.dims)):
                # Ensure the time dimension comes first.
                if var_data.dims[0] != time_dim:
                    var_data = var_data.transpose(time_dim, ...)

                point_data = []

                # Pick the nearest cell for each requested point.
                for lat, lon in zip(lats, lons):
                    lat_idx = np.abs(province_ds[lat_dim].values - lat).argmin()
                    lon_idx = np.abs(province_ds[lon_dim].values - lon).argmin()
                    point_values = var_data.isel({lat_dim: lat_idx, lon_dim: lon_idx}).values
                    point_data.append(point_values)

                # Transpose so the resulting shape is [time, point].
                all_points_data = np.array(point_data).T
                data_vars[var_name] = ((time_dim, 'point'), all_points_data)
            else:
                print(f"警告: 变量{var_name}的维度{var_data.dims}不包含所需的维度")

        # Assemble the flattened (time, point) dataset with per-point coords.
        province_ds = xr.Dataset(
            data_vars=data_vars,
            coords={
                time_dim: province_ds[time_dim],
                'point': np.arange(len(lats)),
                'lat': ('point', lats),
                'lon': ('point', lons)
            }
        )

        return province_ds, grid_points

    def calculate_spa(self, precip_values: np.ndarray, mean_precip: float, std_precip: float) -> np.ndarray:
        """
        Compute the Standardized Precipitation Anomaly (SPA).

        SPA = (P - P̄) / σP

        Args:
            precip_values: Precipitation values.
            mean_precip: Long-term mean precipitation.
            std_precip: Precipitation standard deviation.

        Returns:
            np.ndarray: SPA values. When the standard deviation is zero or
            not finite the anomaly is defined as zero, matching the R-value
            convention in calculate_standardized_precipitation_for_grid_points.
        """
        values = np.asarray(precip_values, dtype=float)
        if not np.isfinite(std_precip) or std_precip <= 0:
            # Degenerate spread: define the anomaly as zero everywhere.
            return np.zeros_like(values)
        return (values - mean_precip) / std_precip

    def calculate_dwaai_for_grid_point(self, precip_values: np.ndarray,
                                       drought_period: Tuple[int, int],
                                       flood_period: Tuple[int, int]) -> float:
        """
        Compute the short-cycle drought-flood abrupt alternation index
        (DWAAI) for a single grid point.

        Args:
            precip_values: Precipitation values.
            drought_period: Drought interval (start, end).
            flood_period: Flood interval (start, end).

        Returns:
            float: DWAAI value.

        Raises:
            NotImplementedError: Always -- the DWAAI formula has not been
                implemented yet. Raising is preferable to the previous stub,
                which silently returned None where a float was promised.
        """
        raise NotImplementedError("DWAAI calculation is not implemented yet")

    def calculate_ldfai_for_grid_point(self, r56: float, r78: float) -> float:
        """
        Compute the long-cycle drought-flood abrupt alternation index
        (LDFAI) for a single grid point.

        LDFAI = (R78 - R56) * (|R56| + |R78|) * 1.8^(-|R56 + R78|)

        Args:
            r56: Standardized May-June precipitation.
            r78: Standardized July-August precipitation.

        Returns:
            float: LDFAI value (NaN when either input is NaN).
        """
        if np.isnan(r56) or np.isnan(r78):
            return np.nan

        if abs(r56) + abs(r78) > 0:
            # The 1.8^(-|R56+R78|) factor damps years that are uniformly
            # wet or uniformly dry across both periods.
            ldfai = (r78 - r56) * (abs(r56) + abs(r78)) * (1.8 ** (-abs(r56 + r78)))
        else:
            ldfai = 0

        return float(ldfai)

    def _collect_grid_point_data(self, start_year: int, end_year: int,
                                 months: List[int], months_str: str) -> Dict[str, Any]:
        """
        Collect the selected-month precipitation series for every grid point.

        Args:
            start_year: First year (inclusive).
            end_year: Last year (inclusive).
            months: Months to extract, e.g. [5, 6].
            months_str: Human-readable month label such as "5-6月".

        Returns:
            Dict with 'grid_data' (per-point yearly records), 'all_years'
            (sorted list of years actually processed) and 'months_str'.
        """
        print(f"开始收集{start_year}-{end_year}年所有区域{months_str}数据...")

        grid_data = {}
        all_years = set()
        # The province point tables do not change between years, so load
        # each CSV once instead of re-reading it for every year.
        province_points = {}

        for year in range(start_year, end_year + 1):
            print(f"处理 {year}年所有区域数据...")
            try:
                ds = self.load_era5_data(year)
            except FileNotFoundError as e:
                print(f"警告: {e}，跳过{year}年")
                continue

            lat_dim, lon_dim = self.get_spatial_dimensions(ds)
            # Raw daily precipitation variable (named by main_var_name).
            precip_data = ds[self.main_var_name]
            bday_values = precip_data.bday.values

            # Derive a calendar month for every 'bday' step.
            if self.parser:
                dates = [self.parser.format_date(bday) for bday in bday_values]
                data_months = [int(date.split('-')[1]) for date in dates]
            else:
                # Rough fallback when no parser is available: 30-day months.
                data_months = [min(12, max(1, int(bday / 30) + 1)) for bday in bday_values]

            # Indices of all days that fall into the requested months.
            month_indices = []
            for month in months:
                month_indices.extend([i for i, m in enumerate(data_months) if m == month])
            if not month_indices:
                print(f"警告: {year}年没有{months_str}的数据")
                continue

            month_precip_data = precip_data.isel(bday=month_indices)

            # The dates are identical for every point this year, so compute
            # them once instead of once per grid point.
            date_info = None
            if self.parser:
                date_info = [self.parser.format_date(bday_values[idx]) for idx in month_indices]

            for province in self.target_provinces:
                print(f"处理 {province} 省份数据...")

                if province not in province_points:
                    province_points[province] = self.load_province_grid_points(province)
                grid_points = province_points[province]
                if grid_points is None or grid_points.empty:
                    print(f"找不到{province}的网格点数据，跳过该省份")
                    continue

                for _, point in grid_points.iterrows():
                    lat = point['纬度']
                    lon = point['经度']

                    # Nearest dataset cell for this requested point.
                    lat_idx = abs(ds[lat_dim] - lat).argmin().item()
                    lon_idx = abs(ds[lon_dim] - lon).argmin().item()

                    # Stable unique key for this point.
                    grid_id = f"{province}_{lat:.4f}_{lon:.4f}"

                    if grid_id not in grid_data:
                        grid_data[grid_id] = {
                            'province': province,
                            'lat': lat,
                            'lon': lon,
                            'yearly_data': {}
                        }

                    # Daily values of the selected months at this point.
                    point_data = month_precip_data.isel({lat_dim: lat_idx, lon_dim: lon_idx}).values
                    total_precip = np.sum(point_data)
                    grid_data[grid_id]['yearly_data'][year] = {
                        'precip_values': point_data.tolist(),
                        'total_precip': float(total_precip)
                    }
                    if date_info is not None:
                        grid_data[grid_id]['yearly_data'][year]['dates'] = date_info

                all_years.add(year)

        return {
            'grid_data': grid_data,
            'all_years': sorted(all_years),
            'months_str': months_str
        }

    def calculate_standardized_precipitation_for_grid_points(self, months: Optional[List[int]] = None,
                                                             cache: bool = True) -> Dict[str, Any]:
        """
        Compute the standardized precipitation of the selected months for
        every grid point.

        Args:
            months: Months to use; None defaults to [5, 6] (May-June).
            cache: Whether to read/write the on-disk cache.

        Returns:
            Dict with 'grid_data', 'all_years' and 'months_str'; every
            yearly record gains an 'R_<months_str>' entry.
        """
        # Default handled here to avoid a mutable default argument.
        if months is None:
            months = [5, 6]

        start_year = 1960
        end_year = 2024

        # Month label such as "5-6月"; also used in cache names and keys.
        months_str = "-".join([str(m) for m in months]) + "月"

        cache_filename = f"grid_cache_{months_str}_{start_year}_{end_year}.npy"
        cache_file = os.path.join(CACHE_DIR, cache_filename)

        grid_data = {}
        all_years = []

        if cache and os.path.exists(cache_file):
            try:
                print(f"从缓存文件加载{months_str}数据...")
                cached_data = np.load(cache_file, allow_pickle=True).item()
                cached_grid_data = cached_data['grid_data']
                all_years = cached_data['all_years']

                # The cache must cover every requested province, otherwise
                # partial results would be returned silently.
                cached_provinces = {info['province'] for info in cached_grid_data.values()}

                missing_provinces = set(self.target_provinces) - cached_provinces

                if missing_provinces:
                    print(f"警告: 缓存中缺少以下省份的数据: {', '.join(missing_provinces)}")
                    print(f"缓存中包含的省份: {', '.join(sorted(cached_provinces))}")
                    print(f"建议使用 --no-cache 参数强制重新计算所有数据")
                    print("或者先清除缓存: python grid_drought_indices.py clear_cache")
                    raise ValueError(f"缓存数据不完整，缺少省份: {', '.join(missing_provinces)}")

                grid_data = cached_grid_data
                print(f"成功从缓存加载{len(all_years)}年的数据，包含省份: {', '.join(sorted(cached_provinces))}")
            except Exception as e:
                print(f"加载缓存数据失败: {e}，将重新计算")
                grid_data = {}
                all_years = []

        # Cache miss (or invalid cache): recompute from the raw files.
        if not grid_data:
            data = self._collect_grid_point_data(start_year, end_year, months, months_str)
            grid_data = data['grid_data']
            all_years = data['all_years']

            if cache:
                try:
                    os.makedirs(CACHE_DIR, exist_ok=True)
                    cache_data = {
                        'grid_data': grid_data,
                        'all_years': all_years,
                        'months_str': months_str
                    }
                    np.save(cache_file, cache_data)
                    print(f"已将{months_str}数据保存到缓存文件: {cache_file}")
                except Exception as e:
                    # Caching is best-effort; results are still returned.
                    print(f"保存缓存数据失败: {e}")

        # Standardize each grid point against its own long-term statistics.
        for grid_id, grid_info in grid_data.items():
            # A year may be missing for a point (e.g. its source file was
            # absent), so use only the years actually present.
            point_years = [y for y in all_years if y in grid_info['yearly_data']]
            yearly_precip = [grid_info['yearly_data'][y]['total_precip'] for y in point_years]
            if not yearly_precip:
                continue

            mean_precip = float(np.mean(yearly_precip))
            # Sample standard deviation (ddof=1) needs at least two values;
            # with fewer, fall back to 0 so r_value below becomes 0, not NaN.
            std_precip = float(np.std(yearly_precip, ddof=1)) if len(yearly_precip) > 1 else 0.0

            for year in point_years:
                total_precip = grid_info['yearly_data'][year]['total_precip']
                if std_precip > 0:
                    r_value = (total_precip - mean_precip) / std_precip
                else:
                    # Constant series: anomaly defined as zero.
                    r_value = 0

                grid_info['yearly_data'][year][f'R_{months_str}'] = float(r_value)

        return {
            'grid_data': grid_data,
            'all_years': all_years,
            'months_str': months_str
        }

    def calculate_ldfai_for_grid_points(self, results_period1: Dict[str, Any],
                                        results_period2: Dict[str, Any]) -> Dict[str, Any]:
        """
        Compute the LDFAI for every grid point present in both periods.

        Args:
            results_period1: Standardized precipitation results of the first
                period (e.g. May-June).
            results_period2: Same for the second period (e.g. July-August).

        Returns:
            Dict with per-point yearly records holding both R values and the
            LDFAI, plus 'all_years' and the two period labels.
        """
        # Only points covered by both periods can receive an index.
        common_grid_ids = set(results_period1['grid_data']) & set(results_period2['grid_data'])

        # Union of the years seen in either period.
        all_years = sorted(set(
            results_period1['all_years'] + results_period2['all_years']
        ))

        ldfai_results = {
            'grid_data': {},
            'all_years': all_years,
            'period1_months': results_period1['months_str'],
            'period2_months': results_period2['months_str']
        }

        for grid_id in common_grid_ids:
            grid_info_period1 = results_period1['grid_data'][grid_id]
            grid_info_period2 = results_period2['grid_data'][grid_id]

            ldfai_results['grid_data'][grid_id] = {
                'province': grid_info_period1['province'],
                'lat': grid_info_period1['lat'],
                'lon': grid_info_period1['lon'],
                'yearly_data': {}
            }

            for year in all_years:
                # Missing years/keys keep NaN, which propagates to the LDFAI.
                r_period1 = np.nan
                r_period2 = np.nan

                if year in grid_info_period1['yearly_data']:
                    r_period1_key = f"R_{results_period1['months_str']}"
                    if r_period1_key in grid_info_period1['yearly_data'][year]:
                        r_period1 = grid_info_period1['yearly_data'][year][r_period1_key]

                if year in grid_info_period2['yearly_data']:
                    r_period2_key = f"R_{results_period2['months_str']}"
                    if r_period2_key in grid_info_period2['yearly_data'][year]:
                        r_period2 = grid_info_period2['yearly_data'][year][r_period2_key]

                ldfai = self.calculate_ldfai_for_grid_point(r_period1, r_period2)

                ldfai_results['grid_data'][grid_id]['yearly_data'][year] = {
                    f'R_{results_period1["months_str"]}': r_period1,
                    f'R_{results_period2["months_str"]}': r_period2,
                    'LDFAI': ldfai
                }

        return ldfai_results

    def save_grid_points_to_excel(self, results: Dict[str, Any], output_file: str = None,
                                  data_type: str = "LDFAI", year_filter: List[int] = None):
        """
        Write the per-grid-point results to an Excel file.

        Args:
            results: Result dict produced by one of the calculate_* methods.
            output_file: Target path; None generates a timestamped name
                under OUTPUT_DIR.
            data_type: Indicator to export ("LDFAI", "R56", "R78", ...).
            year_filter: Years to export; None exports all years.

        Returns:
            str: The path of the written file.
        """
        if output_file is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            year_range = "全部年份"
            if year_filter:
                year_range = f"{min(year_filter)}-{max(year_filter)}"
            output_file = os.path.join(OUTPUT_DIR, f"网格点_{data_type}_{year_range}_{timestamp}.xlsx")

        # Create the parent directory only if there is one: a bare file name
        # has an empty dirname and os.makedirs('') would raise.
        out_dir = os.path.dirname(output_file)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        years_to_process = year_filter if year_filter else results['all_years']

        data_rows = []

        # One output row per (grid point, year).
        for grid_id, grid_info in results['grid_data'].items():
            province = grid_info['province']
            lat = grid_info['lat']
            lon = grid_info['lon']

            for year in years_to_process:
                if year in grid_info['yearly_data']:
                    yearly_data = grid_info['yearly_data'][year]

                    row = {
                        '省份': province,
                        '纬度': lat,
                        '经度': lon,
                        '年份': year
                    }

                    if data_type == "LDFAI" and 'LDFAI' in yearly_data:
                        row['LDFAI'] = yearly_data['LDFAI']

                        # Record keys such as "R_5-6月" carry the period label.
                        period1_key = f"R_{results.get('period1_months', '5-6月')}"
                        period2_key = f"R_{results.get('period2_months', '7-8月')}"

                        # Column names with the dash stripped, e.g. "R56月".
                        period1_col = f"R{results.get('period1_months', '56').replace('-', '')}"
                        period2_col = f"R{results.get('period2_months', '78').replace('-', '')}"

                        row[period1_col] = yearly_data.get(period1_key, np.nan)
                        row[period2_col] = yearly_data.get(period2_key, np.nan)
                    elif data_type.startswith("R") and data_type in yearly_data:
                        row[data_type] = yearly_data[data_type]
                    elif f"{data_type}_{results.get('months_str', '')}" in yearly_data:
                        row[data_type] = yearly_data[f"{data_type}_{results.get('months_str', '')}"]

                    data_rows.append(row)

        df = pd.DataFrame(data_rows)

        df.to_excel(output_file, index=False, engine='openpyxl')
        print(f"网格点数据已保存到: {output_file}")

        return output_file


def parse_args():
    """
    Build the command-line parser and parse sys.argv.

    Returns:
        argparse.Namespace: the parsed CLI options.
    """
    cli = argparse.ArgumentParser(description='网格点级别干旱指标计算工具')

    # Which operation to run.
    cli.add_argument('--action', type=str,
                     choices=['calculate_ldfai', 'calculate_r', 'clear_cache'],
                     default='calculate_ldfai',
                     help='操作类型: calculate_ldfai(计算LDFAI), calculate_r(计算指定月份标准化降水量), clear_cache(清除缓存)')

    # Options consumed by the calculate_r action.
    r_args = cli.add_argument_group('R计算参数', '用于calculate_r操作的参数')
    r_args.add_argument('--months', type=int, nargs='+',
                        help='[用于calculate_r] 要计算的月份列表，以空格分隔，例如: --months 5 6 表示5-6月')

    # Options consumed by the calculate_ldfai action.
    ldfai_args = cli.add_argument_group('LDFAI计算参数', '用于calculate_ldfai操作的参数')
    ldfai_args.add_argument('--period1-months', type=int, nargs='+',
                            help='[用于calculate_ldfai] 第一个时期月份，默认为5 6月')
    ldfai_args.add_argument('--period2-months', type=int, nargs='+',
                            help='[用于calculate_ldfai] 第二个时期月份，默认为7 8月')

    # Province subset (default: all northern provinces).
    cli.add_argument('--provinces', type=str, nargs='+',
                     help='要计算的省份列表，以空格分隔，默认为所有北方省份')

    # Restrict which years are exported.
    cli.add_argument('--years', type=int, nargs='+',
                     help='要输出的年份列表，以空格分隔，默认为所有年份')

    # Explicit output path (auto-generated when omitted).
    cli.add_argument('--output-file', type=str,
                     help='输出文件路径，默认自动生成')

    # Bypass the on-disk cache.
    cli.add_argument('--no-cache', action='store_true',
                     help='不使用缓存，强制重新计算')

    return cli.parse_args()


def main():
    """
    CLI entry point: compute grid-point-level drought indices.

    Dispatches on --action: clears the cache, computes standardized
    precipitation for arbitrary months (calculate_r), or computes the
    LDFAI from two monthly periods (calculate_ldfai).  Errors are printed
    with a traceback instead of propagating to the shell.
    """
    # Make sure the working directories exist.
    os.makedirs(CACHE_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    args = parse_args()

    # Validate the action/parameter combination up front (value-range
    # checks happen later, once per action branch).
    if args.action == 'calculate_r':
        if not args.months:
            print("错误: 使用 calculate_r 操作时必须指定 --months 参数")
            return
    elif args.action == 'calculate_ldfai':
        if args.months:
            print("错误: 使用 calculate_ldfai 操作时不能指定 --months 参数")
            return
        # Fill in the LDFAI period defaults.
        if not args.period1_months:
            args.period1_months = [5, 6]
            print(f"未指定 --period1-months 参数，使用默认值: {args.period1_months}")
        if not args.period2_months:
            args.period2_months = [7, 8]
            print(f"未指定 --period2-months 参数，使用默认值: {args.period2_months}")

    # Resolve the province selection.
    if args.provinces:
        # Keep only provinces from the supported northern list.
        selected_provinces = []
        for province in args.provinces:
            if province in TARGET_PROVINCES:
                selected_provinces.append(province)
            else:
                print(f"警告: 省份 '{province}' 不在支持的北方省份列表中，将被忽略")

        if not selected_provinces:
            print("警告: 没有指定有效的省份，将使用默认的北方省份列表")
            selected_provinces = TARGET_PROVINCES.copy()
    else:
        # Default: process every target province.
        selected_provinces = TARGET_PROVINCES.copy()

    calculator = GridDroughtIndicesCalculator(target_provinces=selected_provinces)

    try:
        if args.action == 'clear_cache':
            # Remove every grid cache file; report per-file failures.
            cache_files = glob.glob(os.path.join(CACHE_DIR, "grid_cache_*.npy"))
            for file in cache_files:
                try:
                    os.remove(file)
                    print(f"已删除缓存文件: {file}")
                except Exception as e:
                    print(f"删除缓存文件{file}失败: {e}")
            return

        use_cache = not args.no_cache

        if args.action == 'calculate_r':
            # --months presence was already validated above; only the value
            # range remains to be checked here.
            for month in args.months:
                if month < 1 or month > 12:
                    print(f"错误: 月份 {month} 超出有效范围 (1-12)")
                    return

            months_str = "-".join([str(m) for m in args.months]) + "月"
            print(f"\n计算{months_str}标准化降水量...")

            results = calculator.calculate_standardized_precipitation_for_grid_points(
                months=args.months, cache=use_cache
            )

            output_file = calculator.save_grid_points_to_excel(
                results, output_file=args.output_file, data_type="R", year_filter=args.years
            )
            print(f"\n计算完成！{months_str}标准化降水量结果已保存到: {output_file}")

        elif args.action == 'calculate_ldfai':
            print("\n计算长周期旱涝急转指数(LDFAI)...")

            # Validate both period month lists.
            for month in args.period1_months + args.period2_months:
                if month < 1 or month > 12:
                    print(f"错误: 月份 {month} 超出有效范围 (1-12)")
                    return

            period1_str = "-".join([str(m) for m in args.period1_months]) + "月"
            period2_str = "-".join([str(m) for m in args.period2_months]) + "月"

            # Step 1: standardized precipitation of the first period.
            print(f"\n步骤1: 计算{period1_str}标准化降水量...")
            results_period1 = calculator.calculate_standardized_precipitation_for_grid_points(
                months=args.period1_months, cache=use_cache
            )

            # Step 2: standardized precipitation of the second period.
            print(f"\n步骤2: 计算{period2_str}标准化降水量...")
            results_period2 = calculator.calculate_standardized_precipitation_for_grid_points(
                months=args.period2_months, cache=use_cache
            )

            # Step 3: combine both periods into the LDFAI.
            print(f"\n步骤3: 计算LDFAI ({period1_str} vs {period2_str})...")
            ldfai_results = calculator.calculate_ldfai_for_grid_points(results_period1, results_period2)

            output_file = calculator.save_grid_points_to_excel(
                ldfai_results, output_file=args.output_file, data_type="LDFAI", year_filter=args.years
            )
            print(f"\n计算完成！LDFAI结果已保存到: {output_file}")

    except Exception as e:
        # Top-level boundary: report the failure with a traceback instead
        # of leaking the exception to the shell.
        print(f"计算过程中出错: {e}")
        import traceback
        traceback.print_exc()


# Entry-point guard: run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
