# -*- coding: utf-8 -*-
"""
网格点级干旱指数计算模块

此模块提供网格点级干旱指数计算的函数和类。
主要功能包括：
- 计算网格点级长周期旱涝急转指数 (LDFAI)
"""

import os
import gc
import numpy as np
import pandas as pd

from dry_wet_abrupt.drought_indices.base import DroughtIndex
from dry_wet_abrupt.utils.data_loader import load_era5_data
from dry_wet_abrupt.utils.cache import cache_data, load_cached_data
from dry_wet_abrupt.config import CACHE_DIR, PROVINCE_GRID_POINTS_DIR, get_config


class GridDroughtIndex(DroughtIndex):
    """
    Grid-point-level drought index calculator.

    Computes drought indices on individual ERA5 grid points grouped by
    province; currently supports the long-cycle drought-flood abrupt
    alternation index (LDFAI).
    """

    def __init__(self, start_year=1960, end_year=2024, use_cache=True):
        """
        Initialize the grid-point drought index calculator.

        Args:
            start_year (int): first year of the analysis period.
            end_year (int): last year of the analysis period (inclusive).
            use_cache (bool): whether intermediate results may be cached.
        """
        super().__init__(start_year, end_year, use_cache)
        self.cache_dir = CACHE_DIR
        self.grid_points_dir = get_config('province_grid_points_dir', str(PROVINCE_GRID_POINTS_DIR))

    def calculate_ldfai(self, period1_months=None, period2_months=None, provinces=None):
        """
        Compute the long-cycle drought-flood abrupt alternation index (LDFAI).

        Args:
            period1_months (list): months of the first period; defaults to [5, 6].
            period2_months (list): months of the second period; defaults to [7, 8].
            provinces (list): provinces to process; None means all provinces.

        Returns:
            pandas.DataFrame: one row per (province, lon, lat, year) with the
            standardized precipitation of both periods and the LDFAI value.
        """
        # Explicit None checks so only an omitted argument triggers the
        # documented default (the previous `x or [5, 6]` also replaced any
        # falsy value, e.g. an empty list).
        if period1_months is None:
            period1_months = [5, 6]
        if period2_months is None:
            period2_months = [7, 8]

        # Human-readable period labels, e.g. "5-6月".  These labels are also
        # embedded in the per-year dictionary keys (R_<label>) downstream.
        period1 = '-'.join([str(m) for m in period1_months]) + '月'
        period2 = '-'.join([str(m) for m in period2_months]) + '月'

        print(f"计算{period1}和{period2}的长周期旱涝急转指数 (LDFAI)...")

        # Standardized precipitation for each of the two periods.
        results_period1 = self._calculate_standardized_precipitation_for_grid_points(period1_months, provinces)
        results_period2 = self._calculate_standardized_precipitation_for_grid_points(period2_months, provinces)

        # Combine the two periods into per-grid-point LDFAI values.
        ldfai_results = self._calculate_ldfai_for_grid_points(results_period1, results_period2)

        # Flatten the nested result dict into DataFrame rows.
        result_list = []
        for grid_id, grid_info in ldfai_results['grid_data'].items():
            province = grid_info['province']
            lon = grid_info['lon']
            lat = grid_info['lat']

            for year, year_data in grid_info['yearly_data'].items():
                result_list.append({
                    'province': province,
                    'lon': lon,
                    'lat': lat,
                    'year': year,
                    f'R_{period1}': year_data.get(f'R_{period1}', np.nan),
                    f'R_{period2}': year_data.get(f'R_{period2}', np.nan),
                    'LDFAI': year_data.get('LDFAI', np.nan)
                })

        return pd.DataFrame(result_list)

    def _calculate_standardized_precipitation_for_grid_points(self, months, provinces=None, cache=True):
        """
        Compute the standardized precipitation of the given months for every
        grid point, loading from cache when possible.

        Args:
            months (list): months to aggregate, e.g. [5, 6].
            provinces (list): provinces to process; None means all provinces.
            cache (bool): whether the on-disk cache may be read and written
                (combined with self.use_cache).

        Returns:
            dict: {'grid_data': {grid_id: {...}}, 'all_years': list,
            'months_str': str}.
        """
        # Period label used in cache file names and result keys, e.g. "5-6月".
        months_str = "-".join([str(m) for m in months]) + "月"

        cache_filename = f"grid_cache_{months_str}_{self.start_year}_{self.end_year}.npy"
        cache_file = os.path.join(self.cache_dir, cache_filename)

        grid_data = {}
        all_years = list(range(self.start_year, self.end_year + 1))

        if cache and self.use_cache and os.path.exists(cache_file):
            try:
                print(f"从缓存文件加载{months_str}数据...")
                cached_data = load_cached_data(cache_file)
                if isinstance(cached_data, dict) and 'grid_data' in cached_data:
                    cached_grid_data = cached_data['grid_data']
                    if 'all_years' in cached_data:
                        all_years = cached_data['all_years']

                    # Provinces actually present in the cache.
                    cached_provinces = {info['province'] for info in cached_grid_data.values()}

                    # Only validate province coverage when an explicit list was
                    # requested.  Bug fix: the original always ran
                    # set(provinces), which raised TypeError for the default
                    # provinces=None and silently invalidated the cache.
                    if provinces is not None:
                        missing_provinces = set(provinces) - cached_provinces
                    else:
                        missing_provinces = set()

                    if missing_provinces:
                        print(f"警告: 缓存中缺少以下省份的数据: {', '.join(missing_provinces)}")
                        print(f"缓存中包含的省份: {', '.join(sorted(cached_provinces))}")
                        print(f"建议使用 --no-cache 参数强制重新计算所有数据")
                        print("或者先清除缓存: python main.py cache --clear")
                        # Deliberately caught by the except below, which
                        # reports the failure and falls through to a full
                        # recomputation.
                        raise ValueError(f"缓存数据不完整，缺少省份: {', '.join(missing_provinces)}")

                    grid_data = cached_grid_data
                    print(f"成功从缓存加载{len(all_years)}年的数据，包含省份: {', '.join(sorted(cached_provinces))}")
                else:
                    # Backwards compatibility with the old cache layout where
                    # the file held the grid dict directly.
                    grid_data = cached_data
                    print(f"从旧格式缓存加载数据")
            except Exception as e:
                print(f"加载缓存数据失败: {e}，将重新计算")
                grid_data = {}

        # No usable cache: collect the raw data and standardize it.
        if not grid_data:
            grid_data_raw = self._collect_grid_data(months_str, months=months, provinces=provinces)

            grid_data = {}

            for province, point_yearly_data in grid_data_raw.items():
                for point_id, yearly_data in point_yearly_data.items():
                    lon, lat = self.parse_point_id(point_id)
                    # Prefix with the province so identical coordinates in
                    # different provinces stay distinct.
                    grid_id = f"{province}_{point_id}"

                    if grid_id not in grid_data:
                        grid_data[grid_id] = {
                            'province': province,
                            'lat': lat,
                            'lon': lon,
                            'yearly_data': yearly_data
                        }

                    # Per-point climatology over the full analysis period.
                    # NOTE(review): assumes the loader returned an entry for
                    # every year in all_years -- confirm upstream guarantee.
                    yearly_total_precip = [yearly_data[year]['total_precip'] for year in all_years]

                    mean_precip = float(np.mean(yearly_total_precip))
                    # Sample standard deviation (ddof=1).
                    std_precip = float(np.std(yearly_total_precip, ddof=1))

                    # Standardized anomaly per year; a degenerate spread
                    # (zero or NaN std) yields 0 instead of dividing by zero.
                    for year in all_years:
                        total_precip = yearly_data[year]['total_precip']
                        if std_precip > 0:
                            r_value = (total_precip - mean_precip) / std_precip
                        else:
                            r_value = 0

                        # yearly_data is the same dict stored above, so this
                        # write also lands in grid_data[grid_id]['yearly_data'].
                        grid_data[grid_id]['yearly_data'][year][f'R_{months_str}'] = float(r_value)

            # Persist the freshly computed data for subsequent runs.
            if cache and self.use_cache:
                try:
                    os.makedirs(self.cache_dir, exist_ok=True)
                    cache_data({
                        'grid_data': grid_data,
                        'all_years': all_years,
                        'months_str': months_str
                    }, cache_file)
                    print(f"已将{months_str}数据保存到缓存文件: {cache_file}")
                except Exception as e:
                    print(f"保存缓存数据失败: {e}")

        return {
            'grid_data': grid_data,
            'all_years': all_years,
            'months_str': months_str
        }

    def _calculate_ldfai_for_grid_points(self, results_period1, results_period2):
        """
        Compute the LDFAI for every grid point present in both period results.

        Args:
            results_period1: standardized-precipitation result of period 1.
            results_period2: standardized-precipitation result of period 2.

        Returns:
            dict: {'grid_data': {grid_id: {...}}, 'all_years': list,
            'period1_months': str, 'period2_months': str}.
        """
        # Only grid points covered by both periods can form an index.
        common_grid_ids = set(results_period1['grid_data']) & set(results_period2['grid_data'])

        # Union of the years covered by either period.  Bug fix: the list()
        # conversion is required because .get() may return a range object and
        # range + range raises TypeError.
        all_years = sorted(set(
            list(results_period1.get('all_years', range(self.start_year, self.end_year + 1))) +
            list(results_period2.get('all_years', range(self.start_year, self.end_year + 1)))
        ))

        ldfai_results = {
            'grid_data': {},
            'all_years': all_years,
            'period1_months': results_period1['months_str'],
            'period2_months': results_period2['months_str']
        }

        # Result-dict keys are shared by all points; build them once.
        r_period1_key = f"R_{results_period1['months_str']}"
        r_period2_key = f"R_{results_period2['months_str']}"

        for grid_id in common_grid_ids:
            grid_info_period1 = results_period1['grid_data'][grid_id]
            grid_info_period2 = results_period2['grid_data'][grid_id]

            ldfai_results['grid_data'][grid_id] = {
                'province': grid_info_period1['province'],
                'lat': grid_info_period1['lat'],
                'lon': grid_info_period1['lon'],
                'yearly_data': {}
            }

            for year in all_years:
                # Missing years stay NaN; years present but lacking the R key
                # fall back to 0 (matches the original behavior).
                r_period1 = np.nan
                r_period2 = np.nan

                if year in grid_info_period1.get('yearly_data', {}):
                    r_period1 = grid_info_period1['yearly_data'][year].get(r_period1_key, 0)

                if year in grid_info_period2.get('yearly_data', {}):
                    r_period2 = grid_info_period2['yearly_data'][year].get(r_period2_key, 0)

                ldfai = self._calculate_ldfai_for_grid_point(r_period1, r_period2)

                ldfai_results['grid_data'][grid_id]['yearly_data'][year] = {
                    r_period1_key: r_period1,
                    r_period2_key: r_period2,
                    'LDFAI': ldfai
                }

        return ldfai_results

    def _calculate_ldfai_for_grid_point(self, r56, r78):
        """
        Compute the LDFAI for a single grid point and year.

        LDFAI = (R2 - R1) * (|R1| + |R2|) * 1.8^(-|R1 + R2|); a positive
        value indicates a dry-to-wet turn and a negative value a wet-to-dry
        turn between the two periods.

        Args:
            r56: standardized precipitation of the first period.
            r78: standardized precipitation of the second period.

        Returns:
            float: LDFAI value, or NaN when either input is NaN.
        """
        if np.isnan(r56) or np.isnan(r78):
            return np.nan

        if abs(r56) + abs(r78) > 0:
            ldfai = (r78 - r56) * (abs(r56) + abs(r78)) * (1.8 ** (-abs(r56 + r78)))
        else:
            # Both anomalies are exactly zero: no alternation signal.
            ldfai = 0

        return float(ldfai)

    def _collect_grid_data(self, period, months=None, provinces=None, use_cache=True):
        """
        Collect grid-point precipitation data for a period, batch by batch to
        limit peak memory usage.

        Args:
            period (str): period label such as '5-6月'; parsed into months
                when the months argument is not supplied.
            months (list): explicit month list, e.g. [5, 6].
            provinces (list): provinces to load; None means all provinces.
            use_cache (bool): accepted for interface compatibility; caching is
                handled by the caller.

        Returns:
            dict: {province: {point_id: {year: data}}} nested mapping.
        """
        # Derive the month list from the "M-N月" / "M月" label when not given.
        if months is None:
            if '-' in period:
                month_strs = period.replace('月', '').split('-')
                months = list(range(int(month_strs[0]), int(month_strs[1]) + 1))
            else:
                months = [int(period.replace('月', ''))]

        # Resolve the target provinces and their grid-point coordinates.
        provinces = provinces or self.get_all_provinces()
        grid_points = self.load_province_grid_points(provinces)

        province_data = {}
        batch_size = 50  # grid points loaded per ERA5 request

        for province, points in grid_points.items():
            total_points = len(points)
            print(f"处理{province}的网格点数据，共{total_points}个点")
            province_data[province] = {}

            # Ceiling division: number of batches needed.
            total_batches = (total_points + batch_size - 1) // batch_size

            for batch_index in range(total_batches):
                start_idx = batch_index * batch_size
                end_idx = min(start_idx + batch_size, total_points)
                batch_points = points[start_idx:end_idx]

                print(f"处理{province}的第{batch_index + 1}/{total_batches}批网格点，共{len(batch_points)}个点")

                batch_data = load_era5_data(get_config('era5_data_path'), self.start_year, self.end_year, months,
                                            batch_points, chunk_size=5,
                                            )
                # Skip batches that failed to load rather than aborting.
                if not batch_data:
                    print(f"警告: {province}的第{batch_index + 1}批数据加载失败或为空")
                    continue

                # Merge this batch; keys are point ids over disjoint batches,
                # so a plain dict update is sufficient.
                if not province_data[province]:
                    province_data[province] = batch_data
                else:
                    province_data[province].update(batch_data)

                # Free intermediate arrays before loading the next batch.
                gc.collect()

            print(f"完成{province}的所有{total_points}个网格点数据处理，共{len(province_data[province])}个有效点位")

        return province_data

    def load_province_grid_points(self, provinces):
        """
        Load the grid-point coordinates for each province from its CSV file.

        Args:
            provinces (list): province names to load.

        Returns:
            dict: {province: ndarray of [lon, lat] rows}; provinces whose file
            is missing or unreadable are skipped with a warning.
        """
        result = {}

        for province in provinces:
            file_path = os.path.join(self.grid_points_dir, f"{province}_grid_points.csv")

            if os.path.exists(file_path):
                try:
                    # utf-8-sig strips a BOM written by Excel-style exporters.
                    df = pd.read_csv(file_path, encoding='utf-8-sig')
                    # Column names are Chinese: 经度 = longitude, 纬度 = latitude.
                    points = df[['经度', '纬度']].values
                    result[province] = points
                    print(f"已加载{province}的网格点数据，共{len(points)}个点")
                except Exception as e:
                    print(f"加载{province}的网格点数据失败: {e}")
            else:
                print(f"警告: {province}的网格点数据文件不存在: {file_path}")

        return result

    def get_all_provinces(self):
        """
        List every province that has a grid-point CSV in grid_points_dir.

        Returns:
            list: province names, sorted so the processing order is
            deterministic (os.listdir order is filesystem dependent).
        """
        provinces = [
            file_name.replace('_grid_points.csv', '')
            for file_name in os.listdir(self.grid_points_dir)
            if file_name.endswith('_grid_points.csv')
        ]
        return sorted(provinces)

    def get_point_id(self, lon, lat):
        """
        Build a grid-point ID from coordinates.

        Args:
            lon (float): longitude.
            lat (float): latitude.

        Returns:
            str: ID of the form "<lon>_<lat>" with 4 decimal places.
        """
        return f"{lon:.4f}_{lat:.4f}"

    def parse_point_id(self, point_id):
        """
        Parse a grid-point ID back into coordinates (inverse of get_point_id).

        Args:
            point_id (str): ID of the form "<lon>_<lat>".

        Returns:
            tuple: (longitude, latitude) as floats.
        """
        lon_str, lat_str = point_id.split('_')
        return float(lon_str), float(lat_str)
