"计算风云平均云底高度"
# import numpy as np
# import netCDF4 as nc
# import os
# import glob
# from tqdm import tqdm
#
# # 配置参数
# # INPUT_DIR = "/mnt/datastore/liudddata/result/test"
# # INPUT_DIR = "/mnt/datastore/liudddata/result/20200104_ocean"
# INPUT_DIR = "/mnt/datastore/liudddata/result/20190912_ocean"
# # INPUT_DIR = "/mnt/datastore/liudddata/result/20190208_ocean"
# OUTPUT_NC = "/mnt/datastore/liudddata/result/test20190912.nc"
# VARIABLE_NAME = "predicted_mean"
# FILL_VALUE = -999.9
#
#
# def process_2d_grid():
#     # 获取文件列表
#     file_list = sorted(glob.glob(os.path.join(INPUT_DIR, "*_predicted_2d_mc.nc")))
#     if not file_list:
#         raise ValueError(f"在目录 {INPUT_DIR} 中未找到输入文件")
#
#     print(f"找到 {len(file_list)} 个文件进行处理")
#
#     # 读取第一个文件获取网格信息
#     try:
#         with nc.Dataset(file_list[0], 'r') as template_ds:
#             # 验证维度结构
#             if 'y' not in template_ds.dimensions or 'x' not in template_ds.dimensions:
#                 raise RuntimeError("原始文件维度不符合预期结构，期望包含 'y' 和 'x' 维度")
#
#             # 获取维度大小
#             y_dim = len(template_ds.dimensions['y'])
#             x_dim = len(template_ds.dimensions['x'])
#             print(f"网格维度: y={y_dim}, x={x_dim}")
#
#             # 检查变量是否存在
#             if VARIABLE_NAME not in template_ds.variables:
#                 available_vars = list(template_ds.variables.keys())
#                 raise RuntimeError(f"变量 '{VARIABLE_NAME}' 不存在。可用变量: {available_vars}")
#
#             # 获取x和y坐标变量（一维）
#             x_coords = template_ds['x'][:] if 'x' in template_ds.variables else None
#             y_coords = template_ds['y'][:] if 'y' in template_ds.variables else None
#
#     except Exception as e:
#         raise RuntimeError(f"读取模板文件失败: {str(e)}")
#
#     # 初始化累加数组
#     sum_data = np.zeros((y_dim, x_dim), dtype=np.float64)
#     count_data = np.zeros((y_dim, x_dim), dtype=np.uint32)
#
#     # 统计信息
#     processed_files = 0
#     skipped_files = 0
#
#     # 逐文件处理
#     for file_path in tqdm(file_list, desc="处理文件"):
#         try:
#             with nc.Dataset(file_path, 'r') as ds:
#                 # 检查变量和维度
#                 if VARIABLE_NAME not in ds.variables:
#                     print(f"警告: 文件 {os.path.basename(file_path)} 中缺少变量 '{VARIABLE_NAME}'，跳过")
#                     skipped_files += 1
#                     continue
#
#                 data_var = ds[VARIABLE_NAME]
#                 if data_var.shape != (y_dim, x_dim):
#                     print(
#                         f"警告: 文件 {os.path.basename(file_path)} 维度不匹配，期望({y_dim}, {x_dim})，实际{data_var.shape}，跳过")
#                     skipped_files += 1
#                     continue
#
#                 # 读取数据
#                 data = data_var[:, :]
#
#                 # 处理可能的掩码数组
#                 if hasattr(data, 'mask'):
#                     data = data.filled(FILL_VALUE)
#
#                 data = data.astype(np.float64)
#
#                 # 有效数据判断
#                 valid_mask = (
#                         ~np.isclose(data, FILL_VALUE, atol=1e-6) &
#                         ~np.isnan(data) &
#                         (data >= 0)  # 云底高度>=0为有效值
#                 )
#
#                 # 累加计算
#                 sum_data += np.where(valid_mask, data, 0)
#                 count_data += valid_mask.astype(np.uint32)
#                 processed_files += 1
#
#         except Exception as e:
#             print(f"警告: 处理文件 {os.path.basename(file_path)} 时出错: {str(e)}")
#             skipped_files += 1
#             continue
#
#     print(f"处理统计: 成功处理 {processed_files} 个文件, 跳过 {skipped_files} 个文件")
#
#     # 检查是否有有效数据
#     total_valid_points = np.sum(count_data > 0)
#     if total_valid_points == 0:
#         raise RuntimeError("所有文件中都没有找到有效数据")
#
#     print(f"有效数据点数量: {total_valid_points}")
#     print(f"数据覆盖率: {total_valid_points / (y_dim * x_dim) * 100:.2f}%")
#
#     # 计算平均值
#     with np.errstate(divide='ignore', invalid='ignore'):
#         mean_data = np.divide(sum_data, count_data, where=count_data > 0)
#     mean_data = np.where(count_data > 0, mean_data, FILL_VALUE).astype(np.float32)
#
#     # 创建输出文件
#     try:
#         with nc.Dataset(OUTPUT_NC, 'w', format='NETCDF4') as out_ds:
#             # 定义维度（保持原始x,y维度）
#             out_ds.createDimension('y', y_dim)
#             out_ds.createDimension('x', x_dim)
#
#             # 添加x和y坐标变量（一维）
#             if x_coords is not None:
#                 x_var = out_ds.createVariable('x', 'f4', ('x',), zlib=True)
#                 x_var[:] = x_coords
#                 x_var.long_name = "x coordinate"
#
#             if y_coords is not None:
#                 y_var = out_ds.createVariable('y', 'f4', ('y',), zlib=True)
#                 y_var[:] = y_coords
#                 y_var.long_name = "y coordinate"
#
#             # 创建平均云底高度变量
#             mean_cbh_var = out_ds.createVariable(
#                 'mean_cbh', 'f4', ('y', 'x'),
#                 fill_value=FILL_VALUE,
#                 zlib=True,
#                 complevel=4
#             )
#             mean_cbh_var[:, :] = mean_data
#             mean_cbh_var.long_name = "Average Cloud Base Height"
#             mean_cbh_var.units = "m"
#             mean_cbh_var.missing_value = FILL_VALUE
#             mean_cbh_var.comment = "网格平均云底高度"
#
#             # 创建有效样本数变量
#             valid_count_var = out_ds.createVariable(
#                 'valid_count', 'u4', ('y', 'x'),
#                 zlib=True,
#                 complevel=4
#             )
#             valid_count_var[:, :] = count_data
#             valid_count_var.long_name = "Number of valid observations"
#             valid_count_var.units = "count"
#             valid_count_var.comment = "每个网格点上的有效样本数量"
#
#             # 全局属性
#             out_ds.title = "FY-4A网格平均云底高度"
#             out_ds.source = "基于predicted_mean变量计算的时间平均"
#             out_ds.history = f"Created on {np.datetime64('now').astype(str)} from {len(file_list)} files"
#             out_ds.processed_files_count = processed_files
#             out_ds.skipped_files_count = skipped_files
#             out_ds.data_coverage_percentage = float(total_valid_points / (y_dim * x_dim) * 100)
#
#     except Exception as e:
#         raise RuntimeError(f"创建输出文件失败: {str(e)}")
#
#
# if __name__ == "__main__":
#     try:
#         process_2d_grid()
#         print(f"处理完成，结果保存至：{OUTPUT_NC}")
#         print(f"输出变量: mean_cbh (平均云底高度), valid_count (有效样本数)")
#         print(f"输出维度: y, x")
#     except Exception as e:
#         print(f"处理失败：{str(e)}")

"计算风云多年平均云底高度，批量读取文件夹"
# import numpy as np
# import netCDF4 as nc
# import os
# import glob
# from tqdm import tqdm
#
# # 配置参数 - 修改为多个目录
# INPUT_DIRS = [
#     "/mnt/datastore/liudddata/result/20200104_ocean",
#     "/mnt/datastore/liudddata/result/20190912_ocean",
#     "/mnt/datastore/liudddata/result/20190208_ocean"
# ]
# OUTPUT_NC = "/mnt/datastore/liudddata/result/combined_ocean_mean.nc"
# VARIABLE_NAME = "predicted_mean"
# FILL_VALUE = -999.9
#
#
# def process_2d_grid():
#     # 获取所有目录下的文件列表
#     file_list = []
#     for input_dir in INPUT_DIRS:
#         dir_files = sorted(glob.glob(os.path.join(input_dir, "*_predicted_2d_mc.nc")))
#         print(f"目录 {input_dir} 中找到 {len(dir_files)} 个文件")
#         file_list.extend(dir_files)
#
#     if not file_list:
#         raise ValueError(f"在指定目录中未找到任何输入文件")
#
#     print(f"总共找到 {len(file_list)} 个文件进行处理")
#
#     # 读取第一个文件获取网格信息
#     try:
#         with nc.Dataset(file_list[0], 'r') as template_ds:
#             # 验证维度结构
#             if 'y' not in template_ds.dimensions or 'x' not in template_ds.dimensions:
#                 raise RuntimeError("原始文件维度不符合预期结构，期望包含 'y' 和 'x' 维度")
#
#             # 获取维度大小
#             y_dim = len(template_ds.dimensions['y'])
#             x_dim = len(template_ds.dimensions['x'])
#             print(f"网格维度: y={y_dim}, x={x_dim}")
#
#             # 检查变量是否存在
#             if VARIABLE_NAME not in template_ds.variables:
#                 available_vars = list(template_ds.variables.keys())
#                 raise RuntimeError(f"变量 '{VARIABLE_NAME}' 不存在。可用变量: {available_vars}")
#
#             # 获取x和y坐标变量（一维）
#             x_coords = template_ds['x'][:] if 'x' in template_ds.variables else None
#             y_coords = template_ds['y'][:] if 'y' in template_ds.variables else None
#
#     except Exception as e:
#         raise RuntimeError(f"读取模板文件失败: {str(e)}")
#
#     # 初始化累加数组
#     sum_data = np.zeros((y_dim, x_dim), dtype=np.float64)
#     count_data = np.zeros((y_dim, x_dim), dtype=np.uint32)
#
#     # 统计信息
#     processed_files = 0
#     skipped_files = 0
#
#     # 逐文件处理 - 添加进度条显示
#     for file_path in tqdm(file_list, desc="处理文件"):
#         try:
#             with nc.Dataset(file_path, 'r') as ds:
#                 # 检查变量和维度
#                 if VARIABLE_NAME not in ds.variables:
#                     print(f"警告: 文件 {os.path.basename(file_path)} 中缺少变量 '{VARIABLE_NAME}'，跳过")
#                     skipped_files += 1
#                     continue
#
#                 data_var = ds[VARIABLE_NAME]
#                 if data_var.shape != (y_dim, x_dim):
#                     print(
#                         f"警告: 文件 {os.path.basename(file_path)} 维度不匹配，期望({y_dim}, {x_dim})，实际{data_var.shape}，跳过")
#                     skipped_files += 1
#                     continue
#
#                 # 读取数据
#                 data = data_var[:, :]
#
#                 # 处理可能的掩码数组
#                 if hasattr(data, 'mask'):
#                     data = data.filled(FILL_VALUE)
#
#                 data = data.astype(np.float64)
#
#                 # 有效数据判断
#                 valid_mask = (
#                         ~np.isclose(data, FILL_VALUE, atol=1e-6) &
#                         ~np.isnan(data) &
#                         (data >= 0)  # 云底高度>=0为有效值
#                 )
#
#                 # 累加计算
#                 sum_data += np.where(valid_mask, data, 0)
#                 count_data += valid_mask.astype(np.uint32)
#                 processed_files += 1
#
#         except Exception as e:
#             print(f"警告: 处理文件 {os.path.basename(file_path)} 时出错: {str(e)}")
#             skipped_files += 1
#             continue
#
#     print(f"处理统计: 成功处理 {processed_files} 个文件, 跳过 {skipped_files} 个文件")
#
#     # 检查是否有有效数据
#     total_valid_points = np.sum(count_data > 0)
#     if total_valid_points == 0:
#         raise RuntimeError("所有文件中都没有找到有效数据")
#
#     print(f"有效数据点数量: {total_valid_points}")
#     print(f"数据覆盖率: {total_valid_points / (y_dim * x_dim) * 100:.2f}%")
#
#     # 计算平均值
#     with np.errstate(divide='ignore', invalid='ignore'):
#         mean_data = np.divide(sum_data, count_data, where=count_data > 0)
#     mean_data = np.where(count_data > 0, mean_data, FILL_VALUE).astype(np.float32)
#
#     # 创建输出文件
#     try:
#         with nc.Dataset(OUTPUT_NC, 'w', format='NETCDF4') as out_ds:
#             # 定义维度（保持原始x,y维度）
#             out_ds.createDimension('y', y_dim)
#             out_ds.createDimension('x', x_dim)
#
#             # 添加x和y坐标变量（一维）
#             if x_coords is not None:
#                 x_var = out_ds.createVariable('x', 'f4', ('x',), zlib=True)
#                 x_var[:] = x_coords
#                 x_var.long_name = "x coordinate"
#
#             if y_coords is not None:
#                 y_var = out_ds.createVariable('y', 'f4', ('y',), zlib=True)
#                 y_var[:] = y_coords
#                 y_var.long_name = "y coordinate"
#
#             # 创建平均云底高度变量
#             mean_cbh_var = out_ds.createVariable(
#                 'mean_cbh', 'f4', ('y', 'x'),
#                 fill_value=FILL_VALUE,
#                 zlib=True,
#                 complevel=4
#             )
#             mean_cbh_var[:, :] = mean_data
#             mean_cbh_var.long_name = "Average Cloud Base Height"
#             mean_cbh_var.units = "m"
#             mean_cbh_var.missing_value = FILL_VALUE
#             mean_cbh_var.comment = "网格平均云底高度，基于三个目录的所有文件计算"
#
#             # 创建有效样本数变量
#             valid_count_var = out_ds.createVariable(
#                 'valid_count', 'u4', ('y', 'x'),
#                 zlib=True,
#                 complevel=4
#             )
#             valid_count_var[:, :] = count_data
#             valid_count_var.long_name = "Number of valid observations"
#             valid_count_var.units = "count"
#             valid_count_var.comment = "每个网格点上的有效样本数量"
#
#             # 全局属性
#             out_ds.title = "FY-4A网格平均云底高度（三目录合并）"
#             out_ds.source = f"基于predicted_mean变量计算的时间平均，合并目录: {', '.join(INPUT_DIRS)}"
#             out_ds.history = f"Created on {np.datetime64('now').astype(str)} from {len(file_list)} files"
#             out_ds.processed_files_count = processed_files
#             out_ds.skipped_files_count = skipped_files
#             out_ds.data_coverage_percentage = float(total_valid_points / (y_dim * x_dim) * 100)
#             out_ds.input_directories = str(INPUT_DIRS)
#
#     except Exception as e:
#         raise RuntimeError(f"创建输出文件失败: {str(e)}")
#
#
# if __name__ == "__main__":
#     try:
#         process_2d_grid()
#         print(f"处理完成，结果保存至：{OUTPUT_NC}")
#         print(f"输出变量: mean_cbh (平均云底高度), valid_count (有效样本数)")
#         print(f"输出维度: y, x")
#         print(f"处理的目录: {INPUT_DIRS}")
#     except Exception as e:
#         print(f"处理失败：{str(e)}")

"计算结果添加XY经纬度信息"
import numpy as np
import netCDF4 as nc
import os

# Configuration parameters
# AVERAGE_FILE = "/mnt/datastore/liudddata/result/test20200104.nc"  # alternative pre-computed mean file
AVERAGE_FILE = "/mnt/datastore/liudddata/result/combined_ocean_mean.nc"  # pre-computed mean CBH file
COORD_FILE = '/home/liudd/data_preprocessing/FY4A_coordinates.nc'  # FY-4A lat/lon coordinate file
OUTPUT_FILE = "/mnt/datastore/liudddata/result/201902_202004_with_coords.nc"  # output file (with coordinates)


def load_fy_coordinates(coord_file_name):
    """Load the FY satellite 2-D latitude/longitude grids from a NetCDF file.

    The 'lat' and 'lon' variables are transposed on read to obtain the
    expected (y, x) dimension order. NaN cells are replaced with sentinel
    values (-90.0 for latitude, 360.0 for longitude).

    Returns:
        tuple: (lat_2d, lon_2d) as 2-D numpy arrays.
    """
    with nc.Dataset(coord_file_name, 'r') as coord_file:
        # Transpose so the arrays come out in (y, x) order
        lat_grid = coord_file.variables['lat'][:, :].T
        lon_grid = coord_file.variables['lon'][:, :].T
    # Replace NaNs with out-of-range sentinels after the file is closed
    lat_grid = np.where(np.isnan(lat_grid), -90., lat_grid)
    lon_grid = np.where(np.isnan(lon_grid), 360., lon_grid)
    return lat_grid, lon_grid


def add_coordinates_to_average():
    """Attach 2-D lat/lon coordinate variables to the pre-computed mean file.

    Reads AVERAGE_FILE (variables: 'mean_cbh', optionally 'valid_count'),
    loads the FY-4A latitude/longitude grids from COORD_FILE via
    load_fy_coordinates, and writes OUTPUT_FILE containing the same data
    plus CF-style 2-D 'lat'/'lon' coordinate variables.

    Raises:
        FileNotFoundError: if AVERAGE_FILE or COORD_FILE does not exist.
        RuntimeError: if either input file is malformed or writing fails.
    """
    # Verify both inputs exist before opening anything
    if not os.path.exists(AVERAGE_FILE):
        raise FileNotFoundError(f"平均文件不存在: {AVERAGE_FILE}")

    if not os.path.exists(COORD_FILE):
        raise FileNotFoundError(f"坐标文件不存在: {COORD_FILE}")

    print(f"正在处理平均文件: {AVERAGE_FILE}")
    print(f"使用坐标文件: {COORD_FILE}")

    # --- Read the averaged file -----------------------------------------
    try:
        with nc.Dataset(AVERAGE_FILE, 'r') as avg_ds:
            # The mean file must carry the ('y', 'x') grid dimensions
            if 'y' not in avg_ds.dimensions or 'x' not in avg_ds.dimensions:
                raise RuntimeError("平均文件缺少 'y' 或 'x' 维度")

            y_dim = len(avg_ds.dimensions['y'])
            x_dim = len(avg_ds.dimensions['x'])
            print(f"平均文件维度: y={y_dim}, x={x_dim}")

            if 'mean_cbh' not in avg_ds.variables:
                raise RuntimeError("平均文件中缺少 'mean_cbh' 变量")

            # Read the data variables
            mean_cbh_data = avg_ds['mean_cbh'][:, :]
            if 'valid_count' in avg_ds.variables:
                valid_count_data = avg_ds['valid_count'][:, :]
            else:
                valid_count_data = None
                print("警告: 平均文件中没有 'valid_count' 变量")

            # Fill value: prefer the file's _FillValue, fall back to default
            if hasattr(avg_ds['mean_cbh'], '_FillValue'):
                fill_value = avg_ds['mean_cbh']._FillValue
            else:
                fill_value = -999.9
                print(f"警告: 未找到填充值，使用默认值: {fill_value}")

            # Print the valid-data range as a sanity check.
            # NOTE(review): netCDF4 may return a masked array here, in which
            # case the != comparison skips masked cells — confirm that is
            # the intended behavior for this data set.
            valid_data = mean_cbh_data[mean_cbh_data != fill_value]
            if len(valid_data) > 0:
                data_min = np.min(valid_data)
                data_max = np.max(valid_data)
                data_mean = np.mean(valid_data)
                print(f"平均云底高度范围: 最小值={data_min:.1f}, 最大值={data_max:.1f}, 平均值={data_mean:.1f}")

    except Exception as e:
        raise RuntimeError(f"读取平均文件失败: {str(e)}") from e

    # --- Load the coordinate grids --------------------------------------
    try:
        lat_2d, lon_2d = load_fy_coordinates(COORD_FILE)
        print(f"坐标文件维度: lat{lat_2d.shape}, lon{lon_2d.shape}")

        # Verify the coordinate arrays match the data grid
        if lat_2d.shape != (y_dim, x_dim) or lon_2d.shape != (y_dim, x_dim):
            print(f"警告: 坐标维度({lat_2d.shape})与数据维度({y_dim}, {x_dim})不匹配")
            if lat_2d.shape == (x_dim, y_dim):
                # BUG FIX: the previous code transposed the DATA here, which
                # left it shaped (x_dim, y_dim) while the output dimensions
                # remain ('y', 'x') = (y_dim, x_dim), so the later variable
                # writes would fail. Transpose the coordinate arrays instead
                # so every array ends up as (y_dim, x_dim).
                print("检测到坐标维度顺序可能相反，将转置坐标")
                lat_2d = lat_2d.T
                lon_2d = lon_2d.T
            else:
                raise RuntimeError(f"坐标文件与数据文件维度不匹配且无法自动修正")

    except Exception as e:
        raise RuntimeError(f"读取坐标文件失败: {str(e)}") from e

    # --- Write the output file (with coordinates) -----------------------
    try:
        with nc.Dataset(OUTPUT_FILE, 'w', format='NETCDF4') as out_ds:
            # Keep the original dimension names from the mean file
            out_ds.createDimension('y', y_dim)
            out_ds.createDimension('x', x_dim)

            # 2-D latitude/longitude coordinate variables
            lat_var = out_ds.createVariable('lat', 'f4', ('y', 'x'), zlib=True)
            lon_var = out_ds.createVariable('lon', 'f4', ('y', 'x'), zlib=True)

            lat_var[:, :] = lat_2d.astype(np.float32)
            lon_var[:, :] = lon_2d.astype(np.float32)

            # CF-style coordinate attributes
            lat_var.units = "degrees_north"
            lat_var.long_name = "latitude"
            lat_var.standard_name = "latitude"

            lon_var.units = "degrees_east"
            lon_var.long_name = "longitude"
            lon_var.standard_name = "longitude"

            # Mean cloud-base-height variable
            mean_cbh_var = out_ds.createVariable(
                'mean_cbh', 'f4', ('y', 'x'),
                fill_value=fill_value,
                zlib=True,
                complevel=4
            )
            mean_cbh_var[:, :] = mean_cbh_data.astype(np.float32)

            # Copy attributes from the source file. Opened ONCE here instead
            # of three separate times as in the previous version.
            with nc.Dataset(AVERAGE_FILE, 'r') as src_ds:
                src_var = src_ds['mean_cbh']
                for attr_name in src_var.ncattrs():
                    if attr_name != '_FillValue':  # already set at creation
                        setattr(mean_cbh_var, attr_name, getattr(src_var, attr_name))

                # Coordinate reference for the data variable
                mean_cbh_var.coordinates = "lat lon"

                # Copy the valid-count variable, if present
                if valid_count_data is not None:
                    valid_count_var = out_ds.createVariable(
                        'valid_count', 'u4', ('y', 'x'),
                        zlib=True,
                        complevel=4
                    )
                    valid_count_var[:, :] = valid_count_data

                    src_count = src_ds['valid_count']
                    for attr_name in src_count.ncattrs():
                        setattr(valid_count_var, attr_name, getattr(src_count, attr_name))

                    valid_count_var.coordinates = "lat lon"

                # Copy the global attributes
                for attr_name in src_ds.ncattrs():
                    setattr(out_ds, attr_name, getattr(src_ds, attr_name))

            # Record the coordinate provenance in the global attributes
            out_ds.coordinate_source = COORD_FILE
            out_ds.history = f"{getattr(out_ds, 'history', '')}; Added lat/lon coordinates from {COORD_FILE}"

            print(f"成功创建带坐标的输出文件: {OUTPUT_FILE}")

            # Echo the written value range as a final sanity check
            valid_output_data = mean_cbh_data[mean_cbh_data != fill_value]
            if len(valid_output_data) > 0:
                output_min = np.min(valid_output_data)
                output_max = np.max(valid_output_data)
                output_mean = np.mean(valid_output_data)
                print(f"输出云底高度范围: 最小值={output_min:.1f}, 最大值={output_max:.1f}, 平均值={output_mean:.1f}")

    except Exception as e:
        raise RuntimeError(f"创建输出文件失败: {str(e)}") from e


if __name__ == "__main__":
    # Script entry point: run the coordinate merge and report the outcome.
    try:
        add_coordinates_to_average()
    except Exception as e:
        print(f"处理失败: {str(e)}")
    else:
        print(f"处理完成!")
        print(f"输入文件: {AVERAGE_FILE}")
        print(f"输出文件: {OUTPUT_FILE}")
        print(f"坐标来源: {COORD_FILE}")