
'''批处理201909-202004'''
# import os
# import numpy as np
# import pandas as pd
# from read_CloudSat import reader
# import concurrent.futures
#
#
# def process_single_file_pair(filepath, filepath_lida):
#     """
#     处理单个文件对的函数
#     """
#     # 读取数据
#     f = reader(filepath)
#     d = reader(filepath_lida)
#     lon_c, lat_c, elv = f.read_geo()
#     height = f.read_sds('Height')
#     height = height * 0.001
#     cloud_mask = f.read_sds('CPR_Cloud_mask')
#     cloud_radar = f.read_sds('Radar_Reflectivity')
#     cloud_faction = d.read_sds('CloudFraction')
#     time = f.read_time(datetime=True)
#     cloudsat_start_time, cloudsat_end_time = time[[0, -1]]
#
#     cloudsat_cth = []
#     cloudsat_cbh = []
#     cloud_types = []
#     times = []
#
#     for i, row_data in enumerate(cloud_mask):
#         # 将时间转换为年-月-日 时的格式
#         time_str = time[i].strftime('%Y-%m-%d %H')
#         times.append(time_str)
#         # 使用按位操作，确保 cloud_mask >= 20 且 cloud_radar >= -30
#         valid_mask = (cloud_mask[i, :] >= 20) & (cloud_radar[i, :] >= -30)
#
#         if np.any(valid_mask):  # 如果有满足条件的值
#             col_idx = np.argmax(valid_mask)  # 找到第一个满足条件的位置
#             last_idx = np.where(valid_mask)[0][-1]  # 找到最后一个满足条件的位置
#
#             # 获取云顶高度
#             cth_value = height[i, col_idx] if col_idx < height.shape[1] else np.nan
#             cloudsat_cth.append(cth_value)
#
#             # 获取云底高度
#             cbh_value = height[i, last_idx] if last_idx < height.shape[1] else np.nan
#             cloudsat_cbh.append(cbh_value)
#
#             # 判断云类型
#             if col_idx < last_idx:
#                 between_values = row_data[col_idx + 1:last_idx]  # 从云顶到云底的区域
#                 cloud_type = 1 if np.any(between_values < 20) else 0  # 判断云类型
#             else:
#                 cloud_type = 0  # 如果云顶和云底相同或无有效数据
#             cloud_types.append(cloud_type)
#
#         else:
#             # 新添加的逻辑，当cloud_radar >= -30但不满足cloud_mask >= 20时
#             radar_valid = cloud_radar[i, :] >= -30
#             if np.any(radar_valid):
#                 # 进一步判断cloud_faction是否大于等于99
#                 faction_valid = cloud_faction[i, :] >= 99
#                 if np.any(faction_valid):
#                     col_idx = np.argmax(faction_valid)
#                     last_idx = np.where(faction_valid)[0][-1]
#
#                     cth_value = height[i, col_idx] if col_idx < height.shape[1] else np.nan
#                     cloudsat_cth.append(cth_value)
#
#                     cbh_value = height[i, last_idx] if last_idx < height.shape[1] else np.nan
#                     cloudsat_cbh.append(cbh_value)
#
#                     # 这里暂时按原逻辑一样处理云类型（可根据实际需求调整）
#                     if col_idx < last_idx:
#                         between_values = row_data[col_idx + 1:last_idx]
#                         cloud_type = 1 if np.any(between_values < 20) else 0
#                     else:
#                         cloud_type = 0
#                     cloud_types.append(cloud_type)
#                 else:
#                     cloudsat_cth.append(np.nan)
#                     cloudsat_cbh.append(np.nan)
#                     cloud_types.append(np.nan)
#             else:
#                 cloudsat_cth.append(np.nan)
#                 cloudsat_cbh.append(np.nan)
#                 cloud_types.append(np.nan)
#
#     data_dict = {
#         'longitude': lon_c,
#         'latitude': lat_c,
#         'cloudsat_cth': [cth * 1000 for cth in cloudsat_cth],
#         'cloudsat_cbh': [cbh * 1000 for cbh in cloudsat_cbh],
#         'cloudsat_type': cloud_types,
#         'time': times
#     }
#
#     # 将字典转换为DataFrame
#     return pd.DataFrame(data_dict)
#
#
# def process_cloudsat_data(filepaths_geoprof, filepaths_lida):
#     all_dfs = []
#     with concurrent.futures.ProcessPoolExecutor() as executor:
#         # 使用并行处理
#         results = list(executor.map(process_single_file_pair, filepaths_geoprof, filepaths_lida))
#         all_dfs.extend(results)
#
#     combined_df = pd.concat(all_dfs, ignore_index=True)
#     return combined_df
#
#
# # 示例调用
# # geoprof_dir = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2020_1_4'
# # lida_dir = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_lida/cloudsat_lida_2020_1_4'
# geoprof_dir = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2019_2_8'
# lida_dir = '/mnt/raid1/liudd/cloudsat_02_08lida'
# # geoprof_dir = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2019_9_12'
# # lida_dir = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_lida/cloudsat_lida_2019_9_12'
# print("geoprof_dir:", geoprof_dir)
# print("lida_dir:", lida_dir)
#
# geoprof_files = [os.path.join(geoprof_dir, f) for f in os.listdir(geoprof_dir) if f.endswith('.hdf')]
# lida_files = [os.path.join(lida_dir, f) for f in os.listdir(lida_dir) if f.endswith('.hdf')]
#
# result_df = process_cloudsat_data(geoprof_files, lida_files)
# # 过滤掉cloudsat_cbh列中的空值
# result_df = result_df.dropna(subset=['cloudsat_cbh'])
# csv_filepath = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_201902_08.csv'
# result_df.to_csv(csv_filepath, index=False)


'''201902_08数据'''
import os
import numpy as np
import pandas as pd
import logging
from read_CloudSat import reader
import concurrent.futures
from scipy.interpolate import griddata
# from global_land_mask import globe
import xarray as xr
from pyhdf.error import HDF4Error
from tqdm import tqdm

# Configuration: input directories for the two CloudSat products
# (2B-GEOPROF radar granules and the matching 2B-GEOPROF-LIDAR granules).
GEOPROF_DIR = "/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2019_2_8"
LIDAR_DIR = "/mnt/datastore/liudddata/cloudsat_data/cloudsat_lida/cloudsat_lida_2019_2_8"


# Logging setup: all progress/warning/error messages from this script are
# appended to a log file in the working directory (INFO level and above).
logging.basicConfig(
    filename='cloudsat_processing.log',
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)


def generate_lidar_path(geoprof_path, lidar_dir=None):
    """Derive the LIDAR granule path that matches a GEOPROF granule.

    The LIDAR file shares the GEOPROF file name except for the product tag,
    so the name is obtained by substituting ``CS_2B-GEOPROF_GRANULE_P1`` with
    ``CS_2B-GEOPROF-LIDAR_GRANULE_P2``.

    Args:
        geoprof_path: path to a 2B-GEOPROF ``.hdf`` granule.
        lidar_dir: directory containing the LIDAR granules; defaults to the
            module-level ``LIDAR_DIR`` (backward compatible with old callers).

    Returns:
        The LIDAR file path if it exists on disk, otherwise ``None``.
    """
    try:
        if lidar_dir is None:
            lidar_dir = LIDAR_DIR
        base_name = os.path.basename(geoprof_path)
        # Swap the product identifier to obtain the companion file name.
        lidar_name = base_name.replace("CS_2B-GEOPROF_GRANULE_P1", "CS_2B-GEOPROF-LIDAR_GRANULE_P2")
        lidar_path = os.path.join(lidar_dir, lidar_name)
        return lidar_path if os.path.exists(lidar_path) else None
    except Exception as e:
        logging.error(f"路径生成失败: {geoprof_path} - {str(e)}")
        return None


def find_valid_pairs(geoprof_dir=None):
    """Collect (GEOPROF, LIDAR) granule path pairs that both exist on disk.

    Scans *geoprof_dir* for ``.hdf`` files (sorted for deterministic order)
    and resolves each one's LIDAR counterpart via ``generate_lidar_path``.
    Granules without a matching LIDAR file are logged and skipped.

    Args:
        geoprof_dir: directory with 2B-GEOPROF granules; defaults to the
            module-level ``GEOPROF_DIR`` (backward compatible).

    Returns:
        list of ``(geoprof_path, lidar_path)`` tuples.
    """
    if geoprof_dir is None:
        geoprof_dir = GEOPROF_DIR
    valid_pairs = []
    geoprof_files = sorted(f for f in os.listdir(geoprof_dir) if f.endswith('.hdf'))

    for gp_file in tqdm(geoprof_files, desc="匹配文件"):
        gp_path = os.path.join(geoprof_dir, gp_file)
        lidar_path = generate_lidar_path(gp_path)

        if lidar_path:
            valid_pairs.append((gp_path, lidar_path))
        else:
            logging.warning(f"未找到匹配文件: {gp_file}")

    logging.info(f"找到有效文件对: {len(valid_pairs)} 对")
    return valid_pairs


def _extract_layer(valid, height_row, mask_row):
    """Return ``(cth, cbh, cloud_type)`` for one profile from a bin mask.

    Assumes ``np.any(valid)`` is True. The cloud top height (cth) is taken
    at the first True bin and the cloud base height (cbh) at the last True
    bin. ``cloud_type`` is 1 when any mask value strictly between top and
    base falls below 20 (a gap inside the layer), else 0.

    Args:
        valid: 1-D boolean array marking cloudy bins.
        height_row: 1-D height values for the same profile.
        mask_row: 1-D CPR cloud-mask values used for the gap test.
    """
    first_idx = np.argmax(valid)            # index of cloud top
    last_idx = np.where(valid)[0][-1]       # index of cloud base

    # Guard against a mask wider than the height array — TODO confirm the
    # products always share bin count; original code kept the same check.
    n_bins = height_row.shape[0]
    cth = height_row[first_idx] if first_idx < n_bins else np.nan
    cbh = height_row[last_idx] if last_idx < n_bins else np.nan

    if first_idx < last_idx:
        between_values = mask_row[first_idx + 1:last_idx]
        cloud_type = 1 if np.any(between_values < 20) else 0
    else:
        # Top and base coincide (single-bin cloud): no internal gap possible.
        cloud_type = 0
    return cth, cbh, cloud_type


def process_single_pair(pair):
    """Process one (GEOPROF, LIDAR) file pair into a per-profile DataFrame.

    For each radar profile, cloud top/base heights and a gap-based cloud
    type are extracted. Primary detection requires CPR_Cloud_mask >= 20 AND
    Radar_Reflectivity >= -30; when the mask is not confident but radar
    echo exists, the lidar CloudFraction (>= 99) is used as a fallback.
    Profiles with no detection get NaN in all three columns.

    Args:
        pair: ``(geoprof_path, lidar_path)`` tuple.

    Returns:
        pandas.DataFrame with columns longitude, latitude, cloudsat_cth,
        cloudsat_cbh, cloudsat_type, time — or an empty DataFrame when
        reading/processing fails (the error is logged).
    """
    gp_path, lidar_path = pair
    try:
        # Read both granules with the project HDF reader.
        geo_reader = reader(gp_path)
        lid_reader = reader(lidar_path)

        lon, lat, _ = geo_reader.read_geo()
        height = geo_reader.read_sds('Height')
        cloud_mask = geo_reader.read_sds('CPR_Cloud_mask')
        cloud_radar = geo_reader.read_sds('Radar_Reflectivity')
        cloud_faction = lid_reader.read_sds('CloudFraction')
        time_data = geo_reader.read_time(datetime=True)

        cloudsat_cth = []
        cloudsat_cbh = []
        cloud_types = []
        times = []

        for i, row_data in enumerate(cloud_mask):
            # Timestamp truncated to the hour, e.g. "2019-02-08 13".
            times.append(time_data[i].strftime('%Y-%m-%d %H'))

            # Primary detection: confident radar cloud mask AND echo above
            # the -30 dBZ threshold.
            valid_mask = (cloud_mask[i, :] >= 20) & (cloud_radar[i, :] >= -30)

            if np.any(valid_mask):
                cth, cbh, cloud_type = _extract_layer(valid_mask, height[i], row_data)
            else:
                # Fallback: radar echo present but mask not confident —
                # rely on the lidar cloud fraction instead.
                radar_valid = cloud_radar[i, :] >= -30
                faction_valid = cloud_faction[i, :] >= 99
                if np.any(radar_valid) and np.any(faction_valid):
                    cth, cbh, cloud_type = _extract_layer(faction_valid, height[i], row_data)
                else:
                    cth, cbh, cloud_type = np.nan, np.nan, np.nan

            cloudsat_cth.append(cth)
            cloudsat_cbh.append(cbh)
            cloud_types.append(cloud_type)

        return pd.DataFrame({
            'longitude': lon,
            'latitude': lat,
            'cloudsat_cth': cloudsat_cth,
            'cloudsat_cbh': cloudsat_cbh,
            'cloudsat_type': cloud_types,
            'time': times
        })

    except HDF4Error as e:
        logging.error(f"HDF错误 [{gp_path}]: {str(e)}")
    except Exception as e:
        logging.error(f"处理失败 [{gp_path}]: {str(e)}")

    # Any failure yields an empty frame so pd.concat downstream still works.
    return pd.DataFrame()


def parallel_processing(valid_pairs):
    """Fan ``process_single_pair`` out over a process pool and merge results.

    Each file pair is submitted to an 8-worker ``ProcessPoolExecutor``;
    per-granule DataFrames are collected as their futures complete, with a
    tqdm bar tracking progress, then concatenated into one DataFrame.
    """
    frames = []

    with concurrent.futures.ProcessPoolExecutor(max_workers=8) as pool:
        pending = [pool.submit(process_single_pair, pair) for pair in valid_pairs]

        with tqdm(total=len(pending), desc="处理文件") as progress:
            for done in concurrent.futures.as_completed(pending):
                frames.append(done.result())
                progress.update(1)

    return pd.concat(frames, ignore_index=True)


# def load_fy4a_grid():
#     """加载FY4A网格"""
#     ds = xr.open_dataset(FY4A_GRID)
#     return {
#         'lon': ds.lon.values.T,
#         'lat': ds.lat.values.T,
#         'shape': ds.lon.shape[::-1]  # 转置形状
#     }


# def interpolate_data(df, grid):
#     """数据插值"""
#     valid = df.dropna(subset=['lon', 'lat', 'cbh'])
#     points = valid[['lon', 'lat']].values
#     values = valid['cbh'].values
#
#     # 网格插值
#     grid_z = griddata(
#         points, values,
#         (grid['lon'].ravel(), grid['lat'].ravel()),
#         method='linear',
#         fill_value=np.nan
#     )
#
#     return grid_z.reshape(grid['shape'])


# def create_output_dataset(grid_data, grid_info):
#     """创建输出数据集"""
#     ds = xr.Dataset(
#         {'cbh': (('y', 'x'), grid_data)},
#         coords={
#             'lon': (('y', 'x'), grid_info['lon']),
#             'lat': (('y', 'x'), grid_info['lat'])
#         }
#     )
#     return ds


def main(csv_filepath='/mnt/datastore/liudddata/cloudsat_data/cloudsat_201902_08.csv'):
    """Run the full pipeline: match files, extract cloud properties, save CSV.

    Args:
        csv_filepath: destination for the combined CSV; the default keeps
            the previously hard-coded output location, so existing callers
            are unaffected.

    Raises:
        ValueError: when no matched (GEOPROF, LIDAR) file pairs are found.
    """
    logging.info("=== 开始处理流程 ===")

    # Step 1: pair GEOPROF granules with their LIDAR counterparts.
    valid_pairs = find_valid_pairs()
    if not valid_pairs:
        raise ValueError("未找到有效文件对")

    # Step 2: extract cloud top/base heights in parallel.
    result_df = parallel_processing(valid_pairs)
    # Drop profiles where no cloud base could be determined.
    result_df = result_df.dropna(subset=['cloudsat_cbh'])
    result_df.to_csv(csv_filepath, index=False)
    logging.info(f"结果已保存至: {csv_filepath}")

    # NOTE(review): steps 3-4 (interpolation onto the FY4A grid and NetCDF
    # export via load_fy4a_grid / interpolate_data / create_output_dataset)
    # are currently disabled; their commented-out helpers remain above.


# Script entry point: run the pipeline and report the outcome both to the
# log file and to stdout.
if __name__ == "__main__":
    try:
        main()
        print("处理成功完成!")
    except Exception as e:
        # Top-level boundary: record any failure before exiting.
        logging.critical(f"主流程失败: {str(e)}")
        print(f"处理失败: {str(e)}")