# import h5py
# import numpy as np
# import pandas as pd
# import netCDF4 as nc
# from global_land_mask import globe
# import multiprocessing
#
# # 定义文件路径
# hdf_file_path = r"/mnt/raid1/liudd/fy_l1_202001_02/FY4A-_AGRI--_N_DISK_1047E_L1-_FDI-_MULT_NOM_20200101040000_20200101041459_4000M_V0001.HDF"
# cth_file_path = r"/mnt/raid1/liudd/fy_cth_202001_02/FY4A-_AGRI--_N_DISK_1047E_L2-_CTH-_MULT_NOM_20200101040000_20200101041459_4000M_V0001.NC"
# ctt_file_path = r"/mnt/raid1/liudd/fy_ctt_202001_02/FY4A-_AGRI--_N_DISK_1047E_L2-_CTT-_MULT_NOM_20200101040000_20200101041459_4000M_V0001.NC"
# ctp_file_path = r"/mnt/raid1/liudd/fy_ctp_202001_02/FY4A-_AGRI--_N_DISK_1047E_L2-_CTP-_MULT_NOM_20200101040000_20200101041459_4000M_V0001.NC"
# clt_file_path = r"/mnt/raid1/liudd/fy_clt_202001_02/FY4A-_AGRI--_N_DISK_1047E_L2-_CLT-_MULT_NOM_20200101040000_20200101041459_4000M_V0001.NC"
# olr_file_path = r"/mnt/raid1/liudd/fy_olr_202001_02/FY4A-_AGRI--_N_DISK_1047E_L2-_OLR-_MULT_NOM_20200101040000_20200101041459_4000M_V0001.NC"
# clm_file_path = r"/mnt/raid1/liudd/fy_clm_202001_02/FY4A-_AGRI--_N_DISK_1047E_L2-_CLM-_MULT_NOM_20200101040000_20200101041459_4000M_V0001.NC"
# coord_file_name = '/home/liudd/data_preprocessing/FY4A_coordinates.nc'
#
# # 加载数据
# cth = nc.Dataset(cth_file_path, 'r')
# cth_data = cth.variables['CTH'][:]
#
# ctt = nc.Dataset(ctt_file_path, 'r')
# ctt_data = ctt.variables['CTT'][:]
#
# ctp = nc.Dataset(ctp_file_path, 'r')
# ctp_data = ctp.variables['CTP'][:]
#
# clt = nc.Dataset(clt_file_path, 'r')
# clt_data = clt.variables['CLT'][:]
#
# olr = nc.Dataset(olr_file_path, 'r')
# olr_data = olr.variables['OLR'][:]
#
# clm = nc.Dataset(clm_file_path, 'r')
# clm_data = clm.variables['CLM'][:]
#
# coord_file_open = nc.Dataset(coord_file_name, 'r')
# lat = coord_file_open.variables['lat'][:, :].T
# lon = coord_file_open.variables['lon'][:, :].T
#
# # 读取 HDF 文件中的数据
# NOMNames = [f'NOMChannel{str(i).zfill(2)}' for i in range(1, 15)]
# CALNames = [f'CALChannel{str(i).zfill(2)}' for i in range(1, 15)]
# img = np.zeros((2748, 2748, 14), dtype=np.float32)
#
# with h5py.File(hdf_file_path, 'r') as h5file:
#     for i in range(14):
#         NOMData = h5file[NOMNames[i]][:]
#         CalData = h5file[CALNames[i]][:]
#         valid_mask = (NOMData >= 0) & (NOMData < 4096)
#         if i == 6:
#             valid_mask = (NOMData >= 0) & (NOMData < 65536)
#         TOARefData = np.zeros_like(NOMData, dtype=np.float32)
#         indices = np.where(valid_mask)
#         index_values = NOMData[indices].astype(int)
#         valid_indices = index_values[(0 <= index_values) & (index_values < len(CalData))]
#         TOARefData[indices] = CalData[valid_indices]
#         img[:, :, i] = TOARefData
#
# # 将数据整理成 DataFrame
# rows, cols = np.indices(img.shape[:2])
#
# data = {
#     'fy_lat': lat.flatten(),
#     'fy_lon': lon.flatten(),
#     'fy_cth': cth_data.flatten(),
#     'fy_ctt': ctt_data.flatten(),
#     'fy_ctp': ctp_data.flatten(),
#     'fy_clt': clt_data.flatten(),
#     'fy_olr': olr_data.flatten(),
#     'fy_clm': clm_data.flatten(),
# }
#
# # 添加每个通道的数据
# for i in range(14):
#     data[f'band{i + 1}'] = img[:, :, i].flatten()
#
# # 创建 DataFrame
# df = pd.DataFrame(data).dropna()
#
# # 判断某一行数据是海洋还是陆地
# def is_land_row(row):
#     lat = row['fy_lat']
#     lon = row['fy_lon']
#     if lon > 180:
#         lon -= 360
#     return globe.is_land(lat, lon)
#
# # 分批处理数据：将数据按海洋和陆地分开
# def process_chunk(chunk):
#     land_rows = []
#     ocean_rows = []
#     for _, row in chunk.iterrows():
#         if is_land_row(row):
#             land_rows.append(row)
#         else:
#             ocean_rows.append(row)
#     return pd.DataFrame(land_rows), pd.DataFrame(ocean_rows)
#
# # 分批处理数据，避免内存问题
# batch_size = 1000  # 每批处理1000行数据
# num_processes = multiprocessing.cpu_count()  # 使用所有可用的CPU核心
# pool = multiprocessing.Pool(processes=num_processes)  # 创建进程池
# results = []
#
# # 分批处理
# for start in range(0, len(df), batch_size):
#     end = start + batch_size
#     chunk = df.iloc[start:end]
#     results.append(pool.apply_async(process_chunk, args=(chunk,)))
#
# # 等待所有进程完成
# land_data = []
# ocean_data = []
#
# for result in results:
#     land_df_chunk, ocean_df_chunk = result.get()
#     land_data.append(land_df_chunk)
#     ocean_data.append(ocean_df_chunk)
#
# # 将结果合并
# land_df = pd.concat(land_data, ignore_index=True)
# ocean_df = pd.concat(ocean_data, ignore_index=True)
#
# # 保存海洋和陆地数据到CSV文件
# land_df.to_csv("2020010104land_data.csv", index=False)
# ocean_df.to_csv("2020010104ocean_data.csv", index=False)
#
# print("数据处理完成，已分别保存海洋数据和陆地数据。")
import h5py
import numpy as np
import pandas as pd
import netCDF4 as nc
from global_land_mask import globe
import multiprocessing
import re

# File paths: one L1 imagery file plus six L2 cloud-product files, all for
# the same observation window, and a static geolocation grid.
_TIME_WINDOW = "20200101040000_20200101041459"


def _l2_path(product):
    """Build the path of one FY4A L2 product file for this time window."""
    return ("/mnt/raid1/liudd/fy_" + product.lower() + "_202001_02/"
            "FY4A-_AGRI--_N_DISK_1047E_L2-_" + product + "-_MULT_NOM_"
            + _TIME_WINDOW + "_4000M_V0001.NC")


# L1 full-disk imagery (HDF5).
hdf_file_path = ("/mnt/raid1/liudd/fy_l1_202001_02/"
                 "FY4A-_AGRI--_N_DISK_1047E_L1-_FDI-_MULT_NOM_"
                 + _TIME_WINDOW + "_4000M_V0001.HDF")
# L2 cloud products (NetCDF).
cth_file_path = _l2_path("CTH")
ctt_file_path = _l2_path("CTT")
ctp_file_path = _l2_path("CTP")
clt_file_path = _l2_path("CLT")
olr_file_path = _l2_path("OLR")
clm_file_path = _l2_path("CLM")
# Static latitude/longitude grid for the FY4A full disk.
coord_file_name = '/home/liudd/data_preprocessing/FY4A_coordinates.nc'

def extract_time_from_filename(filename):
    """Return the first 14-digit timestamp found in *filename*.

    The timestamp must be delimited by underscores (e.g.
    ``..._20200101040000_...``). Returns ``None`` when no such group exists.
    """
    match = re.search(r'_(\d{14})_', filename)
    return match.group(1) if match else None

# Derive the scene timestamp from the L1 filename; abort early if missing.
timestamp = extract_time_from_filename(hdf_file_path)
if timestamp is None:
    raise ValueError("文件名中未找到时间信息。")
print(f"提取的时间信息: {timestamp}")

# Load the L2 cloud products and the geolocation grid.
def _read_nc_var(path, var_name):
    """Read one variable fully into memory and close the NetCDF file.

    The original code left every Dataset handle open for the lifetime of
    the process; the context manager releases each file after the read.
    """
    with nc.Dataset(path, 'r') as ds:
        return ds.variables[var_name][:]


cth_data = _read_nc_var(cth_file_path, 'CTH')
ctt_data = _read_nc_var(ctt_file_path, 'CTT')
ctp_data = _read_nc_var(ctp_file_path, 'CTP')
clt_data = _read_nc_var(clt_file_path, 'CLT')
olr_data = _read_nc_var(olr_file_path, 'OLR')
clm_data = _read_nc_var(clm_file_path, 'CLM')

# Latitude/longitude grids, transposed to match the image orientation.
with nc.Dataset(coord_file_name, 'r') as coord_ds:
    lat = coord_ds.variables['lat'][:, :].T
    lon = coord_ds.variables['lon'][:, :].T

# Dataset names inside the L1 HDF file: raw counts (NOM) and calibration
# lookup tables (CAL) for the 14 AGRI channels.
NOMNames = ['NOMChannel%02d' % ch for ch in range(1, 15)]
CALNames = ['CALChannel%02d' % ch for ch in range(1, 15)]
# Calibrated full-disk cube: 2748 x 2748 pixels x 14 channels.
img = np.zeros((2748, 2748, 14), dtype=np.float32)

def _calibrate_channel(nom, cal, max_dn):
    """Map raw DN counts through the calibration lookup table *cal*.

    Pixels whose DN falls outside ``[0, max_dn)`` or beyond the LUT length
    stay at 0. Returns a float32 array shaped like *nom*.

    Bug fixed vs. the original: the old code filtered out-of-range LUT
    indices into a *shorter* array but still assigned it to the unfiltered
    pixel positions, which raises a shape-mismatch error whenever any DN
    exceeds ``len(cal)``. Here a single combined mask keeps positions and
    values aligned.
    """
    out = np.zeros(nom.shape, dtype=np.float32)
    dn = nom.astype(np.int64)
    ok = (dn >= 0) & (dn < max_dn) & (dn < len(cal))
    out[ok] = cal[dn[ok]]
    return out


with h5py.File(hdf_file_path, 'r') as h5file:
    for i in range(14):
        nom_data = h5file[NOMNames[i]][:]
        cal_data = h5file[CALNames[i]][:]
        # Channel 7 (index 6) uses a 16-bit DN range; all others 12-bit.
        max_dn = 65536 if i == 6 else 4096
        img[:, :, i] = _calibrate_channel(nom_data, cal_data, max_dn)

# Flatten every 2-D field into per-pixel columns of a single DataFrame.
columns = {
    'fy_lat': lat.flatten(),
    'fy_lon': lon.flatten(),
    'fy_cth': cth_data.flatten(),
    'fy_ctt': ctt_data.flatten(),
    'fy_ctp': ctp_data.flatten(),
    'fy_clt': clt_data.flatten(),
    'fy_olr': olr_data.flatten(),
    'fy_clm': clm_data.flatten(),
    'timestamp': [timestamp] * lat.size,
}

# One column per AGRI channel: band1 .. band14.
columns.update({f'band{ch + 1}': img[:, :, ch].flatten() for ch in range(14)})

# Drop pixels that have any missing value.
df = pd.DataFrame(columns).dropna()

# Batch worker: keep only the ocean rows of one DataFrame chunk.
def filter_ocean_chunk(chunk):
    """Return the subset of *chunk* whose pixels are over ocean.

    Longitudes above 180 are shifted into [-180, 180] before the lookup.
    Vectorized replacement for the original row-by-row ``iterrows`` loop:
    one array call to ``globe.is_land`` per chunk instead of one per row,
    and column dtypes are preserved (rebuilding from a list of row Series
    coerced every column to object).
    """
    if chunk.empty:
        return chunk
    lats = chunk['fy_lat'].to_numpy(dtype=float)
    lons = chunk['fy_lon'].to_numpy(dtype=float)
    lons = np.where(lons > 180, lons - 360, lons)
    return chunk[~globe.is_land(lats, lons)]

# Multiprocess the land/ocean split in batches of 10000 rows.
batch_size = 10000
num_processes = multiprocessing.cpu_count()

# The `with` block closes and joins the pool on exit; the original
# created the pool and never released it.
with multiprocessing.Pool(processes=num_processes) as pool:
    pending = [
        pool.apply_async(filter_ocean_chunk, args=(df.iloc[start:start + batch_size],))
        for start in range(0, len(df), batch_size)
    ]
    # Collect the filtered chunks (get() re-raises worker exceptions).
    chunks = [job.get() for job in pending]

# pd.concat raises on an empty list, so fall back to an empty frame with
# the same columns when df had no rows at all.
ocean_data = pd.concat(chunks, ignore_index=True) if chunks else df.iloc[0:0]

# Save the ocean rows to CSV, named after the scene timestamp.
ocean_data.to_csv(f"{timestamp}_ocean_data.csv", index=False)

print(f"仅海洋数据处理完成，已保存到 {timestamp}_ocean_data.csv 文件。")


# # 保存海洋数据到CSV文件
# df_ocean.to_csv("ocean_data_2020010104.csv", index=False)
#
# print("仅海洋数据处理完成，已保存到CSV文件。")
