from global_land_mask import globe
import numpy as np
import pandas as pd
import multiprocessing

# Load the latitude/longitude data from the fy match file.
# NOTE: this runs at import time, so spawned worker processes also re-read
# the CSV when the module is re-imported under the "spawn" start method.
fy_df = pd.read_csv('match_time201902_08_L1_10.25.csv')

def is_land_row(row):
    """Return True if the row's (fy_lat, fy_lon) point falls on land.

    global_land_mask expects longitude in [-180, 180]. The original code
    only corrected lon > 180; this wraps any longitude (0-360 grids,
    values below -180, multiple wraps) into the valid range.
    """
    lat = row['fy_lat']
    # Wrap longitude into [-180, 180) regardless of the input convention.
    lon = (row['fy_lon'] + 180.0) % 360.0 - 180.0
    return globe.is_land(lat, lon)

def process_chunk(chunk):
    """Split a DataFrame chunk into (land_df, ocean_df) partitions.

    Uses a boolean mask on the original chunk rather than rebuilding
    DataFrames from row lists, so both partitions always keep the
    original columns and dtypes — pd.DataFrame([]) on an empty
    partition would otherwise drop the column schema entirely.
    """
    # Evaluate the land test once per row; empty chunks yield an empty mask.
    mask = np.array([is_land_row(row) for _, row in chunk.iterrows()], dtype=bool)
    return chunk[mask], chunk[~mask]

# Process the data in batches to avoid memory problems on large inputs.
batch_size = 1000

# Guard the pool setup: under the "spawn" start method (the default on
# Windows and macOS) worker processes re-import this module, and an
# unguarded Pool() here would recursively spawn workers and crash.
if __name__ == '__main__':
    num_processes = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=num_processes)

    # Submit one async task per batch.
    results = []
    for start in range(0, len(fy_df), batch_size):
        chunk = fy_df.iloc[start:start + batch_size]
        results.append(pool.apply_async(process_chunk, args=(chunk,)))

    # Gather all partial results first, then concatenate once —
    # repeated pd.concat inside the loop is quadratic in total rows.
    land_parts = []
    ocean_parts = []
    for result in results:
        land_chunk, ocean_chunk = result.get()
        land_parts.append(land_chunk)
        ocean_parts.append(ocean_chunk)

    pool.close()
    pool.join()

    # Fall back to empty frames when there were no batches at all.
    land_df = pd.concat(land_parts, ignore_index=True) if land_parts else pd.DataFrame()
    ocean_df = pd.concat(ocean_parts, ignore_index=True) if ocean_parts else pd.DataFrame()

    # Write the land rows to CSV.
    land_csv_file_path = 'match_02_08_land_data.csv'
    land_df.to_csv(land_csv_file_path, index=False)

    # Write the ocean rows to CSV.
    ocean_csv_file_path = 'match_02_08_ocean_data.csv'
    ocean_df.to_csv(ocean_csv_file_path, index=False)

    print(f'Land data saved to {land_csv_file_path}')
    print(f'Ocean data saved to {ocean_csv_file_path}')