# -*- coding: utf-8 -*-
"""
y ： 起报时间
x : 预报 lead time
对不同集合成员id求RMSE
"""
import pandas as pd
import os,glob,math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.dates as mdates
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter,LatitudeFormatter
import cartopy.feature as cfeature
import shapely.geometry as sgeom
from datetime import datetime,timedelta
from tqdm import tqdm
from global_land_mask import globe
from typlot.scripts.gsj_typhoon import tydat,see,count_rapidgrow,tydat_CMA,average_datetime,split_str_id,load_land_polygons,detect_landfall
from geopy.distance import geodesic
import matplotlib.ticker as ticker
import seaborn as sns
import xarray as xr
import warnings
warnings.filterwarnings("ignore")
from typlot.config.global_config import *
from geopy.distance import geodesic

# Forecast initialization hours (UTC) considered for each case
ini_time_mode = ['00','12']
names = ['mojie_28','dusurui_16','gaemi_09','haikui_38','kangni_54','shantuo_44','saola_25','koinu_49']

#names = ['dusurui_16']
tynames, tyids = split_str_id(names)
draw_obs_opt = True
obs_baseline = 'land'  # 'land' 'RI'
RIstd = 7
tyrmse = {}
show = False  # keep False so figures are saved instead of displayed
track_id = np.arange(1, 52)


# Build the list of per-typhoon NetCDF file paths
typhoon_files = [
    f"/data/gsj/typlot/typlot/{tyname}_pmin_umax_dist_extended.nc"
    for tyname, _tyid in zip(tynames, tyids)
]

print("所有台风文件:")
for path in typhoon_files:
    print(path)

# Method 1: find the largest start_time dimension and re-align everything to it
def align_typhoon_data(typhoon_files):
    """Align the start_time dimension of every typhoon dataset to the maximum.

    Parameters
    ----------
    typhoon_files : list of str
        Paths to the per-typhoon NetCDF files.

    Returns
    -------
    aligned_datasets : dict
        Maps each input file path to an xarray.Dataset whose start_time
        dimension has been padded with NaN to the maximum length found.
    reference_ds : xarray.Dataset or None
        Freshly opened dataset for the file with the largest start_time
        dimension (None when typhoon_files is empty); caller must close it.
    """
    # Pass 1: scan all files for the largest start_time length. Only the
    # winning file PATH is remembered and every handle is closed right away.
    # (The original kept the running-max dataset open and then re-opened the
    # same file again in pass 2, leaking handles along the way.)
    max_start_time = 0
    reference_file = None
    for file in typhoon_files:
        with xr.open_dataset(file) as ds:
            current_start_time = len(ds.start_time)
            if current_start_time > max_start_time:
                max_start_time = current_start_time
                reference_file = file

    print(f"最大 start_time 维度: {max_start_time}")

    reference_ds = xr.open_dataset(reference_file) if reference_file is not None else None

    # Pass 2: reindex every dataset onto the common start_time axis.
    # NOTE(review): np.arange assumes start_time is labelled 0..n-1; if the
    # coordinate held e.g. datetimes, reindex would NaN everything — confirm.
    new_start_time = np.arange(max_start_time)
    aligned_datasets = {}

    for file in typhoon_files:
        ds = xr.open_dataset(file)

        if len(ds.start_time) == max_start_time:
            # Already at the maximum length; keep open so the caller can
            # still read its (possibly lazily loaded) variables.
            aligned_datasets[file] = ds
        else:
            aligned_ds = ds.reindex(
                start_time=new_start_time,
                method=None,          # pad, do not interpolate
                fill_value=np.nan,
            )
            # Force the padded data into memory BEFORE closing the source
            # file: reindex is lazy, and closing first (as the original did)
            # can invalidate the data when it is later written to disk.
            aligned_ds.load()
            aligned_datasets[file] = aligned_ds
            ds.close()

    return aligned_datasets, reference_ds

# Run the alignment across all typhoon files using the corrected method
aligned_datasets, reference_ds = align_typhoon_data(typhoon_files)

# Save the aligned datasets to disk
def save_aligned_datasets(aligned_datasets, output_dir="/data/gsj/typlot/typlot/aligned/"):
    """Write each aligned dataset as ``aligned_<original-name>`` in output_dir.

    Parameters
    ----------
    aligned_datasets : dict
        Maps original file path -> xarray.Dataset (output of
        align_typhoon_data).
    output_dir : str
        Destination directory; created if it does not exist.
    """
    os.makedirs(output_dir, exist_ok=True)

    for file_path, ds in aligned_datasets.items():
        filename = os.path.basename(file_path)
        # BUG FIX: the original wrote every dataset to the literal name
        # "aligned_(unknown)" (filename was computed but never used), so each
        # save overwrote the previous one. Prefix the real filename — this is
        # also the exact name quick_average() reads back.
        output_path = os.path.join(output_dir, f"aligned_{filename}")

        ds.to_netcdf(output_path)
        print(f"已保存对齐数据: {output_path}")

    print("所有对齐数据保存完成!")


# Persist the aligned datasets (quick_average reads these files back)
save_aligned_datasets(aligned_datasets)

# Directly average the aligned datasets and save the result
def quick_average():
    """Average all aligned per-typhoon datasets and write the mean to disk.

    Reads the ``aligned_<name>_pmin_umax_dist_extended.nc`` files produced by
    save_aligned_datasets (one per entry in the module-level ``tynames``),
    stacks them along a new 'typhoon' dimension, averages with NaNs skipped,
    and writes the result to ``average_8typhoons.nc``. Prints a notice and
    returns early when no input files exist.
    """
    # Collect every aligned file that actually exists on disk
    datasets = []
    for tyname in tynames:
        file_path = f'/data/gsj/typlot/typlot/aligned/aligned_{tyname}_pmin_umax_dist_extended.nc'
        if os.path.exists(file_path):
            ds = xr.open_dataset(file_path)
            datasets.append(ds)
            print(f"已加载: {tyname}")

    if not datasets:
        print("没有找到数据文件")
        return

    try:
        # Stack along a new 'typhoon' dimension and average, ignoring the
        # NaN padding introduced by the alignment step
        combined = xr.concat(datasets, dim='typhoon')
        avg_ds = combined.mean(dim='typhoon', skipna=True)

        avg_ds.to_netcdf('/data/gsj/typlot/typlot/aligned/average_8typhoons.nc')
        print("平均数据已保存!")
    finally:
        # FIX: the original leaked every opened file handle; close them all
        for ds in datasets:
            ds.close()

# Entry point: compute and save the multi-typhoon average
quick_average()
