# pre packages
from myglobal import *

# sys packages
from pylab import np
import obspy as ob
from glob import glob
from tqdm import tqdm
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
from scipy.stats import linregress
import datetime
from scipy import interpolate


# self packages
from utils.loc import load_loc, get_distance,sort_data_by_distance
from utils.math import measure_shift_fft,remove_point_skip
from utils.h5data import get_event_data, save_h5_data, read_h5_data
from utils.plot import plot_scatters,plot_traces
from utils.trace import get_tapered_slices, safe_filter

def get_solid(date_string, ndays=180, lon=115.814586, lat=39.117921):
    """Compute the solid-earth tide (vertical component) at a point.

    Parameters
    ----------
    date_string : str
        Start time in '%Y-%m-%dT%H:%M:%SZ' format.
    ndays : int
        Number of days to compute from the start time.
    lon, lat : float
        Point of interest in degrees. Defaults preserve the original
        hard-coded site (~115.81E, 39.12N — northern China; the original
        comment claiming "Los Angeles, CA" was wrong).

    Returns
    -------
    dt_out : np.ndarray
        Sample times in fractional days since the start time, shifted by
        +8 h (UTC+8 / Beijing local time, consistent with the rest of
        this script).
    tide_u : np.ndarray
        Vertical tide displacement at each sample.
    """
    import datetime
    import pysolid

    step_sec = 60 * 60  # hourly sampling in seconds
    ts = datetime.datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%SZ')
    te = ts + datetime.timedelta(days=ndays)

    dt_out, tide_e, tide_n, tide_u = pysolid.calc_solid_earth_tides_point(
        lat, lon, ts, te,
        step_sec=step_sec,
        display=False,
        verbose=False,
    )
    # Convert absolute datetimes to fractional days since start, in UTC+8.
    dt_out = dt_out - ts
    dt_out = np.array([i.days + i.seconds / 24 / 3600 + 8 / 24 for i in dt_out])

    return dt_out, tide_u
# ... existing code ...
def get_sensor_T(date_string, file = './loc/P320T.csv'):
    """Load a temperature-sensor CSV and return relative times + temperatures.

    Parameters
    ----------
    date_string : str
        Reference start time in '%Y-%m-%dT%H:%M:%SZ' format.
    file : str
        Path to a CSV with a 'datetime' column ('%Y-%m-%d %H:%M:%S'
        strings, e.g. '2024-10-13 08:00:00') and a numeric 'T' column.

    Returns
    -------
    t_phy : np.ndarray
        Sample times in fractional days since `date_string`, shifted by
        +8 h (UTC+8 / Beijing local time).
    phy : np.ndarray
        Temperature values as floats.
    """
    import datetime
    import pandas as pd

    ts = datetime.datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%SZ')

    data = pd.read_csv(file)
    # Vectorized parsing instead of a per-row strptime loop.
    dt = pd.to_datetime(data['datetime'], format='%Y-%m-%d %H:%M:%S')
    t_phy = ((dt - ts).dt.total_seconds() / 3600 / 24 + 8 / 24).to_numpy()

    # Temperatures as a float array (was a per-row float() loop).
    phy = data['T'].astype(float).to_numpy()

    return t_phy, phy
# ... existing code ...
# ... existing code ...
def get_era5_T(date_string, file = 'loc/era5.T.2325.grib',key='t2m'):
    """Read an ERA5 GRIB file and return a temperature time series at a fixed grid point.

    Times are returned as fractional days since `date_string`
    ('%Y-%m-%dT%H:%M:%SZ'), shifted by +8 h (UTC+8 / Beijing local time);
    temperatures are converted from Kelvin to Celsius when the variable's
    `units` attribute equals 'K'.
    """
    import xarray as xr
    import datetime
    import pandas as pd

    # Read the GRIB file via the cfgrib engine
    ds = xr.open_dataset(file, engine='cfgrib')
    
    # Pick the temperature variable (usually 't' or 't2m')
    if key in ds:
        temperature = ds[key]  # 2-m air temperature
    elif 't' in ds:
        temperature = ds['t']    # fallback temperature variable
    else:
        # Unknown variable name: list what is available before failing
        print("Available variables:", list(ds.variables.keys()))
        raise ValueError("Could not find temperature variable")
    
    
    # Coordinate axes of the dataset
    time = ds.time
    lat = ds.latitude
    lon = ds.longitude
    
    print("Time dimension:", time)
    print("Latitude dimension:", lat)
    print("Longitude dimension:", lon)
    
    # Parse the reference start time
    ts = datetime.datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%SZ')
    
    # Target location to extract (hard-coded site, ~115.25E, 38.61N)
    target_lat, target_lon = 38.61462333,115.2452038
    
    # Index of the nearest grid point along each axis
    lat_idx = abs(lat - target_lat).argmin()
    lon_idx = abs(lon - target_lon).argmin()
    
    # Extract the time series at that grid point
    temp_series = temperature.isel(latitude=lat_idx, longitude=lon_idx)
    
    # Convert to a pandas DataFrame for row-wise handling
    df = temp_series.to_dataframe().reset_index()
    
    # Build the relative-time and value arrays
    t_era5 = []
    temp_values = []
    
    for i, row in df.iterrows():
        # Fractional days since the start time, shifted to UTC+8
        dti = (pd.to_datetime(row['time']) - pd.to_datetime(ts)).total_seconds()/3600/24 + 8/24
        t_era5.append(dti)
        # Kelvin -> Celsius when the units attribute says 'K'
        # NOTE(review): `temperature.units` raises AttributeError when the
        # variable carries no 'units' attribute — confirm ERA5 files always set it.
        temp_values.append(row[temperature.name] - 273.15 if temperature.units == 'K' else row[temperature.name])
    
    t_era5 = np.array(t_era5)
    temp_values = np.array(temp_values)
    

    return t_era5, temp_values
# ... existing code ...

def merge_data(t, data, t_merged, method='mean', ndays=None, res_d=None):
    """Resample an irregular series (t, data) onto the grid `t_merged`.

    Parameters
    ----------
    t, data : np.ndarray
        Sample times (days) and corresponding values.
    t_merged : np.ndarray
        Target time grid (days).
    method : {'mean', 'interp'}
        'interp' linearly interpolates the raw samples directly.
        'mean' first bin-averages samples in overlapping windows of
        half-width `res_d` centered on a regular grid spanning `ndays`,
        then interpolates the bin means onto `t_merged`.
    ndays, res_d : float, optional
        Span and resolution (days) of the binning grid for 'mean'.
        Default to the module-level NDAYS / RES_D globals set by the
        CLI entry point (kept for backward compatibility; pass them
        explicitly when using this function as a library).

    Returns
    -------
    np.ndarray
        Values on `t_merged`; points outside the data range are 0.

    Raises
    ------
    ValueError
        If `method` is neither 'mean' nor 'interp' (previously this
        silently returned None).
    """
    if method == 'interp':
        f4interp = interpolate.interp1d(t, data, kind='linear',
                                        bounds_error=False, fill_value=0)
        return f4interp(t_merged)
    if method == 'mean':
        # Fall back to the script-level globals when not given explicitly.
        if ndays is None:
            ndays = NDAYS
        if res_d is None:
            res_d = RES_D
        te_target = np.arange(0, ndays, res_d)
        te_real = []
        data_real = []
        for tj in te_target:
            # Overlapping window of total width 2*res_d around each target time.
            mask_j = (t <= tj + res_d) & (t > tj - res_d)
            if mask_j.sum() > 0:
                data_real.append(data[mask_j].mean())
                te_real.append(t[mask_j].mean())
        f4interp = interpolate.interp1d(te_real, data_real, kind='linear',
                                        bounds_error=False, fill_value=0)
        return f4interp(t_merged)
    raise ValueError(f"unknown method: {method!r}")


if __name__ == '__main__':
    # -----------------------------------------------------------------
    # Merge per-file delay measurements into a single resampled HDF5
    # archive, alongside physical reference series (solid-earth tide,
    # ERA5 air temperature, and optionally a local temperature sensor).
    # -----------------------------------------------------------------
    # cmd
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-debug', action='store_true', help='method: DEBUG')
    parser.add_argument('-base', default='DZ155', help='BASE_STAT, default  is DZ155')
    parser.add_argument('-line', default='DZ064', help='LINE_ID1, red')
    parser.add_argument('-date', default='2303',  help='which data to use, 2303, 2406, 2409, 254C')
    parser.add_argument('-emarker', default='', help='emarker')

    # -dt: effective time resolution in hours (CLI help text is Chinese)
    parser.add_argument('-dt', default=1,type=int, help='有效的时间分辨率，单位 小时')
    parser.add_argument('-input', default='',  help='file pattern to load')
    parser.add_argument('-output', default='',  help='output stack h5 file')
    args = parser.parse_args()
    print(args)

    DEBUG= args.debug
    BASE_STAT=args.base
    LINE_ID=args.line
    files_pattern = args.input
    OUTFILE = args.output
    emarker = args.emarker
    RESOLUTION = args.dt

    DATE=args.date

    # Dataset-specific configuration (lines, paths, time span) from myglobal.
    date_info = get_info(DATE)
    LINES = date_info['LINES']
    H5_ROOT = date_info['H5_ROOT']
    NDAYS = date_info['NDAYS']
    START_TIME = date_info['START_TIME']
    s_info = date_info['s_info']

    # A known line id expands to its member stations; otherwise treat
    # the argument as a single station name.
    if LINE_ID in LINES.keys():
        STATS = LINES[LINE_ID]
    else:
        STATS = [LINE_ID]

    RES_D = RESOLUTION/24  # resolution in days; read as a global by merge_data
    FIG_ROOT = 'figures/8.merge.dtshift'
    OUTFILE = f'{H5_ROOT}/{emarker}.{LINE_ID}.RES.{RESOLUTION:03d}H.h5' if not OUTFILE else OUTFILE
    print(f'save final data to {OUTFILE}')

    # Common target time grid (days) onto which every series is merged.
    te_merged = np.arange(0,NDAYS,RES_D)

    # Create the output file (mode 'w' truncates) and store run metadata.
    save_h5_data(file_name=OUTFILE, 
                data_in_dict={'START_TIME':START_TIME,
                            'all_groups':np.array(STATS,dtype='S'),
                            'te':te_merged},
                attrs_dict=vars(args), 
                group_name='metadata',mode='w')


    files = glob(files_pattern)
    files.sort()
    if len(files)==0:
        print(f'no file found in {files_pattern}')

    # Record which input files contributed (keys are their basenames).
    keys = [file.split('/')[-1] for file in files]
    save_h5_data(file_name=OUTFILE, 
            data_in_dict={
                'files':np.array(files, dtype='S'),
                'keys':np.array(keys, dtype='S')
            },group_name='metadata',mode='a')

    # ---- physical reference series, resampled onto te_merged ----
    data_phy = {}
    te_phy, phy = get_solid(START_TIME, NDAYS)
    data_phy['solid']= merge_data(te_phy,phy,t_merged=te_merged)

    te_phy, phy = get_era5_T(START_TIME,key='t2m')  
    data_phy['t2m']= merge_data(te_phy,phy,t_merged=te_merged)

    # A local temperature sensor only exists for some campaigns.
    if DATE=='254C':
        te_phy, phy = get_sensor_T(START_TIME, file = './loc/P320T.csv')
        data_phy['4CT']= merge_data(te_phy,phy,t_merged=te_merged,method='mean')
    elif DATE=='2303':
        te_phy, phy = get_sensor_T(START_TIME, file = 'loc/skt.DZ064.csv')
        data_phy['T_DZ064']= merge_data(te_phy,phy,t_merged=te_merged,method='mean')
    save_h5_data(file_name=OUTFILE, 
                    data_in_dict=data_phy,group_name='phy',mode='a')

    # ---- per-station merge: raw series plus resampled series per input file ----
    for j,name in enumerate(STATS):

        data_j = {'te':te_merged}
        for i, file in enumerate(files):
            key_i = file.split('/')[-1]

            delta_t,te,T_MAX,health = read_h5_data(file,
                        ['delta_t','te','T_MAX','health'], group_name=name)

            # te is in seconds; convert to days and shift to UTC+8.
            te_bjt = te/3600/24+8/24
            # Despike the delay series before merging.
            delta_t = remove_point_skip(delta_t, peroid=100)

            SOURCE = read_h5_data(file,['S'], group_name='metadata')[0]
            SOURCE = SOURCE.decode()
            # Source-to-receiver distance and azimuth for this station.
            rij, azij,_ = get_distance(s_info=s_info,name1=SOURCE,name2=name, S2N=True)
            # NOTE(review): dataset key 'heakth' looks like a typo for
            # 'health', but it is a persisted HDF5 key that downstream
            # readers may already depend on — confirm before renaming.
            save_h5_data(file_name=OUTFILE, 
                    data_in_dict={
                        'te_bjt':te_bjt,
                        'delta_t':delta_t,
                        'heakth':health,
                        'T_MAX':T_MAX,
                        'r':rij,
                        'az':azij,
                    },
                    attrs_dict={'file':file},
                    group_name=f'{name}/RAW/{key_i}/',mode='a')

            data_j[f'merged/{key_i}']= merge_data(te_bjt, delta_t,t_merged=te_merged)
            data_j[f'health/{key_i}']= merge_data(te_bjt, health,t_merged=te_merged)

        print(OUTFILE,f'{name}')
        save_h5_data(file_name=OUTFILE, 
                    data_in_dict=data_j,group_name=name,mode='a')