# pre packages
from myglobal import *
from mpi4py import MPI
comm = MPI.COMM_WORLD

# sys packages
from pylab import np
import obspy as ob
from glob import glob
from tqdm import tqdm
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
from scipy.stats import linregress
from scipy.interpolate import interp1d
import time
from scipy import signal


# self packages
from utils.loc import load_loc, get_distance,sort_data_by_distance
from utils.math import norm, my_vel_regress
from corr4py.corr import corr_with_wavelets
from utils.h5data import get_event_data, save_h5_data, read_h5_data,h5py,h5glob, item_exists
from utils.hsr import RawDataBase, search_events
from utils.plot import plot_traces_by_subfigures, plot_events,plot_raw_data
from utils.trace import get_tapered_slices, filtfilt

def get_hsr_slices_N(data_infile, t,
                   CORR_STATS, BASE_STAT, L_WIN, 
                    v_info,x_infile,STATS_infile):
    """
    Cut a time window of the requested stations from the raw records while
    avoiding the train signal itself; windows with the train to the NORTH
    of the line are preferred.

    Parameters
    ----------
    data_infile : numpy.ndarray
        Raw traces, shape [n_stations_infile, n_samples]; row order follows
        STATS_infile.
    t : numpy.ndarray
        Uniformly sampled time axis of data_infile.
    CORR_STATS : list of str
        Station names to extract; defines the row order of the output.
    BASE_STAT : str
        Reference (virtual source) station; must be in CORR_STATS.
    L_WIN : float
        Requested window length in seconds.
    v_info : sequence
        [direction, v1, a1, e1, v2, a2, e2]; direction 1=S2N, 2=N2S,
        3=both directions, 0=none; (v, a, e) are fitted speed, time offset
        and fit error for each passage.
    x_infile : sequence
        Along-line positions of the rows of data_infile.
    STATS_infile : list of str
        Station names of the rows of data_infile.

    Returns
    -------
    traces_out : numpy.ndarray
        Cut (zero-padded) data, shape [len(CORR_STATS), round(L_WIN/dt)].
    t_out : numpy.ndarray
        Time axis of the cut window, shape (round(L_WIN/dt),).
    TLIM : sequence of two floats
        Selected window limits (placeholder zeros when rejected).
    USED : bool
        False when no valid train-free window could be found.
    """

    tmin, tmax = t[0], t[-1]
    dt = t[1]-t[0]
    ns = len(CORR_STATS)

    # Re-order raw traces and positions to the requested station list.
    traces = []
    x_raw = []
    for name in CORR_STATS:
        idx_j = STATS_infile.index(name)
        traces.append(data_infile[idx_j,:])
        x_raw.append(x_infile[idx_j])
    traces = np.array(traces)
    x_raw = np.array(x_raw)

    LT = L_WIN
    LTP = int(np.round(LT/dt))
    traces_out = np.zeros([ns,LTP], dtype=np.float32)
    t_out = np.zeros([LTP],dtype=np.float32)

    # Train passage parameters.
    direction = v_info[0]
    v1,a1,e1 = v_info[1:4]
    v2,a2,e2 = v_info[4:]
    TS, TE = -2,7 # offsets around the theoretical arrival, used to exclude the train passing overhead

    t_hsr_S2N = x_raw/(v1+1e-10)-a1 # theoretical S2N arrival times
    t_hsr_min1 = np.min(t_hsr_S2N+TS)
    t_hsr_max1 = np.max(t_hsr_S2N+TE)

    t_hsr_N2S = x_raw/(v2+1e-10)-a2 # theoretical N2S arrival times
    t_hsr_min2 = np.min(t_hsr_N2S+TS)
    t_hsr_max2 = np.max(t_hsr_N2S+TE)

    # Demote "both directions" to a single one when a train's predicted
    # passage falls (partly) outside the record and so cannot affect it.
    if (t_hsr_min1<tmin or t_hsr_max1>tmax) and direction!=0:
        direction=2
    if (t_hsr_min2<tmin or t_hsr_max2>tmax) and direction!=0:
        direction=1

    # Pick the window according to the train direction.
    if direction==1: # S2N: window AFTER the passage, i.e. train already north
        # tmin, t_hsr_min, [t_hsr_max, tmax]
        TLIM = t_hsr_max1,t_hsr_max1+LT

    elif direction==2: # N2S: window BEFORE the passage, i.e. train still north
        # [tmin, t_hsr_min], t_hsr_max, tmax
        TLIM = t_hsr_min2-LT,t_hsr_min2

    elif direction==3: # trains in both directions: complicated case
        # Prefer a window with the train(s) on the north side.
        # Meeting point north of the line:
        # tmin, [t_hsr_min1, t_hsr_max1], [t_hsr_min2, t_hsr_max2], tmax
        if t_hsr_min1<=t_hsr_min2:

            if t_hsr_min2-t_hsr_max1>LT*0.6:     # both trains north of the gap
                TLIM = t_hsr_max1, min(t_hsr_min2,t_hsr_max1+LT)
            elif tmax-t_hsr_max2>t_hsr_min1-tmin:# more spare time on the right
                TLIM = t_hsr_max2, t_hsr_max2+LT
            elif t_hsr_min1-tmin>tmax-t_hsr_max2:# more spare time on the left
                TLIM = t_hsr_min1-LT, t_hsr_min1
            else:
                raise ValueError('no specified direction.')
        # Meeting point south of the line:
        # tmin, [t_hsr_min2, t_hsr_max2], [t_hsr_min1, t_hsr_max1], tmax
        else:
            if tmax-t_hsr_max1>t_hsr_min2-tmin: # more spare time on the right
                TLIM = t_hsr_max1, t_hsr_max1+LT
            else:                               # more spare time on the left
                TLIM = t_hsr_min2-LT, t_hsr_min2
        # NOTE(review): the line below unconditionally discards the window
        # computed above, effectively rejecting every direction==3 event.
        # Kept as-is to preserve behavior — confirm this is intentional.
        TLIM=[100000,-10000000]
    else:
        # Sentinel window that forces the "no valid window" early return below.
        TLIM=[100000,-10000000]
        print(TLIM)
        print(f'no specified direction. {TLIM=}')
    # Clamp the window to the record limits.
    TLIM = max(TLIM[0], tmin),min(TLIM[1], tmax)

    if TLIM[1]-TLIM[0]<10:
        # Window shorter than 10 s (or empty): reject this event.
        print(v1,a1,e1,direction)
        print([t_hsr_min1,t_hsr_max1])
        print(v2,a2,e2,direction)
        print([t_hsr_min2,t_hsr_max2])  
        print(f'TLIM {TLIM=} is too short and unexpected.')
        return traces_out,t_out, [t_out[0],t_out[-1]],False
    else:
        print(TLIM)

    traces_cut,t_cut = get_tapered_slices(traces, t, TLIM, L_Taper=1)
    nt_cut = len(t_cut)
    if nt_cut <LTP:
        # Zero-pad to the fixed output length.
        traces_out[:,:nt_cut] = traces_cut
        t_out = t_cut[0]+np.arange(LTP)*dt
    elif nt_cut ==LTP:
        traces_out = traces_cut
        t_out = t_cut
    else:
        raise ValueError(f'{TLIM=}is too large and unexpected.{nt_cut=},{LTP=} ')

    return traces_out,t_out, TLIM,True

def get_hsr_slices_S(data_infile, t,
                   CORR_STATS, BASE_STAT, L_WIN, 
                    v_info,x_infile,STATS_infile):
    """
    Cut a time window of the requested stations from the raw records while
    avoiding the train signal itself; windows with the train to the SOUTH
    of the line are preferred.

    Mirror image of :func:`get_hsr_slices_N`: the direction codes 1 and 2
    are swapped internally and the N2S/S2N arrival bounds exchange roles.

    Parameters
    ----------
    data_infile : numpy.ndarray
        Raw traces, shape [n_stations_infile, n_samples]; row order follows
        STATS_infile.
    t : numpy.ndarray
        Uniformly sampled time axis of data_infile.
    CORR_STATS : list of str
        Station names to extract; defines the row order of the output.
    BASE_STAT : str
        Reference (virtual source) station; must be in CORR_STATS.
    L_WIN : float
        Requested window length in seconds.
    v_info : sequence
        [direction, v1, a1, e1, v2, a2, e2]; direction 1=S2N, 2=N2S,
        3=both directions, 0=none.
    x_infile : sequence
        Along-line positions of the rows of data_infile.
    STATS_infile : list of str
        Station names of the rows of data_infile.

    Returns
    -------
    traces_out : numpy.ndarray
        Cut (zero-padded) data, shape [len(CORR_STATS), round(L_WIN/dt)].
    t_out : numpy.ndarray
        Time axis of the cut window, shape (round(L_WIN/dt),).
    TLIM : sequence of two floats
        Selected window limits (placeholder zeros when rejected).
    USED : bool
        False when no valid train-free window could be found.
    """

    tmin, tmax = t[0], t[-1]
    dt = t[1]-t[0]
    ns = len(CORR_STATS)

    # Re-order raw traces and positions to the requested station list.
    traces = []
    x_raw = []
    for name in CORR_STATS:
        idx_j = STATS_infile.index(name)
        traces.append(data_infile[idx_j,:])
        x_raw.append(x_infile[idx_j])
    traces = np.array(traces)
    x_raw = np.array(x_raw)

    LT = L_WIN
    LTP = int(np.round(LT/dt))
    traces_out = np.zeros([ns,LTP], dtype=np.float32)
    t_out = np.zeros([LTP],dtype=np.float32)

    # Train passage parameters. Note the 1/2 index roles are swapped
    # relative to get_hsr_slices_N: here "1" bounds come from the N2S train.
    direction = v_info[0]
    v1,a1,e1 = v_info[1:4]
    v2,a2,e2 = v_info[4:]
    TS, TE = -2,7 # offsets around the theoretical arrival, used to exclude the train passing overhead

    t_hsr_S2N = x_raw/(v1+1e-10)-a1 # theoretical S2N arrival times
    t_hsr_min2 = np.min(t_hsr_S2N+TS)
    t_hsr_max2 = np.max(t_hsr_S2N+TE)

    t_hsr_N2S = x_raw/(v2+1e-10)-a2 # theoretical N2S arrival times
    t_hsr_min1 = np.min(t_hsr_N2S+TS)
    t_hsr_max1 = np.max(t_hsr_N2S+TE)

    # Swap the single-direction codes so the window logic below mirrors _N.
    if direction==1:
        direction=2
    elif direction==2:
        direction=1

    # NOTE(review): unlike get_hsr_slices_N, the demotion of direction==3
    # events whose passage falls outside the record is disabled here.

    # Pick the window according to the train direction.
    if direction==1: # N2S: window AFTER the passage, i.e. train already south
        # tmin, t_hsr_min, [t_hsr_max, tmax]
        TLIM = t_hsr_max1,t_hsr_max1+LT

    elif direction==2: # S2N: window BEFORE the passage, i.e. train still south
        # [tmin, t_hsr_min], t_hsr_max, tmax
        TLIM = t_hsr_min2-LT,t_hsr_min2

    elif direction==3: # trains in both directions: complicated case
        # Prefer a window with the train(s) on the south side.
        # Meeting point north of the line (comment garbled in original source):
        # tmin, [t_hsr_min1, t_hsr_max1], [t_hsr_min2, t_hsr_max2], tmax
        if t_hsr_min1<=t_hsr_min2:

            if t_hsr_min2-t_hsr_max1>LT*0.6:     # both trains clear of the gap
                TLIM = t_hsr_max1, min(t_hsr_min2,t_hsr_max1+LT)
            elif tmax-t_hsr_max2>t_hsr_min1-tmin:# more spare time on the right
                TLIM = t_hsr_max2, t_hsr_max2+LT
            elif t_hsr_min1-tmin>tmax-t_hsr_max2:# more spare time on the left
                TLIM = t_hsr_min1-LT, t_hsr_min1
            else:
                raise ValueError('no specified direction.')
        # Meeting point south of the line:
        # tmin, [t_hsr_min2, t_hsr_max2], [t_hsr_min1, t_hsr_max1], tmax
        else:
            if tmax-t_hsr_max1>t_hsr_min2-tmin: # more spare time on the right
                TLIM = t_hsr_max1, t_hsr_max1+LT
            else:                               # more spare time on the left
                TLIM = t_hsr_min2-LT, t_hsr_min2
        # NOTE(review): the line below unconditionally discards the window
        # computed above, effectively rejecting every direction==3 event.
        # Kept as-is to preserve behavior — confirm this is intentional.
        TLIM=[100000,-10000000]
    else:
        # Sentinel window that forces the "no valid window" early return below.
        TLIM=[100000,-10000000]
        print(TLIM)
        print(f'no specified direction. {TLIM=}')

    # Clamp the window to the record limits.
    TLIM = max(TLIM[0], tmin),min(TLIM[1], tmax)

    if TLIM[1]-TLIM[0]<10:
        # Window shorter than 10 s (or empty): reject this event.
        print(v1,a1,e1,direction)
        print([t_hsr_min1,t_hsr_max1])
        print(v2,a2,e2,direction)
        print([t_hsr_min2,t_hsr_max2])  
        print(f'TLIM {TLIM=} is too short and unexpected.')
        return traces_out,t_out, [t_out[0],t_out[-1]],False
    else:
        print(TLIM)

    traces_cut,t_cut = get_tapered_slices(traces, t, TLIM, L_Taper=1)
    nt_cut = len(t_cut)
    if nt_cut <LTP:
        # Zero-pad to the fixed output length.
        traces_out[:,:nt_cut] = traces_cut
        t_out = t_cut[0]+np.arange(LTP)*dt
    elif nt_cut ==LTP:
        traces_out = traces_cut
        t_out = t_cut
    else:
        raise ValueError(f'{TLIM=}is too large and unexpected.{nt_cut=},{LTP=} ')

    return traces_out,t_out, TLIM,True


# Command-line interface for the correlation step.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-debug', action='store_true', help='method: DEBUG')
parser.add_argument('-mute', action='store_true', help='method: mute unvalid times')
parser.add_argument('-raw', action='store_true', help='method: use raw data to corr')
parser.add_argument('-base', default='DZ155', help='BASE_STAT, default  is DZ155')
parser.add_argument('-line', default='VFHF', help='LINE_ID, default is VFHF')
parser.add_argument('-emarker', default='', help='marker in previous processes')
parser.add_argument('-ts', default=-20, type=int, help='start time of corr, unit:s, type: int')
parser.add_argument('-te', default=20,type=int, help='end time of corr, unit:s, type: int')
parser.add_argument('-fs', default=-1, type=float, help='start frequency, unit:Hz, type: float')
parser.add_argument('-fe', default=-1,type=float, help='stop time, unit:Hz, type: float')
parser.add_argument('-L', default=0,type=int, help='length of corr, unit:s, type: int')
parser.add_argument('-D', default=1,type=int, help='location of train, D=1: N, D=2: S')
parser.add_argument('-date', default='',  help='which data to use, 2303, 2406, 2409, 254C')
parser.add_argument('-input', default='',  help='input h5file')
parser.add_argument('-output', default='',  help='output corr h5 file')
parser.add_argument('-figroot', default='figures/4.corr.figures',  help='root to save figs')

args = parser.parse_args()
print(args)

# MPI setup
rank = comm.Get_rank()
size = comm.Get_size()

# Unpack CLI options into module-level configuration names.
# NOTE(review): MUTE, USE_RAW, FS and TL are not referenced anywhere below
# in this file — possibly vestigial or used by code outside this view.
DEBUG = args.debug
MUTE = args.mute
USE_RAW = args.raw
LINE_ID = args.line
BASE_STAT = args.base
DATA_FILE = args.input
OUTFILE = args.output
FIG_ROOT = args.figroot
D = args.D          # window side preference: 1 -> train north, 2 -> train south
fs = args.fs
fe = args.fe
# -1 is the "not set" sentinel; note fs/fe are floats compared with == to -1,
# which works only because -1 is exactly representable.
FS = [fs, fe] if fs != -1 and fe != -1 else None

# Correlation window: either explicit [ts, te] or a fixed-length window
# [100, 100 + L] when -L is given (the latter overrides -ts/-te).
T_LIM = [args.ts, args.te]
TL = args.L
if args.L != 0:
    T_LIM = [100, 100 + args.L]

# Only rank 0 reads the metadata and prepares all run parameters;
# the result is then broadcast to every rank.
if rank == 0:
    # Read metadata from the input HDF5 file.
    metadata, args_infile = read_h5_data(DATA_FILE, 
            keys=['MARKER','EMARKER','DATE'], group_name='metadata', FILL_NONE=True, FILL_VALUE=None,
            read_attrs=True)
    # NOTE(review): metadata[0] corresponds to key 'MARKER' but is assigned
    # to EMARKER — confirm whether metadata[1] ('EMARKER') was intended.
    EMARKER = f'{metadata[0].decode()}' if not args.emarker else args.emarker
    DATE = metadata[2].decode() if not args.date else args.date
    AMP_HSR = args_infile['amp']  # amplitude threshold recorded by the previous step
    
    # Resolve the station line configuration for this date.
    date_info = get_info(DATE)
    s_info = date_info['s_info']
    LINES = date_info['LINES']
    if LINE_ID in LINES.keys():
        CORR_STATS = LINES[LINE_ID]
    else:
        # A single station name was passed instead of a line id.
        CORR_STATS = [LINE_ID]

    # Run marker encodes base station, line, window and train side (N/S).
    MARKER = f'{BASE_STAT}.Line{LINE_ID}.T{T_LIM[0]:02d}.{T_LIM[1]:02d}.TPos{"0NS"[D]}'
    FIG_ROOT = f'{FIG_ROOT}/{DATE}.{MARKER}.{EMARKER}'
    if not os.path.exists(FIG_ROOT):
        os.makedirs(FIG_ROOT)
    
    # NOTE(review): str.replace substitutes the FIRST occurrence of 'h5',
    # which could hit a directory name rather than the extension — verify
    # against the actual DATA_FILE paths in use.
    OUTFILE = DATA_FILE.replace('h5', f'{MARKER}.corr.h5') if not OUTFILE else OUTFILE
    
    # Read the list of event groups.
    groups = read_h5_data(DATA_FILE, keys=['all_groups'])[0]
    groups = [g.decode() for g in groups]
    groups.sort()
    N_per_FIG = max(1, len(groups) // 100)  # ensure at least 1
    ne = len(groups)

    # Read station info: names, train-velocity table and positions.
    STATS_infile, VINFO, x_infile = read_h5_data(
        DATA_FILE, keys=['stats','VINFO','x'], group_name='info')
    STATS_infile = [s.decode() for s in STATS_infile]
    X_shift = x_infile[STATS_infile.index(BASE_STAT)]  # position of the base station

    # Write metadata (mode='w' truncates any existing output file).
    save_h5_data(file_name=f'{OUTFILE}', 
                attrs_dict=vars(args), 
                group_name='metadata',
                data_in_dict={
                    'OUTFILE': OUTFILE,
                    'INFILE': DATA_FILE,
                    'MARKER': MARKER,
                    'EMARKER': EMARKER,
                    'DATE': DATE,
                    'BASE_STAT': BASE_STAT
                }, mode='w')

    # Write the 'info' group.
    save_h5_data(OUTFILE, {
        'all_groups': np.array(groups, dtype='S'),
        'stats': np.array(STATS_infile, dtype='S'),
        'x': x_infile,
        'VINFO': VINFO
    }, group_name='info', mode='a')

    # Distances of the correlation stations relative to the base station,
    # then sort stations by distance.
    x = []
    for name in CORR_STATS:
        xi, _, _ = get_distance(s_info=s_info, name1=BASE_STAT, name2=name, S2N=True)
        x.append(xi)
    x, CORR_STATS = sort_data_by_distance(x, NAMES=CORR_STATS)
    x = np.array(x)
    nx = len(x)
    
    # Bundle everything the worker ranks need.
    broadcast_data = {
        'EMARKER': EMARKER,
        'DATE': DATE,
        'AMP_HSR': AMP_HSR,
        'CORR_STATS': CORR_STATS,
        'MARKER': MARKER,
        'OUTFILE': OUTFILE,
        'FIG_ROOT': FIG_ROOT,
        'groups': groups,
        'ne': ne,
        'N_per_FIG': N_per_FIG,
        'STATS_infile': STATS_infile,
        'x_infile': x_infile,
        'x': x,
        'nx':nx
    }
else:
    broadcast_data = None

# Broadcast the parameter bundle to all ranks.
comm.Barrier()
broadcast_data = comm.bcast(broadcast_data, root=0)

# Unpack on every rank. Only the keys needed by the workers are unpacked;
# names like MARKER / OUTFILE / groups are used exclusively on rank 0,
# where the local (non-broadcast) variables already exist.
AMP_HSR = broadcast_data['AMP_HSR']
CORR_STATS = broadcast_data['CORR_STATS']
ne = broadcast_data['ne']
STATS_infile = broadcast_data['STATS_infile']
x_infile = broadcast_data['x_infile']
x = broadcast_data['x']
nx = broadcast_data['nx']

dt = 0.01         # expected sampling interval (s); asserted against each event below
ts, te = -10, 10  # cross-correlation lag window (s)
# Process the events in rounds: TASK_ROUND rounds of ne//TASK_ROUND events each.
# NOTE(review): the integer slicing below drops the last ne % TASK_ROUND
# events entirely — confirm this remainder loss is acceptable.
TASK_ROUND = 100

for task_i in range(TASK_ROUND):
    # In each round, rank 0 reads the data for all of the round's events.
    t1 = time.time()  # NOTE(review): timing start, never reported afterwards
    if rank == 0:
        # Select the event groups belonging to this round.
        groups_task = groups[task_i*(ne//TASK_ROUND):(task_i+1)*(ne//TASK_ROUND)]
        # Read the data of every event in this round.
        data_list = []
        
        for i, group_i in enumerate(groups_task):
            print(MARKER, task_i, i, group_i)
            data_i, t_i, e_time_i, STATS_infile_i = read_h5_data(DATA_FILE, keys=['data','t','te','stats'],group_name=group_i)
            assert abs((t_i[1]-t_i[0])-dt)<1e-5
            STATS_infile_i = [s.decode() for s in STATS_infile_i]
            e_time_i = e_time_i.decode()
            
            # Pack the event into a tuple, including its train-velocity row.
            data_item = (group_i, data_i, t_i, e_time_i, STATS_infile_i, VINFO[groups.index(group_i),:])
            data_list.append(data_item)
            
        # Split this round's events across ranks (round-robin by index).
        data4scatter = [data_list[i::size] for i in range(size)]
        print('*'*100)
        print(f"processing {task_i=}", [len(i) for i in data4scatter])
        print('*'*100)
    else:
        data4scatter = None
        
    comm.Barrier()
    
    # Scatter this round's data to the ranks.
    data_task_rank = comm.scatter(data4scatter, root=0)
    
    # Each rank processes its own share of the events.
    results_local = {}  # local results, keyed by group_i
    
    for i, items in enumerate(data_task_rank):
        group_i, data_i, t_local, e_time_local, STATS_infile_local, vinfo_i = items
        
        vinfo_p = vinfo_i[1:4]  # (v, a, e) of the first train fit
        vinfo_n = vinfo_i[4:]   # (v, a, e) of the second train fit
        
        # Cut a train-free window from the raw data.
        if D==1:
            traces, t_local, TLIM_i, USED = get_hsr_slices_N(data_i, t_local, CORR_STATS, BASE_STAT, L_WIN=T_LIM[1]-T_LIM[0], 
                                        v_info=vinfo_i, x_infile=x_infile, STATS_infile=STATS_infile_local)
            # NOTE(review): .max() is the signed maximum, not the absolute
            # amplitude — large negative excursions pass this check; confirm.
            if not USED or traces.max()>AMP_HSR*3:
                print(f'{group_i} is not used')
                continue
        elif D==2:
            traces, t_local, TLIM_i, USED = get_hsr_slices_S(data_i, t_local, CORR_STATS, BASE_STAT, L_WIN=T_LIM[1]-T_LIM[0], 
                                        v_info=vinfo_i, x_infile=x_infile, STATS_infile=STATS_infile_local)
            if not USED or traces.max()>AMP_HSR*3:
                print(f'{group_i} is not used')
                continue
        else:
            # Cut directly with T_LIM, ignoring the train position.
            # NOTE(review): `traces` is not yet defined on this path (it is
            # only assigned in the D==1 / D==2 branches), so this would raise
            # NameError — presumably `data_i` was intended. Confirm before
            # running with D outside {1, 2}.
            traces,t_local = get_tapered_slices(traces, t_local, T_LIM, L_Taper=1)
            USED = True
        
        nt = len(t_local)
        idx_BASE = CORR_STATS.index(BASE_STAT)
        print(traces[idx_BASE,:].max())
        
        # corr_with_wavelets is already OMP-parallel internally, so no
        # further MPI parallelization is applied here.
        corr = corr_with_wavelets(traces.reshape([1,nx,nt]),traces[idx_BASE,:].reshape([1,nt]), 
                                sp=int(ts/dt), ep=int(te/dt))
                                
        corr = corr.reshape([nx,-1])
        
        t_corr = np.arange(int(ts/dt),int(te/dt))*dt
        
        # Store the local result, keyed by group_i.
        results_local[group_i] = {
            'corr': corr,
            't': t_corr,
            'R': np.array(CORR_STATS, dtype='S'),
            'S': BASE_STAT,
            'te': e_time_local
        }
        
        # Debug figures (rank 0 only, first event of its share).
        if rank == 0 and DEBUG and i==0:
            from pylab import figure, GridSpec, plt
            plt.close('all')
            fig = figure(figsize=(8, 8), dpi=300)
            gs = GridSpec(2,2)
            ax1 = fig.add_subplot(gs[0,:])
            ax2 = fig.add_subplot(gs[1,0])
            ax3 = fig.add_subplot(gs[1,1])
            PLOT_WIGGLE=False if nx>30 else True
            fig, _ = plot_events(traces, x, t_local,fig=fig, ax=ax1, 
                                DO_FILTER=False, PLOT_WIGGLE=PLOT_WIGGLE, NORM=False, SCALE=300)
            # Overlay the two fitted train trajectories.
            for v,a,e in [vinfo_p,vinfo_n]:
                ax1.plot([-100,100],[v*(-100+a)-X_shift,v*(100+a)-X_shift], lw=1, color='blue')
            ax1.set(
                    xlim=[TLIM_i[0]-10,TLIM_i[1]+10],
                    ylim=[x.min()-300,x.max()+300],
                    title=f'v={vinfo_p[0]:.1f},{vinfo_n[0]:.1f}')
            if not PLOT_WIGGLE:
                plt.clim([-traces.var()**0.5,traces.var()**0.5])
            print(corr.shape, t_corr.shape)
            # corr = norm(corr,ONE_AXIS=True)
            fig, _,_ = plot_raw_data(corr, x, t_corr, fig=fig, ax1=ax2,ax2 = ax3,
                                    fs=1, fe=40, VLIM=[-4000,4000], PLOT_WIGGLE=PLOT_WIGGLE, SCALE=300, FV_NORM=False)
            ax2.set_title('b) corr')
            ax3.set_title('c) FV of b')
            ax3.collections[-1].set_clim([0,0.3])
            fig.tight_layout()
            fig.savefig(f'{FIG_ROOT}/{task_i:02d}_{i:04d}.corr.{MARKER}.{e_time_local}.png')
    
    # Gather every rank's results on rank 0.
    results_all = comm.gather(results_local, root=0)
    
    # Only rank 0 writes results to disk.
    if rank == 0:
        # Merge the per-rank result dictionaries.
        print('*'*100)
        print(f"collecting {task_i=}", [len(i) for i in data4scatter])
        print('*'*100)
        all_results_dict = {}
        for result_dict in results_all:
            all_results_dict.update(result_dict)
        
        # Append every result group to the output HDF5 file.
        for group_i, data_dict in all_results_dict.items():
            save_h5_data(OUTFILE, data_in_dict=data_dict, group_name=f'{group_i}', mode='r+')
    
    # Synchronize all ranks before the next round.
    comm.Barrier()

# Only rank 0 writes the final group list: re-scan the output file for the
# event groups actually saved (skipped events are absent) and overwrite
# 'all_groups' with that list.
if rank == 0:
    groups = h5glob(OUTFILE,pattern='DAY*/IDX*.T*',object_type='group')
    groups.sort()
    save_h5_data(OUTFILE,{'all_groups':np.array(groups,dtype='S')}, mode='a')