# pre packages
from myglobal import *

# sys packages
from pylab import np
import obspy as ob
from glob import glob
from tqdm import tqdm
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
from scipy.stats import linregress
from scipy.interpolate import interp1d


# self packages
from utils.loc import load_loc, get_distance,sort_data_by_distance, get_stats_within_distance
from utils.math import norm, my_vel_regress
from corr4py.corr import corr_with_wavelets
from utils.h5data import get_event_data, save_h5_data, read_h5_data,h5py,h5glob, item_exists
from utils.hsr import get_hsr_slices_N,get_hsr_slices_S
from utils.plot import plot_traces_by_subfigures, plot_events,plot_raw_data
from utils.trace import get_tapered_slices, filtfilt

# MPI packages
from mpi4py import MPI

USE_D3_EVENTS = True  # whether to use mixed-train events (both north- and south-bound trains)
USE_D1_EVENTS = True  # whether to use north-bound train events
USE_D2_EVENTS = True  # whether to use south-bound train events
TS_WAVELET, TE_WAVELET = -10,15        # offsets (s) around the theoretical train arrival time; ensures the train-to-line distance is long enough
THREADS_NUM = 4  # number of threads used by corr4py
N_FIG = 30 # number of debug figures to produce, per base station
DT_DATA=0.01 # data sampling interval (s)
R4LINE=1000 # maximum station offset (same units as x) when auto-selecting stations around the base station
CORRECT_D=True # whether to apply a correction when the train is not inside the window of the segment

def process_base_stat_data(BASE_STAT, config, data_i, t, group_i,etime, i, VINFO, x_infile, STATS_infile):
    """
    Process the data of a single BASE_STAT (reference station) for one event group.

    Parameters:
    BASE_STAT: name of the reference station
    config: configuration dict holding the parameters that do not change with group_i
    data_i: input data array, one trace per station in STATS_infile
    t: time axis of data_i
    group_i: group (event) name
    etime: event time (kept for interface compatibility; not used in this function)
    i: group index
    VINFO: velocity information, one row per group
    x_infile: station positions, aligned with STATS_infile
    STATS_infile: list of station names in the input file

    Returns:
    dict with the correlation results, or None if the event is invalid for this station
    """
    CORR_STATS = config['CORR_STATS']
    x = config['x']
    MARKER = config['MARKER']

    # parameters from config that do not depend on group_i
    TPOS = config['TPOS']
    T_LIM = config['T_LIM']
    AMP_HSR = config['AMP_HSR']
    ts = config['ts']
    te = config['te']
    dt = config['dt']
    DEBUG = config['DEBUG']
    N_per_FIG = config['N_per_FIG']
    FIG_ROOT = config['fig_root']  # output figure directory for the current BASE_STAT

    # Slice the event data.
    # 'N'/'S' select the dedicated HSR slicer for trains north/south of the line;
    # any other TPOS value falls back to a plain T_LIM cut of the requested stations.
    if TPOS in ('N', 'S'):
        # both slicers share the same signature; only the function differs
        slicer = get_hsr_slices_N if TPOS == 'N' else get_hsr_slices_S
        traces, t_new, TLIM_i, USED = slicer(
            data_i, t, CORR_STATS, BASE_STAT, L_WIN=T_LIM[1]-T_LIM[0],
            v_info=VINFO[i,:], x_infile=x_infile, STATS_infile=STATS_infile,
            TS_WAVELET=TS_WAVELET, TE_WAVELET=TE_WAVELET,
            USE_D1_EVENTS=USE_D1_EVENTS, USE_D2_EVENTS=USE_D2_EVENTS,
            USE_D3_EVENTS=USE_D3_EVENTS)
        # Discard the event if the slicer rejected it, or if the amplitude is
        # anomalously large (more than 3x the reference HSR amplitude).
        if not USED or traces.max() > AMP_HSR*3:
            print(f'{group_i} is not used for {BASE_STAT}')
            return None
    else:
        traces = [data_i[STATS_infile.index(name), :] for name in CORR_STATS]
        # cut the data directly with T_LIM
        traces, t_new = get_tapered_slices(np.array(traces), t, T_LIM, L_Taper=1)
        # BUGFIX: TLIM_i was undefined on this branch, which raised a NameError
        # when building `result` below; the plain cut uses T_LIM itself.
        TLIM_i = T_LIM
        USED = True

    # cross-correlate every trace with the base-station trace
    nt = len(t_new)
    idx_BASE = CORR_STATS.index(BASE_STAT)
    corr = corr_with_wavelets(traces.reshape([1,len(CORR_STATS),nt]),traces[idx_BASE,:].reshape([1,nt]), 
                            sp=int(ts/dt), ep=int(te/dt),
                            THREADS_NUM=THREADS_NUM)

    corr = corr.reshape([len(CORR_STATS),-1])
    t_corr = np.arange(int(ts/dt),int(te/dt))*dt

    result = {
        'corr': corr,
        't': t_corr,
        'CORR_STATS': CORR_STATS,
        'x': x,
        'BASE_STAT': BASE_STAT,
        'MARKER': MARKER,
        'traces': traces,
        't_new': t_new,
        'TLIM_i': TLIM_i,
        'group_i': group_i
    }
    
    # debug figures (generated for every BASE_STAT, every N_per_FIG-th group)
    if DEBUG and i%N_per_FIG==0:
        vinfo_p = VINFO[i,1:4]
        vinfo_n = VINFO[i,4:]
        X_shift = x_infile[STATS_infile.index(BASE_STAT)]
        
        from pylab import figure, GridSpec, plt
        plt.close('all')
        fig = figure(figsize=(8, 8), dpi=300)
        gs = GridSpec(2,2)
        ax1 = fig.add_subplot(gs[0,:])
        ax2 = fig.add_subplot(gs[1,0])
        ax3 = fig.add_subplot(gs[1,1])
        nx = len(CORR_STATS)
        # wiggle plots become unreadable with many traces; switch to image mode
        PLOT_WIGGLE=False if nx>30 else True

        # plot the raw (sliced) waveforms
        traces_debug,t_debug = traces, t_new
        fig, _ = plot_events(traces_debug, x, t_debug,fig=fig, ax=ax1, 
                            DO_FILTER=False, PLOT_WIGGLE=PLOT_WIGGLE, NORM=False, SCALE=300)
        # overlay the theoretical train move-out lines (positive/negative direction)
        for v,a,e in [vinfo_p,vinfo_n]:
            ax1.plot([-100,100],[v*(-100+a)-X_shift,v*(100+a)-X_shift], lw=1, color='blue')
        ax1.set(
                xlim=[t_debug[0]-15,t_debug[-1]+15],
                ylim=[x.min()-300,x.max()+300],
                title=f'v={vinfo_p[0]:.1f},{vinfo_n[0]:.1f}')
        if not PLOT_WIGGLE:
            plt.clim([-traces_debug.var()**0.5,traces_debug.var()**0.5])
            
        print(corr.shape, t_corr.shape)
        corr_norm = norm(corr,ONE_AXIS=True)
        fig, _,_ = plot_raw_data(corr_norm, x, t_corr, fig=fig, ax1=ax2,ax2 = ax3,
                                fs=1, fe=40, VLIM=[-4000,4000], PLOT_WIGGLE=PLOT_WIGGLE, SCALE=300, FV_NORM=False)
        ax2.set_title('b) corr')
        ax3.set_title('c) FV of b')
        ax3.collections[-1].set_clim([0,0.3])
        fig.tight_layout()
        # the per-event folder is created by rank 0 under the same DEBUG condition
        folder = f'{FIG_ROOT}/{i:04d}.{group_i.split("/")[-1]}'
        fig.savefig(f'{folder}/corr.{MARKER}.png')
    
    return result

if __name__ == '__main__':
    # Initialize MPI: rank 0 reads the event data and distributes it,
    # all other ranks compute correlations for their assigned BASE_STATs.
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    if size<2:
        print('Please run with at least 2 processes.')
        # NOTE(review): `sys` (and `os`, `get_info` below) are assumed to come
        # from `from myglobal import *` -- confirm they are exported there.
        sys.exit(1)
    
    # command-line options
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-debug', action='store_true', help='method: DEBUG')
    parser.add_argument('-mute', action='store_true', help='method: mute unvalid times')
    parser.add_argument('-raw', action='store_true', help='method: use raw data to corr')
    parser.add_argument('-base', default='DZ155', help='BASE_STAT, default  is DZ155')
    parser.add_argument('-bases', nargs='+', help='List of BASE_STATS to process')
    parser.add_argument('-line', default='VFHF', help='LINE_ID, default is VFHF')
    parser.add_argument('-emarker', default='', help='marker in previous processes')
    parser.add_argument('-ts', default=-20, type=int, help='start time of corr, unit:s, type: int')
    parser.add_argument('-te', default=20,type=int, help='end time of corr, unit:s, type: int')
    parser.add_argument('-fs', default=-1, type=float, help='start frequency, unit:Hz, type: float')
    parser.add_argument('-fe', default=-1,type=float, help='stop time, unit:Hz, type: float')
    parser.add_argument('-L', default=0,type=int, help='length of corr, unit:s, type: int')
    parser.add_argument('-Tpos', default='N',type=str, help='location of train,N,S, default is N')

    parser.add_argument('-noD1', action='store_true', help='method: discard S2N events')
    parser.add_argument('-noD2', action='store_true', help='method: discard N2S events')
    parser.add_argument('-noD3', action='store_true', help='method: discard mixed events')

    parser.add_argument('-date', default='',  help='which data to use, 2303, 2406, 2409, 254C')
    parser.add_argument('-input', default='',  help='input h5file')
    parser.add_argument('-output', default='',  help='output corr h5 file')
    parser.add_argument('-figroot', default='figures/4.corr.figures',  help='root to save figs')

    args = parser.parse_args()
    

    DEBUG= args.debug
    MUTE = args.mute
    USE_RAW = args.raw
    LINE_ID = args.line
    print(args.bases)
    BASE_STATS = args.bases if args.bases else [args.base]  # several base stations are supported
    EMARKER = args.emarker
    DATA_FILE = args.input
    OUTFILE = args.output
    FIG_ROOT = args.figroot
    TPOS = args.Tpos

    USE_D1_EVENTS = not args.noD1
    USE_D2_EVENTS = not args.noD2
    USE_D3_EVENTS = not args.noD3

    DATA_FOLDER = os.path.dirname(DATA_FILE)
    fs = args.fs
    fe = args.fe
    FS = [fs,fe] if fs!=-1 and fe!=-1 else None

    T_LIM = [args.ts,args.te]
    TL = args.L
    if args.L !=0:
        # a fixed window length overrides the explicit -ts/-te limits
        T_LIM=[100,100+args.L]

    # only rank 0 reads the data; placeholders on the other ranks
    metadata, args_infile = None, None
    groups = None
    VINFO = None
    x_infile = None
    
    if rank == 0:
        # read metadata, group names, velocity info and station positions
        metadata, args_infile = read_h5_data(DATA_FILE, 
                keys=['MARKER','EMARKER','DATE'], group_name='metadata',FILL_NONE=True, FILL_VALUE=None,
                read_attrs=True)
        groups = read_h5_data(DATA_FILE, keys=['all_groups'])[0]
        groups = [g.decode() for g in groups]
        ne = len(groups)
        VINFO = read_h5_data(DATA_FILE, keys=['info/VINFO'], FILL_NONE=True)[0]
        x_infile= read_h5_data(DATA_FILE, keys=['info/x'], FILL_NONE=True)[0]

    # broadcast the shared inputs to every rank
    metadata = comm.bcast(metadata, root=0)
    args_infile = comm.bcast(args_infile, root=0)
    groups = comm.bcast(groups, root=0)
    VINFO = comm.bcast(VINFO, root=0)
    x_infile = comm.bcast(x_infile, root=0)
    
    # BUGFIX: int(len(groups)/N_FIG) is 0 when there are fewer than N_FIG
    # groups, which made `i % N_per_FIG` raise ZeroDivisionError.
    N_per_FIG = max(1, len(groups) // N_FIG)
    print(f'find {len(groups)} groups in {DATA_FILE}')
    
    # resolve run configuration from the input file's metadata unless overridden
    # NOTE(review): metadata[0] is the 'MARKER' entry but is used as EMARKER -- confirm
    EMARKER = f'{metadata[0].decode()}' if not args.emarker else args.emarker
    DATE= metadata[2].decode() if not args.date else args.date
    AMP_HSR = args_infile['amp']

    # acquisition-date-specific configuration (station info and line layouts)
    date_info = get_info(DATE)
    s_info = date_info['s_info']
    LINES = date_info['LINES']
    
    configs = {}
    
    if rank == 0:
        print(f'find {len(groups)} groups in {DATA_FILE}')
        DATA_FOLDER = f'{DATA_FOLDER}/{EMARKER}.corr'
        if not os.path.exists(DATA_FOLDER):
            os.makedirs(DATA_FOLDER)

        # figure root for this run (one sub-folder per plotted event)
        fig_root = f'{FIG_ROOT}/MS.{DATE}.{EMARKER}'
        if not os.path.exists(fig_root):
            os.mkdir(fig_root)

        # build the per-BASE_STAT configuration and output files
        for BASE_STAT in BASE_STATS:
            if LINE_ID in LINES.keys():
                CORR_STATS = LINES[LINE_ID]
            else:
                # no predefined line: pick stations within R4LINE of the base
                CORR_STATS = get_stats_within_distance(s_info, base=BASE_STAT,r0=-R4LINE,r1=R4LINE)

            MARKER = f'{BASE_STAT}.Line{LINE_ID}.T{T_LIM[0]:02d}.{T_LIM[1]:02d}.Tp.{TPOS}'
            MARKER += '.D'+'1'*USE_D1_EVENTS+'2'*USE_D2_EVENTS+'3'*USE_D3_EVENTS

            # signed distances from the base station, sorted south-to-north
            x = []
            for name in CORR_STATS:
                xi, _,_ = get_distance(s_info=s_info,name1=BASE_STAT,name2=name, S2N=True)
                x.append(xi)
            x,CORR_STATS = sort_data_by_distance(x,NAMES=CORR_STATS)
            x = np.array(x)

            # one output file per BASE_STAT
            output_file = f'{DATA_FOLDER}/{EMARKER}.corr.{MARKER}.h5' if not OUTFILE else OUTFILE.replace('.h5', f'.{BASE_STAT}.h5')
            
            # create the output file and write the metadata group
            save_h5_data(file_name=output_file, 
                        attrs_dict=vars(args), 
                        group_name='metadata',
                        data_in_dict={'OUTFILE':output_file,
                                    'INFILE':DATA_FILE,
                                    'MARKER':MARKER,
                                    'EMARKER':EMARKER,
                                    'DATE':DATE,
                                    'BASE_STAT':BASE_STAT},mode='w')
            # configuration to be broadcast to the workers
            configs[BASE_STAT] = {
                'CORR_STATS': CORR_STATS,
                'MARKER': MARKER,
                'x': x,
                'output_file': output_file,
                'fig_root': fig_root,
                'TPOS': TPOS,
                'T_LIM': T_LIM,
                'AMP_HSR': AMP_HSR,
                'ts': -10,
                'te': 10,
                'dt': DT_DATA,
                'DEBUG': DEBUG,
                'N_per_FIG': N_per_FIG
            }
            
        print('#'*100)
        for BASE_STAT in BASE_STATS:
            print(f'{configs[BASE_STAT]=}')
            print('#'*100)

        # rank 0 only distributes data; it processes no base station itself
        my_base_stats = []
    else:
        # split the BASE_STATS across the worker ranks (ranks 1..size-1)
        base_stat_per_rank = np.array_split(BASE_STATS, size-1)
        my_base_stats = base_stat_per_rank[rank-1]
        my_base_stats = [str(base) for base in my_base_stats]

    # broadcast the per-BASE_STAT configuration
    configs = comm.bcast(configs, root=0)

    
    if rank == 0:
        IT = enumerate(groups)

        # read each group once and send it to every worker rank
        for i,group_i in IT:
            print(f"Processing group {i}: {group_i}")
            data_i, t, e_time, STATS_infile= read_h5_data(DATA_FILE, keys=['data','t','te','stats'],group_name=group_i)
            # the sampling interval must match the module-level constant
            assert abs((t[1]-t[0])-DT_DATA)<1e-5
            STATS_infile = [s.decode() for s in STATS_infile]
            e_time = e_time.decode()

            # create the per-event figure folder (same condition as the workers' plotting)
            if DEBUG and i%N_per_FIG==0:
                name_i = group_i.split('/')[-1]
                fig_root_i = f'{fig_root}/{i:04d}.{name_i}'
                if not os.path.exists(fig_root_i):
                    os.mkdir(fig_root_i)
            
            # send the event data to all worker ranks
            for target_rank in range(1, size):
                comm.send((data_i, t, e_time, STATS_infile, group_i, i), dest=target_rank)
            
        # signal every worker that there is no more data
        for target_rank in range(1, size):
            comm.send(None, dest=target_rank)
    else:
        # worker ranks: receive events and process their assigned BASE_STATs,
        # accumulating all results to be saved in one pass at the end
        results_dict = {base: {
            'corr_list': [],
            't_list': [],
            'R_list': [],
            'S_list': [],
            'te_list': [],
            'groups_out': [],
            'VINFO_out': []
        } for base in my_base_stats}
        
        while True:
            data = comm.recv(source=0)
            if data is None:  # end-of-data signal
                break
                
            data_i, t, e_time, STATS_infile, group_i, i = data
            print(f"{rank=} Processing group {i}: {group_i},{my_base_stats}")
            # process each BASE_STAT assigned to this rank
            for BASE_STAT in my_base_stats:
                config = configs[BASE_STAT].copy()
                OUTFILE = config['output_file']
                
                result = process_base_stat_data(
                    BASE_STAT, config, data_i, t, group_i,e_time, i, VINFO, x_infile, STATS_infile
                )
                
                if result is None:
                    # event rejected for this base station
                    continue
                    
                # record the accepted group and its velocity info
                results_dict[BASE_STAT]['groups_out'].append(group_i)
                results_dict[BASE_STAT]['VINFO_out'].append(VINFO[i,:])

                # collect the result arrays; saving is deferred until the end
                results_dict[BASE_STAT]['corr_list'].append(result['corr'])
                results_dict[BASE_STAT]['t_list'].append(result['t'])
                results_dict[BASE_STAT]['R_list'].append(np.array(result['CORR_STATS'], dtype='S'))
                results_dict[BASE_STAT]['S_list'].append(BASE_STAT)
                results_dict[BASE_STAT]['te_list'].append(e_time)
        
        # save the accumulated results of each assigned BASE_STAT in one shot
        for BASE_STAT in my_base_stats:
            OUTFILE = configs[BASE_STAT]['output_file']
            groups_out = results_dict[BASE_STAT]['groups_out']
            VINFO_out = results_dict[BASE_STAT]['VINFO_out']
            CORR_STATS = configs[BASE_STAT]['CORR_STATS']
            
            if results_dict[BASE_STAT]['corr_list']:  # there is data to save
                # stack the per-event arrays into single large matrices
                all_corr = np.stack(results_dict[BASE_STAT]['corr_list'])
                all_t = np.stack(results_dict[BASE_STAT]['t_list']) 
                all_R = np.stack(results_dict[BASE_STAT]['R_list'])
                all_te = np.array(results_dict[BASE_STAT]['te_list'], dtype='S')
                
                data_dict = {
                    'corr': all_corr,
                    't': all_t,
                    'R': all_R,
                    'S': np.array(results_dict[BASE_STAT]['S_list'], dtype='S'),
                    'te': all_te,
                    'all_groups': np.array(groups_out, dtype='S'),
                    'vinfo': np.array(VINFO_out),
                    'stats': np.array(CORR_STATS, dtype='S')
                }
                
                save_h5_data(OUTFILE, data_in_dict=data_dict, mode='a')
                print(OUTFILE, ' saved with consolidated data.')
            else:
                # no accepted events: still write the (empty) bookkeeping datasets
                save_h5_data(OUTFILE, {
                    'all_groups': np.array(groups_out, dtype='S'),
                    'vinfo': np.array(VINFO_out),
                    'stats': np.array(CORR_STATS, dtype='S')
                }, mode='a')
                print(OUTFILE, ' saved with metadata only.')