# pre packages
from myglobal import *

# sys packages
from pylab import np
import obspy as ob
from glob import glob
from tqdm import tqdm
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
from scipy import signal
import os
import sys
import time

# MPI packages
from mpi4py import MPI

# self packages
from utils.loc import load_loc, get_distance,sort_data_by_distance
from utils.math import measure_shift_fft,remove_point_skip,measure_shift_fft_full, norm
from utils.h5data import get_event_data, save_h5_data, read_h5_data
from utils.plot import plot_scatters,plot_traces
from utils.trace import get_tapered_slices, safe_filter,filtfilt

def load_data(data_file, use_d1_events, use_d2_events, use_d3_events, fs, fe):
    '''
    Read all data from a PBH data file and optionally filter by event type.

    Parameters
    ----------
    data_file : str
        Path to the PBH HDF5 data file.
    use_d1_events : bool
        Keep D1-type events.
    use_d2_events : bool
        Keep D2-type events.
    use_d3_events : bool
        Keep D3-type events.
    fs, fe : float
        Lower / upper frequency limits (Hz).
        NOTE(review): currently unused inside this function — band-pass
        filtering happens later in process_station_data; confirm before
        removing from the signature.

    Returns
    -------
    traces_nens : waveform data, filtered by the event mask
    te_all : per-event time array, filtered by the event mask
    t : time axis
    traces_ref : per-station reference waveform
    stats_infile : list of station names (decoded to str)
    SOURCE : source station name (decoded to str)
    info : per-event type codes, filtered by the event mask
    metadata : dict with MARKER/EMARKER/DATE/BASE_STAT and the file's attrs
    '''

    # Read metadata strings plus the file-level attributes.
    datasets, args_infile = read_h5_data(data_file,
                                        keys=['MARKER', 'EMARKER', 'DATE', 'BASE_STAT'],
                                        group_name='metadata', read_attrs=True)

    metadata = {
        'MARKER': datasets[0].decode(),
        'EMARKER': datasets[1].decode(),
        'DATE': datasets[2].decode(),
        'BASE_STAT': datasets[3].decode(),
        'args': args_infile
    }

    # Read all required datasets in one call.
    traces_nens, te_all, t, traces_ref, stats_infile, SOURCE, info = read_h5_data(
        data_file, ['traces', 'te_all', 't', 'ref', 'R', 'S', 'info'])

    # HDF5 stores strings as bytes; normalize to str.
    stats_infile = [stat.decode() if isinstance(stat, bytes) else stat for stat in stats_infile]
    SOURCE = SOURCE.decode() if isinstance(SOURCE, bytes) else SOURCE

    # Build the event-type selection mask (codes 1/2/3).
    mask = np.zeros(len(info), dtype=bool)
    if use_d1_events:
        mask |= (info == 1)
    if use_d2_events:
        mask |= (info == 2)
    if use_d3_events:
        mask |= (info == 3)

    # If no event type was requested at all, keep everything
    # (including events whose type code is not 1/2/3).
    if not use_d1_events and not use_d2_events and not use_d3_events:
        mask[:] = True

    # Apply the selection.
    traces_nens = traces_nens[mask]
    te_all = te_all[mask]
    info = info[mask]

    # BUGFIX: recompute the reference waveform whenever the selection actually
    # dropped events. The previous condition keyed on the flags themselves,
    # which (a) recomputed needlessly when no flag was set (mask is all-True)
    # and (b) kept a stale reference when all three flags were set but events
    # with other type codes were filtered out.
    if not mask.all():
        # Stack the kept events, then normalize each station's trace.
        traces_ref = traces_nens.sum(axis=0)
        for j in range(traces_ref.shape[0]):
            traces_ref[j, :] = norm(traces_ref[j, :])

    return traces_nens, te_all, t, traces_ref, stats_infile, SOURCE, info, metadata

def process_station_data(j, name, SOURCE, s_info, traces_nens, te_all, t, traces_ref, 
                         NE_STACK, STEP, fs, fe, Val_len, N_after_stack, dt):
    """
    Measure travel-time shifts for one station against its reference trace.

    For each of the N_after_stack sliding windows (NE_STACK events stepped by
    STEP), the events are stacked and the time shift / phase difference
    relative to the band-passed, normalized reference waveform is measured in
    a +/-1 s window around the reference envelope maximum.

    Parameters
    ----------
    j : int
        Station index into traces_nens / traces_ref.
    name : str
        Receiver station name.
    SOURCE : str
        Source (base) station name.
    s_info : station location table consumed by get_distance.
    traces_nens : ndarray, shape (n_events, n_stations, n_samples).
    te_all : ndarray of per-event times; window means go into te_j.
    t : ndarray, time axis.
    traces_ref : ndarray, shape (n_stations, n_samples), reference waveforms.
    NE_STACK : int, number of events stacked per window.
    STEP : int, stride between consecutive windows.
    fs, fe : float, band-pass corner frequencies (Hz).
    Val_len : float, passed to measure_shift_fft_full as 1 - L_Taper.
    N_after_stack : int, number of stacked windows.
    dt : float, sample interval.

    Returns
    -------
    dict with keys: j, name, rj, azj, te_j, delta_t_j, d_phi_j, traces_j,
    ref_j, VALID_MASK, T_MAX, freq_valid.
    """
    # Source-receiver distance and azimuth.
    rj, azj, _ = get_distance(s_info=s_info, name1=SOURCE, name2=name, S2N=True)

    te_j = np.zeros([N_after_stack])
    delta_t_j = np.zeros([N_after_stack])
    VALID_MASK = np.ones([N_after_stack])
    traces_j = np.zeros([N_after_stack, len(t)])

    # Band-pass all raw traces of this station once, up front.
    traces_j_be4stack = traces_nens[:, j, :]
    traces_j_be4stack = filtfilt(traces_j_be4stack, dt, [fs, fe], order=4, N_CORE=None)

    # Band-pass and normalize the reference, then locate its envelope maximum;
    # shifts are measured around that arrival time.
    ref_j = traces_ref[j, :]
    ref_j = filtfilt(ref_j, dt, [fs, fe], order=4, N_CORE=None)
    ref_j = norm(ref_j)
    tp_max = np.argmax(np.abs(signal.hilbert(ref_j)))
    T_MAX = t[tp_max]
    # Loop-invariant measurement window: hoisted out of the loop.
    T_window = [T_MAX-1, T_MAX+1]

    # BUGFIX: pre-initialize so the function does not raise NameError when
    # N_after_stack == 0 (the loop body never runs, yet freq_valid is
    # referenced in the result dict below).
    freq_valid = None

    d_phi_j = []
    for i in range(N_after_stack):
        # Mean event time of this stacking window.
        te_j[i] = te_all[i*STEP:i*STEP+NE_STACK].mean()

        # Stack the traces of the current window.
        stack_traces = traces_j_be4stack[i*STEP:i*STEP+NE_STACK, :]
        trace_ij = stack_traces.sum(axis=0)

        # Flag the window invalid when more than half of its traces are empty
        # (all-zero rows).
        empty_traces_count = np.sum(np.all(stack_traces == 0, axis=1))
        VALID_MASK[i] = 0 if empty_traces_count > NE_STACK / 2 else 1

        delta_t_i, freq_valid, diff_phi, trace_tapered = measure_shift_fft_full(
                                    trace_ij, ref_j, t, 
                                    T_window=T_window, 
                                    FS=[fs, fe], 
                                    L_Taper=1-Val_len, 
                                    FIT=False)

        # Invalid windows are zeroed rather than dropped so shapes stay fixed.
        d_phi_j.append(diff_phi*VALID_MASK[i])
        delta_t_j[i] = delta_t_i*VALID_MASK[i]
        traces_j[i, :] = trace_tapered

    d_phi_j = np.array(d_phi_j)

    # Bundle everything the coordinator needs to reassemble global arrays.
    result = {
        'j': j,
        'name': name,
        'rj': rj,
        'azj': azj,
        'te_j': te_j,
        'delta_t_j': delta_t_j,
        'd_phi_j': d_phi_j,
        'traces_j': traces_j,
        'ref_j': ref_j,
        'VALID_MASK': VALID_MASK,
        'T_MAX': T_MAX,
        'freq_valid': freq_valid  # frequency grid from the last window
    }

    return result

# cmd
import argparse

# Command-line interface. NOTE(review): -noD0 is parsed below but never
# consumed anywhere in this script — confirm whether unknown-type events
# were meant to be filterable.
parser = argparse.ArgumentParser()
parser.add_argument('-debug', action='store_true', help='method: DEBUG')
parser.add_argument('-emarker', default='', help='event marker, default is DZ155.Line{LINE_ID}')
parser.add_argument('-fs', default=9.0, type=float, help='start frequency before measure T, unit:Hz, type: float')
parser.add_argument('-fe', default=11.0, type=float, help='stop time before measure T, unit:Hz, type: float')

parser.add_argument('-input', default='', help='input corr h5file')
parser.add_argument('-output', default='', help='output stack h5 file')
parser.add_argument('-figroot', default='figures/7.dt.change.figures', help='root to save figs')

parser.add_argument('-noD1', action='store_true', help='method: discard S2N events')
parser.add_argument('-noD2', action='store_true', help='method: discard N2S events')
parser.add_argument('-noD3', action='store_true', help='method: discard mixed events')
parser.add_argument('-noD0', action='store_true', help='method: discard unknown events')

parser.add_argument('-nstack', default=10, type=int, help='event stack number, type: int')
args = parser.parse_args()
print(args)

# MPI setup: this script needs one coordinator (rank 0) plus at least
# one worker, so a single-process run is refused.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

if size < 2:
    print('Please run with at least 2 processes.')
    sys.exit(1)

# Unpack CLI options into module-level configuration names.
DEBUG = args.debug
NE_STACK = args.nstack
DATA_FILE = args.input
OUTFILE = args.output
FIG_ROOT = args.figroot

# The -noDx flags are negated: by default every event class is used.
USE_D1_EVENTS = not args.noD1
USE_D2_EVENTS = not args.noD2
USE_D3_EVENTS = not args.noD3

# Measurement band limits (Hz).
fs, fe = args.fs, args.fe

# rank 0 reads the data, writes the output metadata, and dispatches one
# task per station to the worker ranks.
if rank == 0:
    DATA_FOLDER = os.path.dirname(DATA_FILE)+'.dt'
    if not os.path.exists(DATA_FOLDER):
        os.mkdir(DATA_FOLDER)

    # Load waveforms plus metadata, optionally filtered by event type.
    traces_nens, te_all, t, traces_ref, stats_infile, SOURCE, info,metadata = load_data(
        DATA_FILE, USE_D1_EVENTS, USE_D2_EVENTS, USE_D3_EVENTS, fs, fe)

    # EMARKER: an explicit CLI value wins; otherwise derive it from file metadata.
    EMARKER = f'{metadata["EMARKER"]}.{metadata["MARKER"]}' if not args.emarker else args.emarker
    DATE = metadata["DATE"]
    BASE_STAT = metadata["BASE_STAT"]
    args_infile = metadata['args']

    # Encode band + stack size + selected event classes into the run marker
    # (string repetition by a bool appends the digit only when the class is on).
    MARKER=f'F{fs:04.1f}.{fe:04.1f}.N.{NE_STACK:03d}'
    MARKER += '.D'+'1'*USE_D1_EVENTS+'2'*USE_D2_EVENTS+'3'*USE_D3_EVENTS
    if not OUTFILE:
        OUTFILE = f'{DATA_FOLDER}/{EMARKER}.{MARKER}.h5'

    ne, ns, nt = traces_nens.shape
    dt = t[1] - t[0]
    # Sanity check: the input file must have been built around the same base station.
    assert SOURCE == BASE_STAT

    print(vars(args))

    # Fetch per-date configuration.
    # NOTE(review): get_info presumably comes from the `myglobal` star import — confirm.
    date_info = get_info(DATE)
    s_info = date_info['s_info']
    STATS = stats_infile  # use the station list read from the file directly

    STEP = max(1, int(NE_STACK/5))  # at least 1
    Val_len = min(0.5, 2/(fs)*2) # window of 2 wavelengths on each side
    print(f'{Val_len=}')
    N_after_stack = len(np.arange(0, ne-NE_STACK, STEP)) if ne > NE_STACK else 0

    # Create the output file and write run metadata first (mode='w' truncates).
    args_infile.update(vars(args))
    print(f'save final data to {OUTFILE}')
    save_h5_data(file_name=OUTFILE, 
        attrs_dict=vars(args), 
        group_name='metadata',
        data_in_dict={'MARKER': MARKER,
                      'EMARKER': EMARKER,
                      'DATE': DATE,
                      'BASE_STAT': BASE_STAT,
                      'OUTFILE':OUTFILE,
                      'INFILE':DATA_FILE,
                      'STEP': STEP,
                      'FS': fs,
                      'FE': fe,
                      }, mode='w')
    
    # Shared parameters every worker needs before it can process a station.
    metadata_to_send = {
        'EMARKER': EMARKER,
        'DATE': DATE,
        'BASE_STAT': BASE_STAT,
        'MARKER': MARKER,
        'OUTFILE': OUTFILE,
        'STEP': STEP,
        'Val_len': Val_len,
        'N_after_stack': N_after_stack,
        'dt': dt,
        's_info': s_info,
        'NE_STACK': NE_STACK,
        'fs': fs,
        'fe': fe,
        'te_all': te_all,
        't': t,
        'SOURCE': SOURCE,
        'ns': ns
    }
    
    # Broadcast the shared parameters to every worker (tag 0 is read first).
    for dest_rank in range(1, size):
        comm.send(metadata_to_send, dest=dest_rank, tag=0)
    
    # Send each station's slice of the data to its worker.
    for j, name in enumerate(stats_infile):
        dest_rank = j % (size - 1) + 1  # round-robin over ranks 1..size-1
        station_data = {
            'j': j,
            'name': name,
            'traces_nens_j': traces_nens[:, j, :],
            'traces_ref_j': traces_ref[j, :]
        }
        comm.send(station_data, dest=dest_rank, tag=j+1)
    
    # Termination sentinel: MPI guarantees ordered delivery per
    # (source, dest) pair, so each worker sees None after all its tasks.
    for dest_rank in range(1, size):
        comm.send(None, dest=dest_rank, tag=ns+1)
        
    # rank 0 itself processes no stations.
    local_results = []
        
else:
    # Workers: first receive the shared parameters...
    metadata_to_send = comm.recv(source=0, tag=0)
    
    EMARKER = metadata_to_send['EMARKER']
    DATE = metadata_to_send['DATE']
    BASE_STAT = metadata_to_send['BASE_STAT']
    MARKER = metadata_to_send['MARKER']
    OUTFILE = metadata_to_send['OUTFILE']
    STEP = metadata_to_send['STEP']
    Val_len = metadata_to_send['Val_len']
    N_after_stack = metadata_to_send['N_after_stack']
    dt = metadata_to_send['dt']
    s_info = metadata_to_send['s_info']
    NE_STACK = metadata_to_send['NE_STACK']
    fs = metadata_to_send['fs']
    fe = metadata_to_send['fe']
    te_all = metadata_to_send['te_all']
    t = metadata_to_send['t']
    SOURCE = metadata_to_send['SOURCE']
    ns = metadata_to_send['ns']

    # ...then process station tasks until the sentinel arrives.
    local_results = []
    while True:
        # Receive the next task (ANY_TAG: tasks carry tag j+1, sentinel ns+1).
        station_data = comm.recv(source=0, tag=MPI.ANY_TAG)
        
        # None is the termination sentinel.
        if station_data is None:
            break
            
        j = station_data['j']
        name = station_data['name']
        traces_nens_j = station_data['traces_nens_j']
        traces_ref_j = station_data['traces_ref_j']
        
        print(f"Rank {rank} processing station {j}: {name}")
        
        # Rebuild a single-station (ne, 1, nt) layout so
        # process_station_data can be reused unchanged with j=0.
        traces_nens_full = np.zeros((len(te_all), 1, len(t)))
        traces_nens_full[:, 0, :] = traces_nens_j
        traces_ref_full = np.zeros((1, len(t)))
        traces_ref_full[0, :] = traces_ref_j
        
        result = process_station_data(0, name, SOURCE, s_info, traces_nens_full, te_all, t, traces_ref_full,
                                      NE_STACK, STEP, fs, fe, Val_len, N_after_stack, dt)
        # Restore the global station index (processed locally at index 0).
        result['j'] = j
        local_results.append(result)

# Gather every worker's result list on rank 0 (collective: all ranks call it).
all_results = comm.gather(local_results, root=0)

if rank == 0:
    # Assemble the per-station results into dense output arrays.
    delta_t_all = np.zeros([ns, N_after_stack])
    d_phi_by_station = {}  # keyed by station index j so ordering is deterministic
    traces_all = np.zeros([ns, N_after_stack, len(t)])
    ref_all = np.zeros([ns, len(t)])
    health_all = np.zeros([ns, N_after_stack])
    T_MAX_all = np.zeros([ns])
    r_all = np.zeros([ns])
    az_all = np.zeros([ns])
    freq_valid_all = None  # frequency grid, taken once from the first result
    te_valid = None        # stacked-window times, taken once from the first result
    
    # Results arrive grouped by worker rank, NOT in station order; every
    # per-station quantity must therefore be written at index j.
    for proc_results in all_results:
        for result in proc_results:
            j = result['j']
            delta_t_all[j, :] = result['delta_t_j']
            # BUGFIX: d_phi was previously appended in arrival order, so the
            # saved 'd_phi' rows did not line up with the station order of
            # 'R'/'delta_t'/'traces'. Store by station index instead.
            d_phi_by_station[j] = result['d_phi_j']
            traces_all[j, :, :] = result['traces_j']
            ref_all[j, :] = result['ref_j']
            health_all[j, :] = result['VALID_MASK']
            T_MAX_all[j] = result['T_MAX']
            r_all[j] = result['rj']
            az_all[j] = result['azj']
            
            if freq_valid_all is None:
                freq_valid_all = result['freq_valid']
            if te_valid is None:
                te_valid = result['te_j']

    # Rebuild the phase-difference stack in station order.
    d_phi_all = [d_phi_by_station[j] for j in sorted(d_phi_by_station)]

    # Append the result matrices to the file whose metadata was written earlier.
    save_h5_data(OUTFILE,
                data_in_dict={
                'R': np.array(stats_infile, dtype='S'),
                # measurements
                'delta_t': delta_t_all[:len(stats_infile), :].astype('float32'),
                'te': te_valid,
                'd_phi': np.array(d_phi_all),
                'freqs': freq_valid_all,
                # stacked signals
                'traces': traces_all[:len(stats_infile), :, :].astype('float32'),
                'ref': ref_all[:len(stats_infile), :].astype('float32'),
                't': t,
                # parameters
                'health': health_all[:len(stats_infile), :],
                'T_MAX': T_MAX_all[:len(stats_infile)],
                'r': r_all[:len(stats_infile)],
                'az': az_all[:len(stats_infile)]
                }, mode='a')

    print(f'save final data to {OUTFILE}')