# pre packages
from myglobal import *
from mpi4py import MPI
import time

# sys packages
from pylab import np
import obspy as ob
from glob import glob
from tqdm import tqdm
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
from scipy.stats import linregress
import h5py


# self packages
from utils.loc import load_loc, get_distance,sort_data_by_distance,get_stats_within_distance
from utils.math import norm, my_vel_regress
from utils.h5data import get_event_data, save_h5_data, read_h5_data, MemoryMappedH5
from utils.hsr import RawDataBase, search_events
from utils.plot import plot_traces
from utils.trace import get_tapered_slices, safe_filter, whiten_msnoise,get_tapered_traces
from corr4py.corr import corr_with_wavelets


# cmd
# Command-line interface for the trace-stacking stage: frequency band,
# event-type filters, and I/O paths.
# NOTE(review): '-noD0' ("discard unknown events") is parsed but never read
# anywhere in this file — confirm whether type-0 events should be selectable.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-debug', action='store_true', help='method: DEBUG')
parser.add_argument('-emarker', default='', help='marker in previous processes')
parser.add_argument('-fs', default=9.0, type=float, help='start frequency before stacking, unit:Hz, type: float')
parser.add_argument('-fe', default=11.0,type=float, help='stop time before stacking, unit:Hz, type: float')
parser.add_argument('-PBH', action='store_true', help='method: 进行谱白化')  # toggle spectral whitening

parser.add_argument('-noD1', action='store_true', help='method: discard S2N events')
parser.add_argument('-noD2', action='store_true', help='method: discard N2S events')
parser.add_argument('-noD3', action='store_true', help='method: discard mixed events')
parser.add_argument('-noD0', action='store_true', help='method: discard unknown events')

parser.add_argument('-input', default='',  help='input corr h5file')
parser.add_argument('-output', default='',  help='output stack h5 file')
parser.add_argument('-eventfile', default='',  help='event file for selecting events')
parser.add_argument('-figroot', default='figures/6.traceSeq.figures',  help='root to save figs')
args = parser.parse_args()
print(args)

# Unpack CLI options into module-level constants used throughout the script.
DEBUG= args.debug
PBH = args.PBH
DATA_FILE = args.input
OUTFILE = args.output    # may be ''; rank 0 derives a name from the input path
EVENT_FILE = args.eventfile
FIG_ROOT = args.figroot
fs = args.fs             # band-pass low corner (Hz)
fe = args.fe             # band-pass high corner (Hz)
USE_D1_EVENTS = not args.noD1  # keep S2N events
USE_D2_EVENTS = not args.noD2  # keep N2S events
USE_D3_EVENTS = not args.noD3  # keep mixed events

# MPI setup: rank 0 handles load/save; all ranks process scattered events.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

def load_new_format_data(data_file, use_d1_events, use_d2_events, use_d3_events, start_time):
    """
    Load event data from the consolidated-format HDF5 file.

    Parameters
    ----------
    data_file : str
        Path to the consolidated HDF5 file.
    use_d1_events, use_d2_events, use_d3_events : bool
        Whether to keep D1 (S2N), D2 (N2S), D3 (mixed) event types.
        Type-0 ("unknown") events are always discarded.
    start_time : str or UTCDateTime-compatible
        Reference origin time; event times are returned relative to it.

    Returns
    -------
    data_all : list of [group, corr_2d, t, e_time, stats, source]
        One entry per successfully loaded group.
    te_all : np.ndarray
        Event times (seconds) relative to ``start_time``, aligned with data_all.
    groups_valid : list of str
        Group names successfully loaded.
    info_valid : list
        Event types, aligned 1:1 with ``groups_valid``.
    metadata, args_infile
        File-level metadata values and attribute dict.
    """
    # Read file-level metadata.
    input_file_io = MemoryMappedH5(data_file)
    h5file = input_file_io.h5_file
    metadata, args_infile = read_h5_data( h5file=h5file, 
        keys=['MARKER', 'EMARKER', 'DATE', 'BASE_STAT'], 
        group_name='metadata', 
        FILL_NONE=True, 
        read_attrs=True)

    base_stat = metadata[3].decode()

    # Group names and per-event info vectors (vinfo[:, 0] holds the event type).
    groups_raw = read_h5_data(h5file=h5file,  keys=['all_groups'])[0]
    vinfo_raw = read_h5_data(h5file=h5file,  keys=['vinfo'])[0]

    # Map group name -> index for O(1) lookup when extracting data below.
    group_to_index = {group.decode() if isinstance(group, bytes) else group: i 
                      for i, group in enumerate(groups_raw)}

    # Select groups by event type.
    groups_selected = []
    info_selected = []
    for i in range(len(groups_raw)):
        group_i = groups_raw[i].decode()
        event_type = vinfo_raw[i, 0]
        if (event_type == 1 and use_d1_events) or \
           (event_type == 2 and use_d2_events) or \
           (event_type == 3 and use_d3_events):
            groups_selected.append(group_i)
            info_selected.append(event_type)

    print(f'选择 {len(groups_selected)} 组数据从 {data_file}')

    # Consolidated datasets, index-aligned with groups_raw.
    corr_all = read_h5_data(h5file=h5file, keys=['corr'])[0]
    t_all = read_h5_data(h5file=h5file, keys=['t'])[0]
    stats_all = read_h5_data(h5file=h5file, keys=['R'])[0]
    source_all = read_h5_data(h5file=h5file, keys=['S'])[0]
    te_all_data = read_h5_data(h5file=h5file, keys=['te'])[0]

    # Decode byte strings once up front.
    stats_all = [s.decode() if isinstance(s, bytes) else s for s in stats_all]
    source_all = [s.decode() if isinstance(s, bytes) else s for s in source_all]
    te_all_data = [t.decode() if isinstance(t, bytes) else t for t in te_all_data]

    # Build the output structures.
    data_all = []
    te_all = []
    groups_valid = []
    # BUGFIX: event types are now recorded per *successful* group, so the
    # returned info list stays aligned with groups_valid even when a group
    # raises and is skipped (the original returned the pre-filter list).
    info_valid = []

    for i, group_i in enumerate(groups_selected):
        idx = group_to_index.get(group_i)
        if idx is None:
            continue
        try:
            # Extract this group's slice of each consolidated dataset.
            # 't' and 'R' may be shared (one row) or per-group (one row each).
            data_i = corr_all[idx]
            t = t_all[idx] if len(t_all.shape) > 1 else t_all
            stats = stats_all[idx] if isinstance(stats_all[idx], np.ndarray) else stats_all
            e_time = te_all_data[idx]
            source = source_all[idx]

            # BUGFIX: perform every operation that can raise *before* any
            # append.  The original appended first, so a group whose source
            # check failed (or whose time failed to parse) was still kept.
            te_rel = UTCDateTime(e_time) - UTCDateTime(start_time)
            assert source == base_stat

            data_all.append([group_i, data_i, t, e_time, stats, source])
            groups_valid.append(group_i)
            info_valid.append(info_selected[i])
            te_all.append(te_rel)
        except Exception as e:
            print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
            print(group_i, i, data_file, f'处理错误: {e}')
            print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
            continue

    input_file_io.close()

    return data_all, np.array(te_all), groups_valid, info_valid, metadata, args_infile

if rank==0:
    # Output folder lives beside the input file: <input_dir>.traces
    DATA_FOLDER = os.path.dirname(DATA_FILE)+'.traces'
    # FIX: makedirs(exist_ok=True) creates missing parents and avoids the
    # check-then-create race of os.path.exists + os.mkdir.
    os.makedirs(DATA_FOLDER, exist_ok=True)

    # Read basic metadata first to obtain DATE.
    metadata, args_infile = read_h5_data(DATA_FILE, 
        keys=['MARKER', 'EMARKER', 'DATE', 'BASE_STAT'], 
        group_name='metadata', 
        FILL_NONE=True, 
        read_attrs=True)
    
    DATE = metadata[2].decode()
    
    # Use DATE to look up the per-date configuration.
    date_info = get_info(DATE)
    START_TIME = date_info['START_TIME']
    
    # Now load the full dataset, filtered by the selected event types.
    data_all, te_all, groups, info, metadata, args_infile = load_new_format_data(
        DATA_FILE, USE_D1_EVENTS, USE_D2_EVENTS, USE_D3_EVENTS, START_TIME)
    
    EMARKER = f'{metadata[1].decode()}.traces.{metadata[0].decode()}' if not args.emarker else args.emarker
    BASE_STAT=metadata[3].decode()
    LINE_ID = args_infile['line']

    # MARKER encodes line id, frequency band, whitening, and event types kept.
    MARKER =f'{LINE_ID}.F{fs:02.1f}.{fe:02.1f}'
    MARKER+='.PBH'*PBH
    MARKER+='.D'+'1'*USE_D1_EVENTS+'2'*USE_D2_EVENTS+'3'*USE_D3_EVENTS

    OUTFILE = f'{DATA_FOLDER}/{EMARKER}.{MARKER}.h5' if not OUTFILE else OUTFILE
    FIG_ROOT = f'{FIG_ROOT}/{DATE}.{metadata[1].decode()}/'
    # FIX: the original os.mkdir failed when the configurable figure root
    # itself did not exist yet; makedirs creates the whole chain.
    os.makedirs(FIG_ROOT, exist_ok=True)

    H5_ROOT = date_info['H5_ROOT']
    s_info = date_info['s_info']
    LINES = date_info['LINES']

    # Receiver list: predefined line if configured, otherwise pick stations
    # by distance from the base station.
    if LINE_ID in LINES:
        CORR_STATS = LINES[LINE_ID]
    else:
        CORR_STATS = get_stats_within_distance(s_info, base=BASE_STAT)

    print(f'{MARKER} {EMARKER} {OUTFILE}')

    # Signed source-receiver offsets (S2N convention) for every receiver.
    x = []
    for name in CORR_STATS:
        xi, azi,_ = get_distance(s_info=s_info,name1=BASE_STAT,name2=name, S2N=True)
        x.append(xi)
    x = np.array(x)
    
    NE = len(groups)
    data_collected = {}

    # Create the output file with metadata up front (mode='w' truncates).
    print(f'save final data to {OUTFILE}')
    args_infile.update(vars(args))
    save_h5_data(file_name=OUTFILE, 
                attrs_dict=args_infile, 
                group_name='metadata',
                data_in_dict={'OUTFILE':OUTFILE,
                            'INFILE':DATA_FILE,
                            'MARKER':MARKER,
                            'EMARKER':EMARKER,
                            'BASE_STAT':BASE_STAT,
                            'DATE':DATE},mode='w')
    broadcast_data = {
        'CORR_STATS': CORR_STATS,
        'x': x
    }
else:
    broadcast_data = None

# Share the receiver list and offsets computed on rank 0 with every rank.
broadcast_data = comm.bcast(broadcast_data, root=0)
CORR_STATS, x = broadcast_data['CORR_STATS'], broadcast_data['x']
ns = len(CORR_STATS)  # number of receivers
L_CUT = 18            # length (s) of the window cut around zero lag
# Process the events in several rounds so that a single scatter/gather
# payload stays below MPI's message-size limits.
TASK_ROUND = 10

comm.Barrier()

# Process events round by round: rank 0 scatters a slice of the event list,
# every rank filters/cuts its share, rank 0 gathers the results.
for task_i in range(TASK_ROUND):
    # Wall-clock timing for this round.
    t1 = time.time()
    if rank==0:
        # Round-robin split: round task_i takes every TASK_ROUND-th event,
        # then each rank receives every size-th of those.
        data4task = data_all[task_i::TASK_ROUND]
        data4scatter = [data4task[i::size] for i in range(size)]
        print(f"processing {task_i=}",[len(i) for i in data4scatter])
    else:
        data4scatter = None
    comm.Barrier()
    t2 = time.time()

    data_task_rank = comm.scatter(data4scatter, root=0)
    all_traces_ne = {}  # group name -> (ns, nt_cut) array of processed traces
    for i,items in enumerate(data_task_rank):

        group_i,data_i, t, e_time, stats, SOURCE = items
        stats = [name.decode() if isinstance(name, bytes) else name for name in stats]

        nt = len(t)
        # Sample interval; assumes t is uniformly sampled — TODO confirm.
        dt = t[1]-t[0]
        nt_cut = int(L_CUT/dt)  # samples in the fixed-length output window
        
        all_traces_ns = np.zeros([ns,nt_cut])

        for j,name in enumerate(CORR_STATS):
            # NOTE(review): stats.index raises ValueError if a receiver in
            # CORR_STATS is missing from this group's station list.
            idx_j = stats.index(name)
            trace_j = data_i[idx_j,:]
            if np.sum(np.abs(trace_j))==0:
                continue  # skip dead/empty traces

            # Taper, optionally spectral-whiten, taper again, then band-pass.
            trace_j = get_tapered_traces(trace_j, dt, L_Taper=1, WIN_TYPE='hann')
            if PBH:
                trace_j = whiten_msnoise(trace_j, dt, fs, fe, FL=1)
            trace_j = get_tapered_traces(trace_j, dt, L_Taper=1, WIN_TYPE='hann')

            trace_j = safe_filter(trace_j, dt, ftype='bandpass', zerophase=True, freqmin=fs, freqmax=fe)
            
            # Cut the L_CUT-second window centred on zero lag; the same
            # sample range is used for every receiver of this event.
            tsp = np.argmin(np.abs(t+L_CUT/2))
            tep = tsp+int(L_CUT/dt)
            
            # tn: time axis of the cut window; reused after the loop when
            # saving results (only valid on ranks that processed >=1 trace).
            trace_j_cut,tn = get_tapered_slices(trace_j, t, [t[tsp],t[tep]], L_Taper=0.5, WIN_TYPE='hann')

            all_traces_ns[j,:] = trace_j_cut

        all_traces_ne[group_i] = all_traces_ns

    # Collect this round's per-rank dicts on rank 0.
    all_traces_task =comm.gather(all_traces_ne, root=0)
    t3 = time.time()
    if rank==0:
        for kk, adict in enumerate(all_traces_task):
            print(f'task {task_i} done, collecting data {kk}', flush=True)
            data_collected.update(adict)
        t4 = time.time()
        print(f'task {task_i} done, time: {t4-t3:.2f}s, {t3-t2:.2f}s, {t2-t1:.2f}s')


if rank==0:    
    # ## Build the reference section: linear stack over all events, then
    # normalise each receiver's trace.
    all_traces_slices = [data_collected[group_i] for group_i in groups]
    all_traces_slices = np.array(all_traces_slices)
    ref_traces_slices = all_traces_slices.sum(axis=0)
    for j in range(ns):
        ref_traces_slices[j,:] = norm(ref_traces_slices[j,:])

    # Save results (append to the metadata file created earlier).
    # NOTE(review): 'tn' comes from the processing loop above and is
    # undefined if rank 0 processed no non-empty trace — confirm inputs.
    print(f'save final data to {OUTFILE}')
    data_dict = {
            'traces':all_traces_slices.astype('float32'),
            'te_all':te_all,
            't':tn,
            'ref':ref_traces_slices,
            'S': BASE_STAT,
            'R': np.array(CORR_STATS,dtype='S'),
            'x':x,
            'all_groups':np.array(groups, dtype='S'),
            'info':np.array(info),
        }
    save_h5_data(OUTFILE,data_dict,mode='a')

    if DEBUG:
        from pylab import plt

        # Plot the reference section for a quick visual check.
        from utils.plot import plot_raw_data
        # NOTE(review): these two values are immediately overwritten below;
        # likely leftover from an earlier parameter choice.
        VLIM  = [300,5000]
        v_estimate=2900
    
        VLIM  = [-5000,5000]
        v_estimate=2900   # estimated velocity (units per second — TODO confirm m/s)
        fig,ax1, ax2= plot_raw_data(ref_traces_slices,x,tn,fs=fs,fe=fe, VLIM=VLIM, PLOT_WIGGLE=True, SCALE=60,FV_NORM=True)

        # Label each trace with its station name.
        for j in range(ns):
            ax1.text(-1,x[j], CORR_STATS[j], fontsize=6)

        # Overlay a move-out line for the estimated velocity.
        ax1.plot([-5,5],[-5*v_estimate,5*v_estimate])
        ax1.set_title(f'{EMARKER} {MARKER}')
        fig.tight_layout()
        figname = f'{FIG_ROOT}/ref.{EMARKER}.{MARKER}.png'
        print(figname)
        fig.savefig(figname,dpi=300)