# 台站定义与路径索引
from myglobal import *


# sys packages
from pylab import np
import obspy as ob
from glob import glob
from tqdm import tqdm
from obspy.core.utcdatetime import UTCDateTime
import h5py

# self packages

from utils.loc import load_loc 
from utils.math import norm
from utils.h5data import get_event_data, save_h5_data, read_h5_data, h5glob
from utils.hsr import RawDataBase, search_events
from utils.plot import plot_traces_by_subfigures

import argparse
# Command-line interface for the event-extraction script.
parser = argparse.ArgumentParser()
parser.add_argument('-debug', action='store_true', help='method: DEBUG')
parser.add_argument('-abn', action='store_true', help='背景噪声，默认每小时一个事件')
parser.add_argument('-base', default='DZ155', help='BASE_STAT, default  is DZ155')

parser.add_argument('-line', default='ALL', help='LINE_ID, default is ALL')
parser.add_argument('-amp', default=2e-4, type=float, help='amplitude threshold of hsr events')
parser.add_argument('-date', default='2303',  help='which data to use, 2303, 2406, 2409, 254C')
parser.add_argument('-dmarker', default='',  help='marker for data, eg. DZ155.ALL, P192.254C')
parser.add_argument('-outfile', default='',  help='output file name')
parser.add_argument('-L', default=200, type=int, help='事件长度, 默认200s')
parser.add_argument('-CHN', default=2, type=int, help='多分量仪器采集, NEZH')
# New argument: path to a reference h5 file holding previously detected
# event times, reused instead of re-detecting events.
parser.add_argument('-refh5', default='', help='参考h5文件路径，用于读取已有的事件时间')

parser.add_argument('-fs', default=1, type=float, help='原始数据频率下限，默认1Hz')
parser.add_argument('-fe', default=25, type=float, help='原始数据频率上限,默认25Hz')

args = parser.parse_args()
print(args)

# Bind parsed options to the UPPER_CASE constants used throughout the script.
DEBUG= args.debug      # when set, also save per-day QC figures
ABN= args.abn          # background-noise mode: events on a regular time grid
LINE_ID = args.line    # station line name, or a single station id
BASE_STAT=args.base    # reference station used for event detection
AMP_HSR = args.amp     # amplitude threshold for HSR event picking
DATE = args.date       # acquisition campaign identifier (selects loader below)
DMARKER = args.dmarker # data marker; derived from other args when empty
OUTFILE = args.outfile # output h5 path; derived from the marker when empty
EVENT_L = args.L       # event window length in seconds
CHN = args.CHN         # number of recorded components (NEZH)
REF_H5 = args.refh5    # optional reference h5 with precomputed event times

fs = args.fs           # lower corner frequency (Hz) of the raw-data band
fe = args.fe           # upper corner frequency (Hz) of the raw-data band

# Resolve campaign metadata (day list, data roots, channel naming, station
# lines) for the selected DATE. get_info comes from myglobal (star import).
date_info = get_info(DATE, CHN=CHN)
days = date_info['days']
H5_ROOT = date_info['H5_ROOT']
RAW_ROOT = date_info['RAW_ROOT']
CHN_NAME = date_info['CHN_NAME']
LINES = date_info['LINES']

# LINE_ID may name a predefined station line; otherwise treat it as a
# single station identifier.
if LINE_ID in LINES:
    stats = LINES[LINE_ID]
else:
    stats = [LINE_ID]


# Default data marker: <base station>.<line>.<channel name>; an '.ABN'
# suffix marks background-noise (ambient) extraction runs.
DMARKER = f'{BASE_STAT}.{LINE_ID}.{CHN_NAME}' if len(DMARKER) == 0 else DMARKER
if ABN:
    DMARKER = f'{DMARKER}.ABN'

OUTFILE = f'{H5_ROOT}/events/{DMARKER}.events.h5' if not OUTFILE else OUTFILE
print(OUTFILE)
FIG_ROOT = f'./figures/1.events/{DMARKER}'
# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(FIG_ROOT, exist_ok=True)


# Create the output HDF5 file (truncating any previous run) and record run
# metadata: the full CLI namespace as attributes plus key path/marker
# strings. The context manager guarantees the handle is closed even if
# save_h5_data raises, instead of leaking an open file.
with h5py.File(f'{OUTFILE}', 'w') as H5OUT:
    save_h5_data(h5file=H5OUT,
                 attrs_dict=vars(args),
                 group_name='metadata',
                 data_in_dict={'RAW_ROOT': RAW_ROOT,
                               'OUTFILE': OUTFILE,
                               'MARKER': DMARKER,
                               'EMARKER': '',
                               'DATE': DATE
                               })

for day in days:

    print(day)
    # ---- load one day of raw waveforms into a RawDataBase -----------------
    if DATE == '2303':
        # 2303 campaign: one vertical-component SAC file pattern per station.
        file_names = [f'{RAW_ROOT}/{i}/*{day}*.Z.sac' for i in stats]
        a = RawDataBase(dt=0.01)
        a.load_data_from_sac(file_names, stats, FILL_NONE=True)
    elif DATE == '2406':
        a = RawDataBase(dt=0.01)
        names_keys = [f'data/{i}' for i in stats]
        a.load_data_from_h5(f'{RAW_ROOT}/P320_{day}.h5',
                            names=stats, names_keys=names_keys,
                            time_start=f'20{day[0:2]}-{day[2:4]}-{day[4:6]}T00:00:00',
                            dt_key='dt',
                            FILL_NONE=True,
                            CHN=CHN,
                            FLIM=[fs, fe])
    elif DATE == '254C':
        a = RawDataBase(dt=0.01)
        names_keys = [f'data/{i}' for i in stats]
        a.load_data_from_h5(f'{RAW_ROOT}/{day}_10ms_sensors.h5',
                            names=stats, names_keys=names_keys,
                            time_start=f'20{day[0:2]}-{day[2:4]}-{day[4:6]}T00:00:00',
                            dt_key='dt',
                            FILL_NONE=True,
                            CHN=CHN,
                            FLIM=[fs, fe])
    else:
        # Previously an unsupported DATE fell through and crashed with a
        # confusing NameError on `a`. Fail fast with a clear message instead
        # (note: '2409' from the CLI help has no loader implemented here).
        raise ValueError(f'no raw-data loader implemented for DATE={DATE!r}')

    base_trace = a.get_trace_by_name(BASE_STAT)
    if len(base_trace.data) < 2:
        # Base station has no usable samples for this day; skip it.
        print(f'{BASE_STAT}  {day} no data')
        continue

    if ABN:
        # Background-noise mode: one synthetic "event" every EVENT_L seconds,
        # centred in its window; downsample 100 Hz -> 10 Hz first.
        event_times = np.arange(0, 3600*24, EVENT_L) + EVENT_L/2
        a.downsample(factor=10)
        print(f'after downsample, {a.dt=}')

    elif REF_H5 and os.path.exists(REF_H5):
        # Reuse event times recorded for this day in a reference events.h5.
        print(f"从参考文件 {REF_H5} 读取事件时间")
        event_times = []
        # Enumerate this day's event groups via the generic h5 search helper.
        day_groups = h5glob(REF_H5, pattern='IDX*', object_type='group', group_name=f'DAY{day}')

        # Context manager closes the reference file even if a read fails.
        with h5py.File(REF_H5, 'r') as ref_h5:
            for group_path in day_groups:
                event_time_str = ref_h5[f'{group_path}/te'][()].decode()
                event_utc = UTCDateTime(event_time_str)
                # Convert absolute event time to seconds after trace start.
                event_relative_time = event_utc - base_trace.stats.starttime
                event_times.append(event_relative_time)
        print(f"从参考文件读取到 {len(event_times)} 个事件")
    else:
        # Default mode: detect HSR events by amplitude on the base station.
        event_times = search_events(base_trace.data, base_trace.times(), AMP_HSR=AMP_HSR, MAX_AMP=AMP_HSR*50, STEP_EVENT=50)

    if DEBUG:
        # QC figure: base-station trace with detected event times overlaid.
        print('savefigure')
        fig = plot_traces_by_subfigures(base_trace.data, base_trace.times(), N1=8, N2=3, event_times=event_times)
        fig.tight_layout()
        fig.savefig(f'{FIG_ROOT}/{day}.{BASE_STAT}.png')

    # ---- append an EVENT_L-second window around each event to OUTFILE -----
    with h5py.File(f'{OUTFILE}', 'r+') as H5OUT:
        iter_z = tqdm(enumerate(event_times), total=len(event_times), desc='save events')

        ts = base_trace.stats.starttime
        for i, et in iter_z:
            # Window centred on the event time, EVENT_L seconds long.
            DATA_e, t_e = a.get_event_by_time_fast(ts+et-EVENT_L/2, ts+et+EVENT_L/2)

            data_dict = {
                'data': DATA_e,
                't': t_e,
                'te': str(ts+et),              # absolute event time as string
                'stats': np.array(stats, dtype='S')  # station names (bytes)
            }
            save_h5_data(h5file=H5OUT, data_in_dict=data_dict, group_name=f'DAY{day}/IDX{i:03d}.T{str(ts+et)}')

# Build a sorted index of every saved event group so downstream readers can
# iterate events without re-globbing the whole file.
all_groups = sorted(h5glob(OUTFILE, pattern='DAY*/IDX*.T*', object_type='group'))
save_h5_data(OUTFILE, {'all_groups': np.array(all_groups, dtype='S')}, mode='a')

print(OUTFILE, ' saved.')

