#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
用于读取多个traceSeq生成的h5文件并绘制相关图表的脚本
支持读取多个文件，按距离排列traces，进行统一的FV变换
"""

import argparse
import os
import sys
import numpy as np
from pylab import plt
import pandas as pd
from obspy.core.utcdatetime import UTCDateTime
import glob

# Import project-local utilities
from utils.h5data import read_h5_data, save_h5_data
from utils.trace import filtfilt, stack_traces_in_bins
from utils.fv import do_fv
from utils.plot import *
from utils.loc import load_loc, get_distance,sort_data_by_distance
from utils.math import norm

# Parse command-line options controlling input selection and output naming.
arg_parser = argparse.ArgumentParser(description='Plot traceSeq h5 data from multiple files')
arg_parser.add_argument('-input', default='', help='input h5 file pattern')
arg_parser.add_argument('-figroot', default='figures/6.traceSeq.figures', help='root to save figs')
arg_parser.add_argument('-emarker', default='', help='marker name')
arg_parser.add_argument('-noD1', action='store_true', help='method: discard S2N events')
arg_parser.add_argument('-noD2', action='store_true', help='method: discard N2S events')
arg_parser.add_argument('-noD3', action='store_true', help='method: discard mixed events')
args = arg_parser.parse_args()


# Lift parsed options into module-level configuration constants.
INPUT_PATTERN = args.input
FIG_ROOT = args.figroot
EMARKER = args.emarker
USE_D1_EVENTS = not args.noD1   # -noD1 discards S2N events
USE_D2_EVENTS = not args.noD2   # -noD2 discards N2S events
USE_D3_EVENTS = not args.noD3   # -noD3 discards mixed events


fs, fe = 5, 40
VLIM = [-4000,4000]
TLIM = [-9, 9]
NORM = True
PLOT_WIGGLE = True 
STACK = True
FOLDER = True
# INPUT_PATTERN = "data/2406.HSR/events/P192.2406.Z.corr.traces/*.P12*.Tp.N.*.h5"
# EMARKER='P12x.D123.Tp.NS'

FIG_ROOT = "figures/6.traceSeq.figures/test"

MARKER = f'F{fs:.1f}.{fe:.1f}.V{VLIM[0]:.0f}.{VLIM[1]:.0f}.{"NORM"*NORM}.{"STACK"*STACK}.{"FOLDER"*FOLDER}'

h5_name = f'{FIG_ROOT}/{EMARKER}.{MARKER}.h5'
figname = f'{FIG_ROOT}/{EMARKER}.{MARKER}.png'

# Collect the files matching the user-supplied pattern, in sorted order,
# and abort early when the pattern matches nothing.
input_files = sorted(glob.glob(INPUT_PATTERN))
if not input_files:
    print(f"未找到匹配的文件: {INPUT_PATTERN}")
    sys.exit(1)

print(f"找到 {len(input_files)} 个文件")

# Pull run-wide metadata from the first file: the acquisition date and —
# unless the user supplied one on the command line — an event marker
# assembled from the stored EMARKER/MARKER attributes.
datasets, args_infile = read_h5_data(input_files[0], keys=['MARKER','EMARKER'], group_name='metadata', read_attrs=True)
DATE = args_infile['date']
if not EMARKER:
    EMARKER = f'{datasets[1].decode()}.{datasets[0].decode()}'

# Accumulators for trace data gathered across all input files.
all_ref_traces = []   # one reference trace per kept receiver
all_x = []            # receiver offsets (distance along the line)
all_corr_stats = []   # "<source>/<receiver>" labels
train_pos = []        # 'Tpos' (train position) attribute from each file

# Read all files; each file contributes its receivers that pass the filter.
print('Reading data from all files...')
for i, input_file in enumerate(input_files):
    print(f'  Reading {i+1}/{len(input_files)}: {os.path.basename(input_file)}')

    try:
        # Per-file payload: time axis, reference traces (ns x nt), source
        # name, receiver names, and receiver offsets.
        data_dict = read_h5_data(input_file, keys=['t','ref', 'S', 'R', 'x'])
        _, args_infile1 = read_h5_data(input_file, keys=['MARKER'], group_name='metadata', read_attrs=True)
        tn, ref_traces, S_data, R, x = data_dict
        S = S_data.decode()
        ns = ref_traces.shape[0]
        # (A dead read of the 'info' dataset, consumed only by commented-out
        # code, was removed here.)

        # Keep receivers whose name contains no 'E' and whose offset lies
        # strictly within +/-1000 m.
        for j in range(ns):
            name_j = R[j].decode()
            if 'E' not in name_j and -1000 < x[j] < 1000:
                all_corr_stats.append(S + '/' + name_j)
                all_ref_traces.append(ref_traces[j, :])
                all_x.append(x[j])
        train_pos.append(args_infile1['Tpos'])

    except Exception as e:
        # Best-effort: a malformed/unreadable file is reported and skipped.
        print(f"  跳过文件 {input_file}，错误: {e}")
# Abort cleanly when no usable traces were collected. This check must run
# BEFORE touching `tn`: if every file failed to read, `tn` was never bound
# and the original `dt = tn[1]-tn[0]` raised a NameError instead of exiting.
if not all_ref_traces:
    print("没有成功读取任何数据")
    sys.exit(1)

dt = tn[1] - tn[0]   # sample interval, taken from the last successfully read file

# Merge the per-file lists into arrays and order everything by offset.
ref_traces_combined = np.array(all_ref_traces)
x_combined = np.array(all_x)
sort_idx = np.argsort(x_combined)
x_combined = x_combined[sort_idx]
ref_traces_combined = ref_traces_combined[sort_idx, :]
names = [all_corr_stats[i] for i in sort_idx]

ns = len(all_corr_stats)

print(f'总共有 {ns} 个 traces')

# Optional stacking: records falling in the same distance bin (32 m per the
# original note) are stacked into a single record.
if STACK:
    ref_traces_combined, x_combined, names = stack_traces_in_bins(ref_traces_combined, x_combined)

# Optional folding in time: add the time-reversed copy of each trace.
if FOLDER:
    ref_traces_combined = ref_traces_combined + ref_traces_combined[:, ::-1]

# Band-pass filter and per-trace normalization of the combined section.
ref_traces = norm(filtfilt(ref_traces_combined, dt, [fs, fe], order=4), ONE_AXIS=True)
x = np.array(x_combined)
ns = len(x)
print(ref_traces.shape)

# Extract the frequency-velocity (FV) spectrum over the chosen velocity grid,
# normalize its magnitude to a peak of 1, and persist everything to h5.
print('Plotting reference traces...')
vs = np.linspace(VLIM[0], VLIM[1], 800)
FV, f, _ = do_fv(ref_traces, vs, x=np.array(x), sx=0, dt=dt, NORM=True, PRINT=False, fs=fs, fe=fe)
FV = np.abs(FV)
FV /= FV.max()

save_h5_data(
    h5_name,
    data_in_dict={
        'FV': FV,
        'vs': vs,
        'f': f,
        'traces': ref_traces,
        'x': x,
        't': tn,
    },
    mode='w',
)

# Plot the filtered traces alongside the FV panel.
fig, ax1, ax2 = plot_raw_data(ref_traces, x, tn, fs=fs, fe=fe,
                              VLIM=VLIM, PLOT_WIGGLE=PLOT_WIGGLE, SCALE=40, FV_NORM=NORM)
ax1.set_ylim([x.min(), x.max()])
ax1.set_title(f'ALL.{MARKER}')
ax1.set_xlim(TLIM)

ax2.set_ylim(VLIM)
fig.tight_layout()

# Announce the output paths BEFORE writing, so a failure inside savefig still
# shows which file was being written (the original printed these after the
# files had already been saved, which was misleading).
print(f'Saving figure to {figname}...')
print(f'Saving h5 to {h5_name}...')

fig.savefig(figname, dpi=300)
plt.close(fig)   # release the figure's memory in this non-interactive script