#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SAC -> SEG-Y converter using ObsPy.

Usage examples:
  Single file:
    python sac_to_segy.py path\to\trace.sac -o path\to\trace.segy

  Batch directory:
    python sac_to_segy.py path\to\sac_dir --outdir path\to\segy_out --recursive

  Combine all SAC files in a directory into one SEG-Y:
    python sac_to_segy.py path\to\sac_dir --combine -o path\to\combined.segy --recursive
"""
import argparse
import os
from pathlib import Path
import sys
from typing import List

import numpy as np
from obspy import read, Stream
from obspy.core import AttribDict
from obspy.io.segy.segy import SEGYTraceHeader


def attach_basic_segy_headers(stream: Stream) -> Stream:
    """
    Attach the minimal per-trace SEG-Y header fields needed for writing:

    - number_of_samples_in_this_trace: sample count
    - sample_interval_in_ms_for_this_trace: sample interval in microseconds
      (the SEG-Y field name says "ms", but the standard stores microseconds)

    Also fills the trace sequence-number fields on a best-effort basis so
    downstream software can identify traces.

    Modifies the traces in place and returns the same Stream.
    """
    for idx, tr in enumerate(stream):
        th = SEGYTraceHeader()
        th.number_of_samples_in_this_trace = int(tr.stats.npts)
        # The SEG-Y field is a big-endian int16 (>h), so the microsecond
        # value must fit in [-32768, 32767].
        interval_us = int(round(tr.stats.delta * 1_000_000))
        clamped = max(-32768, min(32767, interval_us))
        if clamped != interval_us:
            # Silently truncating would corrupt the sample-rate metadata in
            # the output file, so make the loss visible to the user.
            print(
                f"警告: trace {idx + 1} 的采样间隔 {interval_us}µs 超出 SEG-Y "
                f"int16 范围，已截断为 {clamped}µs"
            )
        th.sample_interval_in_ms_for_this_trace = clamped

        # Optional sequence numbers; failures here are cosmetic, so ignore them.
        try:
            th.trace_sequence_number_within_segy_file = idx + 1
            th.trace_sequence_number_within_line = idx + 1
        except Exception:
            pass

        tr.stats.segy = AttribDict()
        tr.stats.segy.trace_header = th
    return stream


def chunk_stream(stream: Stream, max_samples: int = 32767) -> Stream:
    """
    Split any trace whose sample count exceeds the SEG-Y write limit
    (default 32767) into consecutive chunks.

    The sampling rate is preserved and each chunk's start time is shifted
    by its offset into the original trace.  Traces already within the
    limit are passed through untouched.
    """
    out_traces = []
    for trace in stream:
        total = int(trace.stats.npts)
        if total <= max_samples:
            out_traces.append(trace)
            continue
        samples = trace.data
        dt = float(trace.stats.delta)
        t0 = trace.stats.starttime
        offset = 0
        while offset < total:
            stop = min(offset + max_samples, total)
            piece = trace.copy()
            # Slice out this window and fix up the timing metadata.
            piece.data = samples[offset:stop].copy()
            piece.stats.starttime = t0 + offset * dt
            piece.stats.npts = stop - offset
            out_traces.append(piece)
            offset = stop
    return Stream(out_traces)


def split_trace_to_streams(tr, max_samples: int = 32767) -> List[Stream]:
    """Split one trace into a list of single-trace Streams, each holding
    at most ``max_samples`` samples; a short trace yields one Stream."""
    total = int(tr.stats.npts)
    if total <= max_samples:
        return [Stream([tr])]
    samples = tr.data
    dt = float(tr.stats.delta)
    t0 = tr.stats.starttime
    streams: List[Stream] = []
    offset = 0
    while offset < total:
        stop = min(offset + max_samples, total)
        piece = tr.copy()
        piece.data = samples[offset:stop].copy()
        piece.stats.starttime = t0 + offset * dt
        piece.stats.npts = stop - offset
        streams.append(Stream([piece]))
        offset = stop
    return streams


def _with_stem_append(base: Path, extra: str) -> Path:
    """在不改变目录与扩展名的情况下，为文件名 stem 添加后缀。"""
    return base.with_name(base.stem + extra + base.suffix)


def write_stream_to_format(st: Stream, out_path: Path, fmt: str, overwrite: bool = False) -> None:
    """Unified write wrapper supporting SEGY/MSEED/SAC/WAV output.

    SEG-2 output is not available in ObsPy and raises NotImplementedError.
    Raises FileExistsError when *out_path* exists and *overwrite* is False,
    and ValueError for any unknown format.
    """
    if not overwrite and out_path.exists():
        raise FileExistsError(f"输出文件已存在: {out_path}，使用 --overwrite 可覆盖")
    fmt_upper = fmt.upper()
    if fmt_upper == "SEG2":
        raise NotImplementedError(
            "当前未支持写出 SEG-2。ObsPy 仅提供 SEG-2 读取；"
            "如需 SEG-2 输出，请使用第三方工具或改用 MSEED/SEGY。"
        )
    if fmt_upper == "SEGY":
        # SEG-Y needs per-trace headers before writing; encoding 5 = IEEE float.
        attach_basic_segy_headers(st).write(str(out_path), format="SEGY", data_encoding=5)
    elif fmt_upper in ("MSEED", "SAC", "WAV"):
        st.write(str(out_path), format=fmt_upper)
    else:
        raise ValueError(f"不支持的输出格式: {fmt}")


def convert_single_sac_to_output(
    input_sac: Path,
    output_path: Path,
    fmt: str = "SEGY",
    overwrite: bool = False,
) -> List[Path]:
    """
    Convert a single SAC file.

    - SEGY: if a trace exceeds the SEG-Y sample limit, it is split into
      several output files (suffixes _part001, _part002, ...).  If the SAC
      file holds several traces (uncommon), each trace gets its own output
      file (suffixes _2, _3, ... for traces beyond the first).
    - Other formats: a single file is written.

    Returns the list of paths actually written.
    Raises FileNotFoundError when *input_sac* does not exist.
    """
    if not input_sac.exists():
        raise FileNotFoundError(f"输入文件不存在: {input_sac}")
    st = read(str(input_sac))
    fmt_upper = fmt.upper()

    if fmt_upper != "SEGY":
        # Non-SEGY formats: one output file, no splitting required.
        write_stream_to_format(st, output_path, fmt_upper, overwrite=overwrite)
        return [output_path]

    # SEGY: handle every trace uniformly.  The single-trace case is exactly
    # the i == 0 iteration (base == output_path), so the previously
    # duplicated one-trace branch is folded into this loop.
    written: List[Path] = []
    for i, tr in enumerate(st):
        base = output_path if i == 0 else _with_stem_append(output_path, f"_{i + 1}")
        parts = split_trace_to_streams(tr)
        if len(parts) == 1:
            write_stream_to_format(parts[0], base, "SEGY", overwrite=overwrite)
            written.append(base)
        else:
            for k, part_st in enumerate(parts, start=1):
                out_part = _with_stem_append(base, f"_part{str(k).zfill(3)}")
                write_stream_to_format(part_st, out_part, "SEGY", overwrite=overwrite)
                written.append(out_part)
    return written


def list_sac_files(input_dir: Path, recursive: bool) -> List[Path]:
    """
    Return a sorted list of SAC files found in *input_dir*.

    The extension match is case-insensitive (.sac, .SAC, .Sac, ...), which
    also avoids the duplicate hits that glob'ing both "*.sac" and "*.SAC"
    produced on case-insensitive filesystems.  Directories whose names end
    in ".sac" are excluded.
    """
    walker = input_dir.rglob("*") if recursive else input_dir.glob("*")
    files = {p for p in walker if p.is_file() and p.suffix.lower() == ".sac"}
    return sorted(files)


def convert_directory_sac_to_segy(
    input_dir: Path,
    output_dir: Path,
    recursive: bool = True,
    overwrite: bool = False,
    fmt: str = "SEGY",
) -> List[Path]:
    """Batch-convert every SAC file under *input_dir*, mirroring the
    relative directory layout inside *output_dir*.

    Returns the list of files written.
    Raises FileNotFoundError when no SAC file is found.
    """
    sac_files = list_sac_files(input_dir, recursive)
    if not sac_files:
        raise FileNotFoundError(f"未在目录中找到 SAC 文件: {input_dir}")

    output_dir.mkdir(parents=True, exist_ok=True)
    # The output extension is the same for every file, so compute it once.
    suffix = ".segy" if fmt.upper() == "SEGY" else f".{fmt.lower()}"

    written: List[Path] = []
    for sac_path in sac_files:
        try:
            rel = sac_path.relative_to(input_dir)
        except ValueError:
            # Not under input_dir (e.g. symlinked) -- fall back to the bare name.
            rel = Path(sac_path.name)
        out_path = (output_dir / rel).with_suffix(suffix)
        out_path.parent.mkdir(parents=True, exist_ok=True)
        written.extend(
            convert_single_sac_to_output(sac_path, out_path, fmt=fmt, overwrite=overwrite)
        )
    return written


def combine_sacs_to_single_segy(
    sac_files: List[Path],
    combined_segy_path: Path,
    overwrite: bool = False,
    strict_one_trace_per_sac: bool = False,
    split_output_files: bool = False,
    merge_z_only: bool = False,
    mapping_file_path: Path | None = None,
) -> List[str]:
    """
    Merge multiple SAC files into SEG-Y output.

    All traces must share the same sample interval; shorter traces are
    zero-padded to the longest so every merged trace has equal length.

    Parameters:
        sac_files: input SAC paths.
        combined_segy_path: output SEG-Y path (base name when splitting).
        overwrite: allow overwriting existing output files.
        strict_one_trace_per_sac: forbid chunking; raise ValueError if any
            trace exceeds the 32767-sample SEG-Y limit.
        split_output_files: write one SEG-Y file per sample window (each
            file contains the same time window of every trace) instead of
            a single multi-trace file.
        merge_z_only: keep only Z-component files (stem ends with "Z",
            e.g. station.BHZ.sac or station_Z.sac).
        mapping_file_path: optional text file recording the trace-number ->
            source SAC filename mapping.

    Returns the list of source SAC filenames, one per merged trace, in
    trace order.

    Raises FileExistsError when output exists without --overwrite, and
    ValueError for empty input or inconsistent sample rates.
    """
    if combined_segy_path.exists() and not overwrite:
        raise FileExistsError(f"输出文件已存在: {combined_segy_path}，使用 --overwrite 可覆盖")

    if merge_z_only:
        original_count = len(sac_files)
        # Z-component filenames usually end with Z, e.g. station.BHZ.sac or
        # station_Z.sac -- match any stem ending in "Z" (the old "_Z"-only
        # filter wrongly excluded names like BHZ).
        sac_files = [p for p in sac_files if p.stem.upper().endswith("Z")]
        print(f"Z-only 模式：从 {original_count} 个文件中筛选出 {len(sac_files)} 个 Z 分量文件。")

    if not sac_files:
        raise ValueError("待合并的 SAC 文件列表为空（或筛选后为空）")

    all_traces: List = []
    trace_mapping: List[str] = []
    dt_us_ref: int | None = None
    max_npts: int = 0

    # Pass 1: read all traces, enforce a common sample interval, and find
    # the maximum trace length.
    for sac_path in sac_files:
        try:
            st = read(str(sac_path))
        except Exception as e:
            print(f"警告：读取文件 {sac_path} 失败，已跳过。错误: {e}")
            continue
        for tr in st:
            dt_us = int(round(tr.stats.delta * 1_000_000))
            if dt_us_ref is None:
                dt_us_ref = dt_us
            elif dt_us != dt_us_ref:
                # Must abort the merge; kept outside the read() guard so the
                # broad except above cannot swallow it into a mere warning.
                raise ValueError(
                    f"合并失败：采样率不一致\n"
                    f"{sac_path} 的 dt={dt_us}µs 与参考 dt={dt_us_ref}µs 不一致。请先统一重采样。"
                )

            if tr.stats.npts > max_npts:
                max_npts = tr.stats.npts

            all_traces.append(tr)
            trace_mapping.append(Path(sac_path).name)

    if not all_traces:
        raise ValueError("未能读取到任何有效的 trace 数据。")

    # Pass 2: zero-pad every trace to the common length.
    for tr in all_traces:
        if tr.stats.npts < max_npts:
            padding_needed = max_npts - tr.stats.npts
            tr.data = np.pad(tr.data, (0, padding_needed), mode='constant', constant_values=0)
            tr.stats.npts = max_npts

    combined = Stream(traces=all_traces)
    max_samples = 32767

    if split_output_files:
        # Split into several SEG-Y files: each file holds the same sample
        # window of every trace.  npts is already uniform after padding.
        delta = combined[0].stats.delta
        npts = max_npts
        part_index = 1

        # Defensive check (padding should have made these equal already).
        npts_list = [tr.stats.npts for tr in combined]
        if len(set(npts_list)) > 1:
            print(f"警告: 各道的采样点数不一致: {npts_list}")
            print("  将使用最大采样点数进行分割，不足的部分用零填充")

        for start in range(0, npts, max_samples):
            end = min(start + max_samples, npts)
            st_part = Stream()
            for tr in combined:
                new_tr = tr.copy()
                if start < len(tr.data):
                    actual_end = min(end, len(tr.data))
                    new_tr.data = tr.data[start:actual_end].copy()
                    # Zero-fill if this trace is shorter than the window.
                    if len(new_tr.data) < (end - start):
                        padding_needed = (end - start) - len(new_tr.data)
                        new_tr.data = np.pad(new_tr.data, (0, padding_needed), mode='constant', constant_values=0)
                else:
                    # Window starts past the end of this trace: all zeros.
                    new_tr.data = np.zeros(end - start, dtype=tr.data.dtype)

                new_tr.stats.starttime = tr.stats.starttime + start * delta
                new_tr.stats.npts = end - start
                st_part.append(new_tr)

            # Defensive: force a uniform length within the part before writing.
            part_npts_list = [tr.stats.npts for tr in st_part]
            if len(set(part_npts_list)) > 1:
                print(f"警告: 分割文件 {part_index} 中各道的采样点数不一致: {part_npts_list}")
                max_part_npts = max(part_npts_list)
                for tr in st_part:
                    if tr.stats.npts < max_part_npts:
                        padding_needed = max_part_npts - tr.stats.npts
                        tr.data = np.pad(tr.data, (0, padding_needed), mode='constant', constant_values=0)
                        tr.stats.npts = max_part_npts

            st_part = attach_basic_segy_headers(st_part)
            out_part = _with_stem_append(combined_segy_path, f"_part{str(part_index).zfill(3)}")
            if out_part.exists() and not overwrite:
                raise FileExistsError(f"输出文件已存在: {out_part}，使用 --overwrite 可覆盖")
            st_part.write(str(out_part), format="SEGY", data_encoding=5)
            part_index += 1
    elif strict_one_trace_per_sac:
        # Strict mode: no chunking allowed -- any over-limit trace is an error.
        for tr in combined:
            if int(tr.stats.npts) > max_samples:
                raise ValueError(
                    "严格模式启用：存在样本点数超过 32767 的 SAC，无法保证一条 SAC 对应一条道。"
                    "请先重采样降低点数，或关闭 --strict-one-trace-per-sac。"
                )
        combined = attach_basic_segy_headers(combined)
        combined.write(str(combined_segy_path), format="SEGY", data_encoding=5)
    else:
        # Default mode: chunk over-limit traces so the write always succeeds
        # (still a single multi-trace SEG-Y file).
        combined = chunk_stream(combined)
        combined = attach_basic_segy_headers(combined)
        combined.write(str(combined_segy_path), format="SEGY", data_encoding=5)

    # Optionally record which trace came from which SAC file.
    if mapping_file_path:
        try:
            with open(mapping_file_path, 'w', encoding='utf-8') as f:
                f.write("SEGY文件道与原始SAC文件的映射关系:\r\n")
                f.write("=" * 50 + "\r\n")
                for trace_num, sac_file in enumerate(trace_mapping, start=1):
                    # One entry per line (the newline was missing before, so
                    # all entries ran together on a single line).
                    f.write(f"道 {trace_num}: {sac_file}\r\n")
            print(f"映射信息已写入: {mapping_file_path}")
        except Exception as e:
            print(f"警告: 无法写入映射文件 {mapping_file_path}: {e}")

    return trace_mapping


def parse_args():
    """Define and evaluate the command-line interface."""
    p = argparse.ArgumentParser(
        description="使用 ObsPy 将 SAC 数据格式转换为 SEG-Y。",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    p.add_argument("input", help="输入路径：SAC 文件或目录")
    p.add_argument(
        "-o", "--out",
        help="输出文件路径（单文件输入或 --combine 模式必填）；对 SEGY 单文件超限将自动输出多文件",
    )
    p.add_argument("--outdir", default="segy_out", help="输出目录（批量转换时使用）")
    p.add_argument(
        "--format",
        default="SEGY",
        choices=["SEGY", "SEG2", "MSEED", "SAC", "WAV"],
        help="输出数据格式。SEG2 暂不支持写出（会提示错误）。",
    )
    p.add_argument("-r", "--recursive", action="store_true",
                   help="目录批量转换时，递归搜索子目录")
    p.add_argument("--combine", action="store_true",
                   help="将目录内所有 SAC 合并为一个 SEG-Y（采样率与点数需一致）")
    p.add_argument("--strict-one-trace-per-sac", action="store_true",
                   help="合并模式严格保证一条 SAC 对应一条道（不分块）；超限时直接报错")
    p.add_argument("--split-output-files", action="store_true",
                   help="合并模式下按样本窗口分割为多个 SEGY 文件（每个文件包含所有 SAC 的同一时间窗口）")
    p.add_argument("--overwrite", action="store_true", help="允许覆盖已存在的输出文件")
    p.add_argument("--mapping-file",
                   help="合并模式下，指定一个文件路径用于存储道与原始SAC文件的映射关系")
    p.add_argument("--z-only", action="store_true",
                   help="合并模式下，仅合并 Z 分量（文件名以 Z 结尾）")
    return p.parse_args()


def main():
    """CLI entry point: dispatch to single-file, combine, or batch mode."""
    args = parse_args()
    input_path = Path(args.input)

    if input_path.is_file():
        # Single-file mode: default output path swaps the extension.
        default_suffix = ".segy" if args.format.upper() == "SEGY" else f".{args.format.lower()}"
        out_path = Path(args.out) if args.out else input_path.with_suffix(default_suffix)
        results = convert_single_sac_to_output(
            input_path, out_path, fmt=args.format, overwrite=args.overwrite
        )
        if len(results) == 1:
            print(f"已输出: {results[0]}")
        else:
            print(f"已输出 {len(results)} 个分割文件：")
            for p in results:
                print(f"  - {p}")
        return

    if not input_path.is_dir():
        print(f"输入路径无效：{input_path}", file=sys.stderr)
        sys.exit(2)

    if args.combine:
        # Combine mode requires an explicit output path.
        if not args.out:
            print("错误：--combine 模式需要指定 -o/--out 作为合并后的 SEG-Y 输出路径。", file=sys.stderr)
            sys.exit(2)
        sac_files = list_sac_files(input_path, args.recursive)
        if not sac_files:
            print(f"未在目录中找到 SAC 文件: {input_path}", file=sys.stderr)
            sys.exit(1)
        combined_out = Path(args.out)
        combine_sacs_to_single_segy(
            sac_files,
            combined_out,
            overwrite=args.overwrite,
            strict_one_trace_per_sac=args.strict_one_trace_per_sac,
            split_output_files=args.split_output_files,
            merge_z_only=args.z_only,
            mapping_file_path=Path(args.mapping_file) if args.mapping_file else None,
        )
        if args.split_output_files:
            # Multiple files were produced; point the user at the directory.
            print(f"合并已按样本窗口分割输出多个文件：查看目录 {combined_out.parent}")
        else:
            print(f"已输出合并文件: {combined_out}")
        return

    # Batch mode: convert each SAC file into its own output file.
    outdir = Path(args.outdir)
    written = convert_directory_sac_to_segy(
        input_path, outdir, recursive=args.recursive, overwrite=args.overwrite, fmt=args.format
    )
    print(f"批量转换完成，共 {len(written)} 个文件。输出目录: {outdir}")


if __name__ == "__main__":
    main()