#!/usr/bin/env python3
"""
Extract NESDIS PrepBUFR sequence definitions from a textual `prepobs_prep.bufrtable`
file and emit ecCodes-compatible overrides (sequence.def).

The generator focuses on Table-D descriptors used by the MWRI regional workflow.
It parses the table, resolves mnemonic references (including wildcard rows such as
`.DTH....`), and writes the resulting descriptor graph to `sequence.def`.  Element
definitions (Table-B) are assumed to be provided by the stock ecCodes bundle.
"""

from __future__ import annotations

import argparse
import re
from collections import defaultdict
from pathlib import Path
from typing import Dict, Iterable, List, Tuple


SEQUENCE_SECTION_HEADER = "| MNEMONIC | SEQUENCE"


def parse_table_rows(bufr_table: Path) -> Tuple[Dict[str, str], Dict[str, str], Dict[str, str]]:
    """
    Parse mnemonic -> code mappings and Table-D sequence expansions from the NESDIS table.

    The table is pipe-delimited.  Three kinds of rows are harvested:

    * Table-A rows whose second column looks like ``A48102`` (converted later
      to a 3-48-102 style Table-D code),
    * Table-B/D rows whose second column is a bare numeric descriptor code,
    * rows inside a ``| MNEMONIC | SEQUENCE`` section, which define (possibly
      multi-line) Table-D expansions keyed by mnemonic.

    Args:
        bufr_table: path to ``prepobs_prep.bufrtable``.

    Returns:
        tuple(name_to_code, table_a_codes, sequences_raw)
    """
    name_to_code: Dict[str, str] = {}
    table_a_codes: Dict[str, str] = {}
    sequences_raw: Dict[str, str] = {}

    current_sequence: str | None = None
    in_sequence_section = False

    with bufr_table.open("r", encoding="ascii", errors="ignore") as fh:
        for raw_line in fh:
            line = raw_line.rstrip("\n")

            # A "| MNEMONIC | NUMBER" header starts a non-sequence section,
            # closing any sequence block we were inside.
            if line.startswith("| MNEMONIC | NUMBER"):
                in_sequence_section = False

            if line.startswith(SEQUENCE_SECTION_HEADER):
                in_sequence_section = True
                current_sequence = None
                continue

            if not line.startswith("|"):
                continue

            parts = [segment.strip() for segment in line.strip().strip("|").split("|")]
            if len(parts) < 2:
                continue

            mnemonic, second = parts[0], parts[1]

            if in_sequence_section:
                if mnemonic:
                    # Continuation rows may repeat the mnemonic in column 1.
                    if mnemonic == current_sequence and current_sequence in sequences_raw:
                        sequences_raw[current_sequence] += " " + second
                    else:
                        current_sequence = mnemonic
                        sequences_raw[current_sequence] = second
                elif current_sequence:
                    # Blank first column: continuation of the open sequence.
                    sequences_raw[current_sequence] += " " + second
                continue

            # Table-A rows carry codes like A48102 that we later convert to 3-48-102.
            if second.startswith("A") and second[1:].isdigit():
                table_a_codes[mnemonic] = second

            # Table-B / Table-D rows expose numeric descriptor codes.
            # str.isdigit() is False for "", so no separate emptiness check is needed.
            if second.isdigit():
                name_to_code[mnemonic] = second

    return name_to_code, table_a_codes, sequences_raw


def build_wildcard_index(name_to_code: Dict[str, str]) -> List[Tuple[re.Pattern, str]]:
    """
    Compile wildcard mnemonics (names containing '.') into regex lookup rules.

    Rows such as ``.DTH....`` act as templates: a single dot matches a literal
    '.' (the conventional leading dot of "following value" mnemonics), while a
    run of two or more dots matches exactly that many arbitrary characters.

    Returns:
        List of ``(compiled_pattern, descriptor_code)`` pairs in the insertion
        order of ``name_to_code``; callers take the first matching pattern.
    """
    wildcard_entries: List[Tuple[re.Pattern, str]] = []
    for name, code in name_to_code.items():
        if "." not in name:
            continue
        parts: List[str] = ["^"]
        i = 0
        while i < len(name):
            if name[i] == ".":
                # Measure the contiguous run of dots starting at i.
                j = i
                while j < len(name) and name[j] == ".":
                    j += 1
                run_length = j - i
                # A lone dot is literal; longer runs are fixed-width wildcards.
                # (The previous extra guard `j >= len(name) or name[j] != "."`
                # was dead code: the inner loop exits exactly when it holds.)
                parts.append(r"\." if run_length == 1 else f".{{{run_length}}}")
                i = j
            else:
                parts.append(re.escape(name[i]))
                i += 1
        parts.append("$")
        wildcard_entries.append((re.compile("".join(parts)), code))
    return wildcard_entries


def resolve_code(token: str, name_to_code: Dict[str, str], wildcard_index: List[Tuple[re.Pattern, str]]) -> str:
    """
    Map a mnemonic token to its descriptor code.

    Resolution order: bare numeric tokens (zero-padded to six digits), then an
    exact mnemonic lookup, then the wildcard templates in order.

    Raises:
        KeyError: when no rule resolves the token.
    """
    # Numeric tokens are descriptor codes already; just normalise the width.
    if token.isdigit():
        return token.zfill(6)

    direct = name_to_code.get(token)
    if direct is not None:
        return direct

    # Fall back to wildcard rows such as `.DTH....`; first match wins.
    for pattern, code in wildcard_index:
        if pattern.match(token):
            return code

    raise KeyError(f"Unmapped descriptor token '{token}'")


def tokenize_sequence(raw: str) -> Iterable[str]:
    """
    Yield mnemonic tokens from a Table-D sequence string.

    Replication and grouping markers ({}, <>, []) are mapped to whitespace in
    a single pass so the wrapped mnemonics survive as bare tokens.
    """
    markers = str.maketrans({ch: " " for ch in "{}<>[]"})
    yield from raw.translate(markers).split()


def format_sequence(code: str, entries: List[str]) -> str:
    """
    Render one sequence definition in ecCodes syntax.

    Entries are zero-padded to six digits and wrapped six per line, with
    continuation lines indented to align under the opening bracket.
    """
    padded = [f"{int(entry):06d}" for entry in entries]
    # Slice the descriptor list into rows of at most six values each.
    rows = [padded[start:start + 6] for start in range(0, len(padded), 6)]
    body = ",\n              ".join(", ".join(row) for row in rows)
    return f"\"{int(code):06d}\" = [  {body} ]\n"


def derive_sequence_codes(
    name_to_code: Dict[str, str],
    table_a_codes: Dict[str, str],
    sequences_raw: Dict[str, str],
) -> Dict[str, List[str]]:
    """
    Resolve every candidate Table-D sequence into its list of descriptor codes.

    Candidates are mnemonics that appear in ``sequences_raw`` and either carry
    an explicit six-digit code starting with '3', or have a Table-A code whose
    ``A48xxx`` form is rewritten to ``348xxx``.

    Note: ``name_to_code`` gains entries for Table-A mnemonics as a side
    effect (via ``setdefault``), so later tokens can resolve to them.
    """
    # Build the wildcard rules before any Table-A additions below.
    wildcard_index = build_wildcard_index(name_to_code)

    candidates = set()
    for mnemonic, code in name_to_code.items():
        if mnemonic in sequences_raw and len(code) == 6 and code.startswith("3"):
            candidates.add(mnemonic)

    for mnemonic, acode in table_a_codes.items():
        if mnemonic in sequences_raw:
            candidates.add(mnemonic)
            # Table-A code A48102 maps onto Table-D descriptor 348102.
            name_to_code.setdefault(mnemonic, f"3{acode[1:]}")

    resolved: Dict[str, List[str]] = {}
    for mnemonic in sorted(candidates):
        raw_sequence = sequences_raw[mnemonic]
        resolved[name_to_code[mnemonic]] = [
            resolve_code(token, name_to_code, wildcard_index)
            for token in tokenize_sequence(raw_sequence)
        ]
    return resolved


def parse_existing_sequences(path: Path) -> Dict[str, List[str]]:
    """
    Load sequence definitions from an existing ecCodes sequence.def file.

    Handles both single-line entries (``"301001" = [ ..., ... ]``) and
    multi-line entries whose descriptor list continues until a closing ``]``.
    All codes are zero-padded to six digits.  Returns an empty mapping when
    *path* does not exist.
    """
    if not path.exists():
        return {}

    sequences: Dict[str, List[str]] = {}
    # Code of the entry currently being accumulated, or None between entries.
    current_code: str | None = None
    # Raw continuation-line fragments belonging to the open entry.
    buffer: List[str] = []

    with path.open("r", encoding="ascii", errors="ignore") as fh:
        for raw_line in fh:
            line = raw_line.strip()
            if not line or line.startswith("#"):
                continue
            if line.startswith('"'):
                # A new entry begins; flush any previous, still-open entry
                # (tolerates inputs missing a closing bracket).
                if current_code is not None and buffer:
                    entries = [token.zfill(6) for token in re.findall(r"\d+", " ".join(buffer))]
                    sequences[current_code] = entries
                    buffer = []
                match = re.match(r'"(\d+)"\s*=\s*\[\s*(.*)', line)
                if not match:
                    continue
                current_code = match.group(1).zfill(6)
                remainder = match.group(2)
                if remainder.endswith("]"):
                    # Entire entry fits on one line.
                    entries = [token.zfill(6) for token in re.findall(r"\d+", remainder)]
                    sequences[current_code] = entries
                    current_code = None
                    buffer = []
                else:
                    buffer.append(remainder)
            elif current_code is not None:
                if line.endswith("]"):
                    # Closing line reached: assemble all buffered fragments.
                    buffer.append(line)
                    entries = [token.zfill(6) for token in re.findall(r"\d+", " ".join(buffer))]
                    sequences[current_code] = entries
                    current_code = None
                    buffer = []
                else:
                    buffer.append(line)

    # Flush a trailing entry left open at EOF (missing closing bracket).
    if current_code is not None and buffer:
        entries = [token.zfill(6) for token in re.findall(r"\d+", " ".join(buffer))]
        sequences[current_code] = entries

    return sequences


def write_sequence_def(sequence_map: Dict[str, List[str]], output_path: Path) -> None:
    """
    Write all sequences to *output_path* in ascending numeric descriptor
    order, preceded by a provenance header comment.
    """
    out_lines = [
        "# Auto-generated from prepobs_prep.bufrtable via generate_prepbufr_sequence_overlay.py",
    ]
    out_lines.extend(
        format_sequence(code, sequence_map[code]).rstrip()
        for code in sorted(sequence_map, key=int)
    )
    output_path.write_text("\n".join(out_lines) + "\n", encoding="ascii")


def main() -> None:
    """CLI entry point: parse the NESDIS table and emit sequence.def."""
    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument("bufr_table", type=Path, help="Path to prepobs_prep.bufrtable")
    cli.add_argument(
        "output_dir",
        type=Path,
        help="Destination directory for sequence.def (created if absent)",
    )
    cli.add_argument(
        "--base-sequence",
        type=Path,
        default=None,
        help="Optional existing sequence.def to merge with (e.g., upstream WMO table 36)",
    )
    opts = cli.parse_args()

    names, table_a, raw_sequences = parse_table_rows(opts.bufr_table)
    generated = derive_sequence_codes(names, table_a, raw_sequences)

    merged = generated
    if opts.base_sequence is not None:
        # Generated sequences take precedence over the upstream base file.
        merged = parse_existing_sequences(opts.base_sequence)
        merged.update(generated)

    opts.output_dir.mkdir(parents=True, exist_ok=True)
    target = opts.output_dir / "sequence.def"
    write_sequence_def(merged, target)
    print(f"Wrote {target} ({len(merged)} sequences)")


# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
