#!/usr/bin/env python3
"""
HiNIC3 Documentation Coverage & Quality Checker

Functions:
 1. Map every .c source file to an expected .md design document according to existing naming convention.
 2. Report missing docs, duplicate docs, or docs under 100 lines (hard requirement from project spec).
 3. Optional (flag): scan glossary terms usage to find unused / unknown terms.

Usage:
  python tools/check_docs.py [--root .] [--glossary] [--fail-on-missing]

Exit Codes:
 0 success (no blocking issues unless --fail-on-missing used)
 2 missing docs (when --fail-on-missing)
 3 short docs (<100 lines) (when --fail-on-missing)
 4 unknown glossary anomalies (when --glossary and fail requested)

Conventions Encoded:
 * Top-level:  hinic3_xxx.c      -> docs/hinic3_xxx.md
 * hw/         hw/hinic3_xxx.c   -> docs/hw_hinic3_xxx.md
 * bond/       bond/hinic3_xxx.c -> docs/bond_hinic3_xxx.md (prefix directory name + underscore)
 * cqm/        cqm/abc_xyz.c     -> docs/cqm_abc_xyz.md
 * For .c files already having a documentation file recorded in FILES_INDEX.md, we still verify existence & length.

Limitations: purely static; does not parse Kconfig to exclude unused objects.
"""
from __future__ import annotations
import argparse
import pathlib
import re
import sys
from typing import Dict, List, Tuple

# Hard requirement from the project spec: every design doc must be >= 100 lines.
MIN_DOC_LINES = 100
# All expected documentation lives flat under this directory.
DOC_DIR = 'docs'
# Markdown table mapping source files to their docs (verified if present).
FILES_INDEX = 'docs/FILES_INDEX.md'
# Markdown glossary table; first column holds the terms (header cell: 术语).
GLOSSARY = 'docs/GLOSSARY.md'

# Patterns considered code sources (exclude staging docs / generated)
SOURCE_SUFFIX = '.c'
EXCLUDE_DIRS = {'.git', 'Documentation', 'tools', 'scripts'}
EXCLUDE_FILES = set()

# Human-readable summary of the naming convention, printed at the end of
# every run so CI logs are self-explanatory.
NAMING_RULE_HELP = {
    'top-level': 'hinic3_foo.c -> docs/hinic3_foo.md',
    'hw': 'hw/hinic3_bar.c -> docs/hw_hinic3_bar.md',
    'bond': 'bond/hinic3_bond.c -> docs/bond_hinic3_bond.md',
    'cqm': 'cqm/cqm_xxx.c -> docs/cqm_cqm_xxx.md',
}

# Matches the first cell of a markdown table row and captures its content,
# e.g. '| API_CMD | ... |' -> 'API_CMD'. Note: ASCII-ish charset only, so
# rows whose first cell is non-Latin text simply do not match.
GLOSSARY_TERM_RE = re.compile(r'^\|\s*([A-Za-z0-9_/\-\(\) ]+?)\s*\|')


def iter_sources(root: pathlib.Path) -> List[pathlib.Path]:
    """Collect every ``.c`` source under *root* that should be documented.

    Files inside any directory listed in ``EXCLUDE_DIRS`` and files whose
    name starts with ``test_`` are skipped.
    """
    def _wanted(path: pathlib.Path) -> bool:
        # Exclude anything nested under an excluded directory, at any depth.
        if any(part in EXCLUDE_DIRS for part in path.parts):
            return False
        return not path.name.startswith('test_')

    return [candidate for candidate in root.rglob('*' + SOURCE_SUFFIX)
            if _wanted(candidate)]


def expected_doc_path(src: pathlib.Path, root: pathlib.Path) -> pathlib.Path:
    """Return the doc path mandated by the naming convention for *src*.

    Sources under ``hw/``, ``bond/`` or ``cqm/`` get their top directory
    name as an underscore-joined prefix (``hw/foo.c -> docs/hw_foo.md``);
    everything else maps one-to-one by stem (``foo.c -> docs/foo.md``).
    """
    rel = src.relative_to(root)
    top = rel.parts[0]
    # Subdirectory sources are disambiguated by prefixing the dir name.
    prefix = f'{top}_' if top in ('hw', 'bond', 'cqm') else ''
    return root / DOC_DIR / f'{prefix}{rel.stem}.md'


def count_lines(path: pathlib.Path) -> int:
    """Return the number of lines in *path*, or -1 if the file is missing.

    The -1 sentinel lets callers distinguish "doc absent" from "doc empty"
    with a single call (EAFP: no separate exists() check, no race).
    """
    try:
        with path.open('r', encoding='utf-8', errors='ignore') as fh:
            total = 0
            for _ in fh:
                total += 1
            return total
    except FileNotFoundError:
        return -1


def load_files_index(root: pathlib.Path) -> List[str]:
    """Extract the source-file names recorded in ``docs/FILES_INDEX.md``.

    The index is a markdown table whose first column holds a backticked
    source path, e.g.::

        | `hw/hinic3_api_cmd.c` | [hw_hinic3_api_cmd.md] | desc |

    Returns the list of those paths (as written in the table), or an empty
    list when the index file does not exist.
    """
    idx = root / FILES_INDEX
    if not idx.exists():
        return []
    srcs: List[str] = []
    for line in idx.read_text(encoding='utf-8', errors='ignore').splitlines():
        # Only table rows whose first cell is a backticked .c path matter.
        if not (line.strip().startswith('| `') and '.c`' in line):
            continue
        try:
            fragment = line.split('|')[1].strip()
        except IndexError:
            # Malformed table row: skip it rather than abort the whole scan.
            # (Narrowed from a bare `except Exception: pass` so real bugs
            # such as encoding errors are no longer silently swallowed.)
            continue
        if fragment.startswith('`'):
            srcs.append(fragment.strip('` '))
    return srcs


def glossary_terms(root: pathlib.Path) -> List[str]:
    """Parse ``docs/GLOSSARY.md`` and return the terms of its main table.

    Capture starts after the header row beginning with '| 术语' and stops
    at the first line that is not a table row. Separator rows ('|---'),
    multi-word cells and the header word itself are filtered out. Returns
    an empty list when the glossary file is absent.
    """
    glossary_path = root / GLOSSARY
    if not glossary_path.exists():
        return []
    terms: List[str] = []
    in_table = False
    for row in glossary_path.read_text(encoding='utf-8', errors='ignore').splitlines():
        # Header check comes first, even mid-table, matching the convention
        # that any '| 术语' row is a heading and never a term.
        if row.startswith('| 术语'):
            in_table = True
            continue
        if not in_table:
            continue
        if row.startswith('|---'):
            continue
        if not row.startswith('|'):
            break  # table ended
        hit = GLOSSARY_TERM_RE.match(row)
        if hit is None:
            continue
        candidate = hit.group(1).strip()
        # Skip composite headings / empties.
        if candidate and ' ' not in candidate and candidate.lower() != '术语':
            terms.append(candidate)
    return terms


def scan_terms_usage(root: pathlib.Path, terms: List[str]) -> Tuple[List[str], List[str]]:
    """Find glossary *terms* that never appear in any ``docs/*.md`` file.

    Naive whole-word search: all docs are concatenated once, then each term
    is searched with ``\\b`` boundaries. Returns ``(unused, unknown)``;
    unknown-term detection would need an authoritative reference list which
    we do not have, so the second element is always empty.
    """
    doc_paths = list((root / DOC_DIR).rglob('*.md'))
    # Join all docs once so each term costs a single regex search.
    content = '\n'.join(p.read_text(encoding='utf-8', errors='ignore') for p in doc_paths)
    unused: List[str] = []
    # Note: uses the module-level `re` import; the previous redundant
    # function-local `import re` shadowing it has been removed.
    for term in terms:
        # Skip single-character terms to avoid word-boundary noise.
        if len(term) < 2:
            continue
        if not re.search(r'\b' + re.escape(term) + r'\b', content):
            unused.append(term)
    return unused, []


def main():
    """CLI entry point: print a coverage report and exit with a status code.

    Exit codes (see module docstring): 0 ok; 2 missing docs; 3 short docs;
    4 glossary anomalies. Non-zero codes fire only with --fail-on-missing,
    and 2 takes precedence over 3, which takes precedence over 4.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--root', default='.', help='Project root (hinic3 directory)')
    ap.add_argument('--glossary', action='store_true', help='Check glossary term usage')
    ap.add_argument('--fail-on-missing', action='store_true', help='Non-zero exit on missing/short docs')
    args = ap.parse_args()

    root = pathlib.Path(args.root).resolve()
    sources = iter_sources(root)
    index_list = set(load_files_index(root))

    # missing: source rel-path -> expected doc rel-path
    missing: Dict[str, str] = {}
    # short: existing doc rel-path -> its line count (< MIN_DOC_LINES)
    short: Dict[str, int] = {}
    covered = 0

    for s in sources:
        rel = s.relative_to(root).as_posix()
        exp = expected_doc_path(s, root)
        exp_rel = exp.relative_to(root).as_posix()
        lines = count_lines(exp)
        if lines == -1:
            # count_lines returns -1 when the expected doc does not exist.
            missing[rel] = exp_rel
        else:
            covered += 1
            if lines < MIN_DOC_LINES:
                short[exp_rel] = lines

    print('== Documentation Coverage Report ==')
    print(f'Total .c sources scanned : {len(sources)}')
    print(f'Documented (.md exists)   : {covered}')
    print(f'Missing docs              : {len(missing)}')
    print(f'Short docs (<{MIN_DOC_LINES})    : {len(short)}')

    if missing:
        print('\n-- Missing (source -> expected doc) --')
        for k, v in sorted(missing.items()):
            # Flag whether the source was at least recorded in FILES_INDEX.md.
            listed = 'IN_INDEX' if k in index_list else 'NOT_IN_INDEX'
            print(f'{k} -> {v} [{listed}]')

    if short:
        print('\n-- Too Short (doc -> lines) --')
        # Shortest docs first (sorted by line count).
        for k, v in sorted(short.items(), key=lambda x: x[1]):
            print(f'{k} : {v} lines')

    exit_code = 0
    if args.fail_on_missing and (missing or short):
        # Distinguish codes for CI if needed
        if missing:
            exit_code = 2
        if short:
            # Only downgrade to 3 if no missing-docs code was set already.
            exit_code = 3 if exit_code == 0 else exit_code

    if args.glossary:
        terms = glossary_terms(root)
        if terms:
            unused, unknown = scan_terms_usage(root, terms)
            print('\n== Glossary Usage ==')
            print(f'Total terms: {len(terms)}  Unused terms: {len(unused)}')
            if unused:
                print('Unused:', ', '.join(sorted(unused)))
            # NOTE(review): scan_terms_usage currently always returns [] for
            # unknown, so this branch is effectively dead until implemented.
            if unknown:
                print('Unknown terms found:', ', '.join(sorted(unknown)))
                if args.fail_on_missing:
                    exit_code = 4 if exit_code == 0 else exit_code
        else:
            print('Glossary not found or empty.')

    if missing:
        print('\nSUGGESTION: Run templating to create missing docs, then fill to >=100 lines.')
    if short:
        print('SUGGESTION: Expand short docs with required sections (overview/flows/concurrency/errors/perf/faq).')

    print('\nNaming rules encoded:')
    for k, v in NAMING_RULE_HELP.items():
        print(f'  {k}: {v}')

    sys.exit(exit_code)


if __name__ == '__main__':
    main()
