from __future__ import annotations

from dataclasses import dataclass
from typing import Dict, List, Tuple, Optional, Any
import html
import re


@dataclass
class ValidationReport:
    """Outcome of one validation step (row counts, structure, ...)."""
    ok: bool  # True when no mismatches were detected
    summary: str  # one-line human-readable verdict
    mismatches: List[Tuple[str, int, int]]  # (table, expected, actual)
    details: str = ""  # optional multi-line text (aligned table and/or notes)
    kind: str = ""  # validation category label shown in rendered reports


def validate_counts(db_executor, expected_rows_per_table: Dict[str, int]) -> ValidationReport:
    """Compare expected per-table row counts against COUNT(*) from the target.

    Table-name matching is case-insensitive on the actual side; a table missing
    from the fetched counts shows up as -1 and is reported as a mismatch.
    """
    table_names = list(expected_rows_per_table.keys())
    fetched = db_executor.fetch_table_row_counts(table_names)
    # Normalize fetched keys to lowercase for case-insensitive lookup.
    by_lower = {
        (key.lower() if isinstance(key, str) else key): count
        for key, count in (fetched or {}).items()
    }
    diffs: List[Tuple[str, int, int]] = []
    display_rows: List[List[str]] = []
    for name in table_names:
        expected = int(expected_rows_per_table.get(name, 0))
        actual = int(by_lower.get(name.lower(), -1))
        display_rows.append([name, str(expected), str(actual)])
        if expected != actual:
            diffs.append((name, expected, actual))
    passed = not diffs
    lines = _format_columns_table(["Table", "COUNT(mysql)", "COUNT(kwdb)"], display_rows)
    return ValidationReport(
        ok=passed,
        summary="Count(*) validation passed" if passed else "Row count mismatches detected",
        mismatches=diffs,
        details="\n".join(lines),
        kind="count(*)校验",
    )


def render_report(reports: List[ValidationReport]) -> str:
    """Render reports as plain text: each summary, then its details when present."""
    chunks = [
        piece
        for report in reports
        for piece in ([report.summary, report.details] if report.details else [report.summary])
    ]
    return "\n".join(chunks)

def render_report_html(reports: List[ValidationReport], title: str = "Migration Validation Report") -> str:
    """Render reports as a self-contained HTML page.

    Per report: a PASS/FAIL heading, the escaped summary in <pre>, an optional
    mismatch table, and optional escaped details in <pre>.
    """
    safe_title = html.escape(title)
    body: List[str] = [f"<h1>{safe_title}</h1>"]
    for report in reports:
        body.append(f"<h2>Status: {'PASS' if report.ok else 'FAIL'}</h2>")
        body.append(f"<pre>{html.escape(report.summary)}</pre>")
        if report.mismatches:
            body.append(
                "<table border=1 cellpadding=4 cellspacing=0>"
                "<tr><th>Table</th><th>Expected</th><th>Actual</th></tr>"
            )
            body.extend(
                f"<tr><td>{html.escape(tbl)}</td><td>{exp}</td><td>{act}</td></tr>"
                for tbl, exp, act in report.mismatches
            )
            body.append("</table>")
        if report.details:
            body.append(f"<pre>{html.escape(report.details)}</pre>")
    return (
        "<html><head><meta charset='utf-8'><title>"
        + safe_title
        + "</title></head><body>"
        + "\n".join(body)
        + "</body></html>"
    )

def render_report_text(reports: List[ValidationReport], timings: Optional[Dict[str, float]] = None) -> str:
    """Render a bilingual (Chinese/English) text report with optional stage timings."""
    out: List[str] = []
    out.append("============================================================")
    out.append("迁移验证报告 (Migration Validation Report)")
    out.append("============================================================")
    if timings:
        # Known stage keys, in display order, paired with their labels.
        stage_labels = [
            ('conversion_time', "语法转换"),
            ('ddl_time', "架构(DDL)"),
            ('insert_exec_time', "数据(INSERT)"),
            ('constraints_time', "约束与索引"),
            ('total_time', "整体迁移总用时"),
        ]
        out.append("阶段用时 (Stage Timings):")
        for key, label in stage_labels:
            if key in timings:
                out.append(f"  - {label}: {timings[key]:.2f}s")
        out.append("")
    for report in reports:
        verdict = "通过" if report.ok else "失败"
        suffix = f" - 验证类型: {report.kind}" if report.kind else ""
        out.append(f"[校验结果] {verdict}{suffix}")
        out.append(report.summary)
        if report.mismatches:
            out.append("差异明细:")
            for tbl, expected, actual in report.mismatches:
                out.append(f"  - 表 {tbl}: 期望 {expected}, 实际 {actual}")
        if report.details:
            out.append(report.details)
        out.append("")
    return "\n".join(out)


def _format_columns_table(headers: List[str], rows: List[List[str]]) -> List[str]:
    """Return a simple padded text table with aligned columns using spaces."""
    if not headers:
        return []
    cols = len(headers)
    norm_rows: List[List[str]] = []
    for r in rows:
        rr = (r + [""] * cols)[:cols]
        norm_rows.append([str(c) if c is not None else "" for c in rr])
    widths = [len(str(h)) for h in headers]
    for r in norm_rows:
        for i in range(cols):
            widths[i] = max(widths[i], len(r[i]))
    def pad(s: str, w: int) -> str:
        return s + (" " * (w - len(s)))
    header_line = " | ".join(pad(str(headers[i]), widths[i]) for i in range(cols))
    sep_line = "-+-".join("-" * widths[i] for i in range(cols))
    body_lines = [
        " | ".join(pad(norm_rows[r][c], widths[c]) for c in range(cols))
        for r in range(len(norm_rows))
    ]
    return [header_line, sep_line, *body_lines]

def validate_structure(db_executor) -> ValidationReport:
    """Basic structure sanity on the target: invalid indexes and FK presence.

    Both probes are best-effort: if a query fails, the check is downgraded to
    an informational "skipped" note instead of failing the report.
    """
    notes: List[str] = []
    healthy = True
    try:
        bad = db_executor.fetch_scalar("SELECT COUNT(*) FROM pg_catalog.pg_index WHERE NOT indisvalid")
        if bad and int(bad) > 0:
            healthy = False
            notes.append(f"Invalid indexes: {int(bad)}")
    except Exception as exc:
        notes.append(f"Structure check (indexes) skipped: {exc}")
    try:
        fk_total = db_executor.fetch_scalar("SELECT COUNT(*) FROM information_schema.referential_constraints")
        notes.append(f"Foreign keys present: {int(fk_total or 0)}")
    except Exception as exc:
        notes.append(f"Structure check (FK) skipped: {exc}")
    summary = "Structure validation passed" if healthy else "Structure issues found"
    return ValidationReport(
        ok=healthy,
        summary=summary,
        mismatches=[],
        details="\n".join(notes),
        kind="structure",
    )



# ---- Source (MySQL dump) parsing + validations  ----

def parse_mysql_source(sql_text: str) -> Dict[str, Dict[str, int | str]]:
    """Extract per-table metadata from a MySQL dump for migration validation.

    Returns a mapping from table name (original case, as written in its
    CREATE TABLE) to a dict with:
      - 'indexes':   KEY/INDEX/UNIQUE KEY/UNIQUE INDEX entries (PRIMARY KEY excluded)
      - 'fks':       FOREIGN KEY clauses
      - 'sequences': AUTO_INCREMENT column count (proxy for target sequences)
      - 'rows':      total VALUES tuples across all INSERT statements
    A special '__database__' entry carries {'name': <db name>} when a
    CREATE DATABASE statement is present.

    Fix vs. previous revision: removed the `table_values_concat` accumulator —
    it was populated but never used or returned (dead code; its "for md5"
    comment described behavior that does not exist).
    """
    meta: Dict[str, Dict[str, int | str]] = {}
    # Canonical table name map: lower -> original-case (from CREATE TABLE)
    canon: Dict[str, str] = {}
    text = sql_text
    # Expand MySQL conditional comments /*! ... */ to keep inner content
    text_expanded = re.sub(r"/\*!\d*\s*(.*?)\*/", r" \1 ", text, flags=re.DOTALL)
    # Database name (prefer backticked identifier)
    mdb = re.search(r"(?is)CREATE\s+DATABASE\s+(?:IF\s+NOT\s+EXISTS\s+)?`([A-Za-z0-9_]+)`", text_expanded)
    if not mdb:
        mdb = re.search(r"(?is)CREATE\s+DATABASE\s+(?:IF\s+NOT\s+EXISTS\s+)?([A-Za-z0-9_]+)\b", text_expanded)
    if mdb:
        meta.setdefault('__database__', {})['name'] = mdb.group(1)
    # CREATE TABLE: count indexes (exclude PRIMARY KEY) and FKs (preserve original case)
    for m in re.finditer(r"(?is)CREATE\s+TABLE\s+`?([A-Za-z0-9_\.]+)`?\s*\((.*?)\)\s*;", text_expanded):
        table = (m.group(1) or '').strip().strip('`"')
        if not table:
            continue
        body = m.group(2)
        # Secondary indexes: body lines starting with KEY/INDEX variants.
        # PRIMARY KEY lines start with "PRIMARY" and are therefore not matched.
        idx = sum(1 for _ in re.finditer(r"(?im)^\s*(UNIQUE\s+KEY|UNIQUE\s+INDEX|KEY|INDEX)\b", body))
        fks = sum(1 for _ in re.finditer(r"(?im)\bCONSTRAINT\s+`?[^`\s]+`?\s+FOREIGN\s+KEY\b|\bFOREIGN\s+KEY\b", body))
        # AUTO_INCREMENT columns imply sequences in target (approximate).
        # Count only when AUTO_INCREMENT appears within a column definition line,
        # e.g., `id` INT NOT NULL AUTO_INCREMENT,
        auto_inc_cols = sum(1 for _ in re.finditer(r"(?im)^\s*`[^`]+`\s+[^,]*\bAUTO_INCREMENT\b", body))
        meta.setdefault(table, {})
        meta[table]['indexes'] = int(idx)
        meta[table]['fks'] = int(fks)
        meta[table]['sequences'] = int(auto_inc_cols)
        # Record canonical name for later INSERT normalization
        canon.setdefault(table.lower(), table)
    # INSERT INTO: accumulate VALUES tuple counts per (canonical) table.
    # Parsed against the raw text so conditional-comment expansion cannot
    # disturb string literal contents.
    for im in re.finditer(r"(?is)INSERT\s+INTO\s+`?([A-Za-z0-9_\.]+)`?\b.*?\bVALUES\b\s*(\(.*?\))\s*;", text):
        raw_tbl = im.group(1)
        key = canon.get((raw_tbl or '').strip('`"').lower(), (raw_tbl or '').strip('`"'))
        values_block = im.group(2)
        # Split "(...),(...)" on the separators between top-level tuples.
        tuples = re.split(r"\)\s*,\s*\(", values_block.strip()[1:-1]) if values_block else []
        prev = int(meta.get(key, {}).get('rows', 0))
        meta.setdefault(key, {})
        meta[key]['rows'] = prev + len(tuples)

    return meta


def parse_kwdb_source(ddl_sql: str, insert_sql: str, constraints_sql: str = "") -> Dict[str, Dict[str, int | str]]:
    """Parse KWDB SQL files (DDL, INSERT, Constraints) into validation metadata.

    Counterpart of parse_mysql_source for the converted SQL: inline
    INDEX/FOREIGN KEY entries are counted from CREATE TABLE bodies, then
    CREATE INDEX and ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY statements
    from the constraints file are added on top.

    Args:
        ddl_sql: DDL SQL text (CREATE TABLE statements).
        insert_sql: INSERT SQL text.
        constraints_sql: constraints SQL text (CREATE INDEX / ALTER TABLE ADD CONSTRAINT).

    Returns:
        Mapping of table name to {'indexes', 'fks', 'sequences', 'rows'}
        (same shape as parse_mysql_source). 'sequences' is always 0: mapping a
        CREATE SEQUENCE name back to its owning table is ambiguous, so it is
        deliberately not attempted.

    Fix vs. previous revision: removed a CREATE SEQUENCE finditer loop whose
    body was `pass` — it iterated and bound variables but had no effect.
    """
    meta: Dict[str, Dict[str, int | str]] = {}
    # Canonical table name map: lower -> original-case (from CREATE TABLE)
    canon: Dict[str, str] = {}

    # Parse DDL: CREATE TABLE statements
    for m in re.finditer(r"(?is)CREATE\s+TABLE\s+[`\"]?([A-Za-z0-9_\.]+)[`\"]?\s*\((.*?)\)\s*;", ddl_sql):
        table = (m.group(1) or '').strip().strip('`"')
        if not table:
            continue
        body = m.group(2)
        # Indexes defined inline in the CREATE TABLE body: INDEX name (...)
        idx = sum(1 for _ in re.finditer(r"(?im)\bINDEX\s+[`\"]?[A-Za-z0-9_]+[`\"]?\s*\(", body))
        # Foreign keys defined inline
        fks = sum(1 for _ in re.finditer(r"(?im)\bFOREIGN\s+KEY\b", body))
        meta.setdefault(table, {})
        meta[table]['indexes'] = int(idx)
        meta[table]['fks'] = int(fks)
        meta[table]['sequences'] = 0  # intentionally not tracked (see docstring)
        # Record canonical name for later INSERT normalization
        canon.setdefault(table.lower(), table)

    # Constraints file: CREATE INDEX statements add to the per-table index count.
    for m in re.finditer(r"(?is)CREATE\s+(?:UNIQUE\s+)?INDEX\s+[`\"]?[A-Za-z0-9_]+[`\"]?\s+ON\s+[`\"]?([A-Za-z0-9_\.]+)[`\"]?", constraints_sql):
        table = (m.group(1) or '').strip().strip('`"')
        if table:
            key = canon.get(table.lower(), table)
            meta.setdefault(key, {})
            meta[key]['indexes'] = meta[key].get('indexes', 0) + 1

    # Constraints file: ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY adds to FK count.
    for m in re.finditer(r"(?is)ALTER\s+TABLE\s+[`\"]?([A-Za-z0-9_\.]+)[`\"]?\s+ADD\s+CONSTRAINT\s+[`\"]?[A-Za-z0-9_]+[`\"]?\s+FOREIGN\s+KEY", constraints_sql):
        table = (m.group(1) or '').strip().strip('`"')
        if table:
            key = canon.get(table.lower(), table)
            meta.setdefault(key, {})
            meta[key]['fks'] = meta[key].get('fks', 0) + 1

    # INSERT statements: accumulate VALUES tuple counts per (canonical) table.
    for im in re.finditer(r"(?is)INSERT\s+INTO\s+[`\"]?([A-Za-z0-9_\.]+)[`\"]?\b.*?\bVALUES\b\s*(\(.*?\))\s*;", insert_sql):
        raw_tbl = im.group(1)
        key = canon.get((raw_tbl or '').strip('`"').lower(), (raw_tbl or '').strip('`"'))
        values_block = im.group(2)
        # Split "(...),(...)" on the separators between top-level tuples.
        tuples = re.split(r"\)\s*,\s*\(", values_block.strip()[1:-1]) if values_block else []
        prev = int(meta.get(key, {}).get('rows', 0))
        meta.setdefault(key, {})
        meta[key]['rows'] = prev + len(tuples)

    return meta


def validate_structure_against_source(db_executor, source_meta: Dict[str, Dict[str, int | str]]) -> ValidationReport:
    """Compare structure metadata parsed from the MySQL source with the KWDB target.

    For every source table this checks existence, secondary-index count,
    foreign-key count and sequence count on the target. Each probe is
    best-effort: a failed query renders '?' in the details table and is
    excluded from mismatch evaluation. Database-name differences and extra
    target tables are recorded in details only and never fail the report.

    Args:
        db_executor: object exposing fetch_scalar(sql) and fetch_all(sql).
        source_meta: output of parse_mysql_source()/parse_kwdb_source().

    Returns:
        ValidationReport whose mismatches are
        ("<table> exists|indexes|fks|sequences", expected, actual).
    """
    details: List[str] = []
    mismatches: List[Tuple[str, int, int]] = []
    ok = True
    # Database name check (do not fail on db name difference; record only)
    try:
        tgt_db = db_executor.fetch_scalar("SELECT current_database()")
        src_db = source_meta.get('__database__', {}).get('name')
        if src_db:
            details.append(f"database: mysql={src_db}, kwdb={tgt_db}")
    except Exception:
        pass
    # Table existence set (current schema via pg_class/pg_namespace, include ordinary and partitioned tables)
    try:
        rows = db_executor.fetch_all(
            "SELECT lower(t.relname) AS table_name "
            "FROM pg_class t JOIN pg_namespace n ON n.oid = t.relnamespace "
            "WHERE n.nspname = current_schema() AND t.relkind IN ('r','p')"
        )
        tgt_tables = set()
        # Row shape depends on the executor: dict rows, tuple rows, or scalars.
        for r in (rows or []):
            if isinstance(r, dict):
                val = r.get('table_name') or next(iter(r.values()), None)
            elif isinstance(r, (list, tuple)):
                val = r[0] if r else None
            else:
                val = str(r)
            if isinstance(val, str) and val:
                tgt_tables.add(val)
    except Exception:
        tgt_tables = set()
    src_tables = {t for t in source_meta.keys() if t != '__database__'}
    # Extra tables: do not fail, record only (best-effort)
    # NOTE(review): src_tables keeps original case while tgt_tables is
    # lowercased, so a mixed-case source table may be reported as "extra"
    # on the target — verify whether that is intended.
    try:
        extra = sorted(list(tgt_tables - src_tables))
        if extra:
            details.append(f"Extra tables in target: {', '.join(extra)}")
    except Exception:
        pass
    # Per-table comparison rows: (name, exists, idx src/tgt, fk src/tgt, seq src/tgt)
    table_rows: List[Tuple[str, str, int, Optional[int], int, Optional[int], int, Optional[int]]] = []
    # NOTE(review): table names below are interpolated into SQL strings; this
    # appears safe only because they come from dump parsing restricted to
    # [A-Za-z0-9_.] — confirm no other callers pass unsanitized names.
    for table in sorted(source_meta.keys()):
        if table == '__database__':
            continue
        table_l = table.lower()
        # Existence check via pg_class/pg_namespace to avoid information_schema inconsistencies
        try:
            exists_val = db_executor.fetch_scalar(
                f"SELECT EXISTS (SELECT 1 FROM pg_class t JOIN pg_namespace n ON n.oid=t.relnamespace "
                f"WHERE n.nspname = current_schema() AND lower(t.relname) = '{table_l}' AND t.relkind IN ('r','p'))"
            )
            exists_bool = bool(exists_val) if exists_val is not None else False
        except Exception:
            # Fall back to the pre-fetched set when the scalar probe fails.
            exists_bool = (table_l in tgt_tables)
        # Target index count (exclude primary) using pg_indexes
        try:
            tgt_idx = db_executor.fetch_scalar(
                "SELECT COUNT(*) FROM pg_class c "
                "JOIN pg_index i ON i.indexrelid=c.oid "
                "JOIN pg_class t ON t.oid=i.indrelid "
                "JOIN pg_namespace n ON n.oid=t.relnamespace "
                f"WHERE n.nspname = current_schema() AND lower(t.relname) = '{table_l}' AND NOT i.indisprimary"
            )
        except Exception:
            tgt_idx = None  # '?' in the details table; skipped in mismatch checks
        # Target FK count per table
        try:
            tgt_fk = db_executor.fetch_scalar(
                f"SELECT COUNT(*) FROM information_schema.table_constraints WHERE constraint_type = 'FOREIGN KEY' AND table_schema = current_schema() AND lower(table_name) = '{table_l}'"
            )
        except Exception:
            tgt_fk = None
        # Target sequence count per table (approximate by sequences named like table_%_seq) — details only
        try:
            tgt_seq = db_executor.fetch_scalar(
                f"SELECT COUNT(*) FROM information_schema.sequences WHERE sequence_schema = current_schema() AND lower(sequence_name) LIKE '{table_l}_%_seq'"
            )
        except Exception:
            tgt_seq = None
        # Source-side expectations (missing keys default to 0).
        src_idx = int(source_meta.get(table, {}).get('indexes', 0) or 0)
        src_fk = int(source_meta.get(table, {}).get('fks', 0) or 0)
        src_seq = int(source_meta.get(table, {}).get('sequences', 0) or 0)

        # Record existence mismatch if table truly not found
        if not exists_bool:
            ok = False
            mismatches.append((f"{table} exists", 1, 0))
        tgt_idx_i = int(tgt_idx) if tgt_idx is not None else None
        if tgt_idx_i is not None and tgt_idx_i != src_idx:
            ok = False
            mismatches.append((f"{table} indexes", src_idx, tgt_idx_i))
        tgt_fk_i = int(tgt_fk) if tgt_fk is not None else None
        if tgt_fk_i is not None and tgt_fk_i != src_fk:
            ok = False
            mismatches.append((f"{table} fks", src_fk, tgt_fk_i))
        tgt_seq_i = int(tgt_seq) if tgt_seq is not None else None
        # Sequences: now enforce equality like indexes/FKs
        if tgt_seq_i is not None and tgt_seq_i != src_seq:
            ok = False
            mismatches.append((f"{table} sequences", src_seq, tgt_seq_i))
        table_rows.append((
            table_l,
            'Yes' if exists_bool else 'No',
            src_idx, tgt_idx_i,
            src_fk, tgt_fk_i,
            src_seq, tgt_seq_i,
        ))
    # Build a readable table for structure comparison
    header = ["Table", "Exists(kwdb)", "IDX(mysql)", "IDX(kwdb)", "FK(mysql)", "FK(kwdb)", "SEQ(mysql)", "SEQ(kwdb)"]
    rows_fmt: List[List[str]] = []
    for t, ex, isrc, itgt, fsrc, ftgt, ssrc, stgt in table_rows:
        rows_fmt.append([
            t,
            ex,
            str(isrc),
            ("?" if itgt is None else str(itgt)),
            str(fsrc),
            ("?" if ftgt is None else str(ftgt)),
            str(ssrc),
            ("?" if stgt is None else str(stgt)),
        ])
    table_lines = _format_columns_table(header, rows_fmt)
    lines_tbl: List[str] = []
    if details:
        lines_tbl.extend(details)
    lines_tbl.extend(table_lines)
    summary = "Structure validation (mysql vs kwdb) passed" if ok else "Structure mismatches detected (mysql vs kwdb)"
    return ValidationReport(ok=ok, summary=summary, mismatches=mismatches, details="\n".join(lines_tbl), kind="structure")


def validate_counts_against_source(db_executor, source_meta: Dict[str, Dict[str, int | str]]) -> ValidationReport:
    """Compare row counts parsed from the source dump against COUNT(*) on the target.

    Uses exact table names (original case) so quoted identifiers resolve
    correctly; a table absent from the fetched counts appears as -1.
    """
    table_names = sorted(t for t in source_meta if t != '__database__')
    counted = db_executor.fetch_table_row_counts(table_names) or {}
    diffs: List[Tuple[str, int, int]] = []
    display: List[List[str]] = []
    for name in table_names:
        expected = int(source_meta.get(name, {}).get('rows', 0) or 0)
        actual = int(counted.get(name, -1))
        display.append([name, str(expected), str(actual)])
        if expected != actual:
            diffs.append((name, expected, actual))
    passed = not diffs
    lines = _format_columns_table(["Table", "COUNT(mysql)", "COUNT(kwdb)"], display)
    summary = (
        "Count(*) validation (mysql vs kwdb) passed"
        if passed
        else "Row count mismatches detected (mysql vs kwdb)"
    )
    return ValidationReport(
        ok=passed,
        summary=summary,
        mismatches=diffs,
        details="\n".join(lines),
        kind="count(*)校验",
    )


