import re
import math
import os
import csv
import json
from typing import List, Tuple, Optional, Dict, Any

# ------------------------ Tag frequency loader and utils ------------------------
_TAG_COUNTS = None
_TAG_STATS = None
_LAST_RUN_HEADER = ""
_CSV_FILENAME = "danbooru_tags_post_count.csv"
_STATS_CACHE_FILENAME = "tag_stats_cache.json"


def _csv_path():
    """Absolute path of the bundled tag-count CSV (lives next to this module)."""
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, _CSV_FILENAME)


def _stats_cache_path():
    """Absolute path of the JSON stats cache (lives next to this module)."""
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, _STATS_CACHE_FILENAME)

def _read_counts_csv(path: str) -> Dict[str, int]:
    """Parse ``tag,count`` rows from the CSV at *path* into a dict.

    The first row is treated as a header and skipped; short rows and rows
    whose count column is not an integer are silently ignored.
    """
    counts: Dict[str, int] = {}
    with open(path, "r", encoding="utf-8", newline="") as f:
        reader = csv.reader(f)
        next(reader, None)  # skip header row
        for row in reader:
            if not row or len(row) < 2:
                continue
            try:
                counts[row[0].strip()] = int(row[1])
            except Exception:
                continue  # malformed count column
    return counts


def _zero_stats() -> Dict[str, Any]:
    """Neutral stats dict used when no counts are available."""
    return {"avg": 0, "min": 0, "max": 0, "x_min": 0.0, "x_max": 0.0, "x_avg": 0.0}


def _compute_stats(counts: Dict[str, int]) -> Dict[str, Any]:
    """Compute avg/min/max and their log10 counterparts over *counts* values."""
    total = 0
    total_log = 0.0
    n = 0
    min_count: Optional[int] = None
    max_count: Optional[int] = None
    for cnt in counts.values():
        try:
            c = int(cnt)
        except Exception:
            continue
        total += c
        total_log += math.log10(max(c, 1))  # clamp to 1 to avoid log10(0)
        n += 1
        if min_count is None or c < min_count:
            min_count = c
        if max_count is None or c > max_count:
            max_count = c
    if n == 0:
        return _zero_stats()
    return {
        "avg": total / n,
        "min": int(min_count),
        "max": int(max_count),
        "x_min": math.log10(max(min_count, 1)),
        "x_max": math.log10(max(max_count, 1)),
        "x_avg": total_log / n,
    }


def _read_stats_cache() -> Optional[Dict[str, Any]]:
    """Return the cached stats dict if present and structurally valid, else None."""
    cache_file = _stats_cache_path()
    if not os.path.exists(cache_file):
        return None
    try:
        with open(cache_file, "r", encoding="utf-8") as f:
            cached = json.load(f)
        # basic validation: all expected keys must be present
        if all(k in cached for k in ("avg", "min", "max", "x_min", "x_max", "x_avg")):
            return dict(cached)
    except Exception as e:
        print(f"[TagFrequencyWeighter] Failed to read stats cache: {e}")
    return None


def _write_stats_cache(stats: Dict[str, Any]) -> bool:
    """Persist *stats* to the JSON cache file; return True on success."""
    try:
        with open(_stats_cache_path(), "w", encoding="utf-8") as f:
            json.dump(stats, f)
        return True
    except Exception as e:
        print(f"[TagFrequencyWeighter] Failed to write stats cache: {e}")
        return False


def _stats_header(label: str, tag_count: int, stats: Dict[str, Any]) -> str:
    """One-line summary used as the consolidated log header."""
    return (
        f"[TagFrequencyWeighter] {label} | tags={tag_count} avg={stats['avg']:.2f} "
        f"min={stats['min']} max={stats['max']} x_avg={stats['x_avg']:.3f}"
    )


def _load_tag_data(compute_stats: bool = False):
    """Lazy-load tag counts from CSV and optionally recompute stats.

    - If compute_stats is False (default), build the counts map once and reuse
      the JSON stats cache; stats are computed (and the cache initialized) only
      when no valid cache exists.
    - If compute_stats is True, re-read the CSV, recompute avg/min/max and the
      log10 stats, and refresh the cache.

    Results are stored in the module globals ``_TAG_COUNTS`` / ``_TAG_STATS``,
    plus a summary line in ``_LAST_RUN_HEADER``.
    """
    global _TAG_COUNTS, _TAG_STATS, _LAST_RUN_HEADER

    if not compute_stats:
        # Load counts lazily, once.
        if _TAG_COUNTS is None:
            path = _csv_path()
            if os.path.exists(path):
                _TAG_COUNTS = _read_counts_csv(path)
            else:
                print(f"[TagFrequencyWeighter] CSV file not found at: {path}")
                _TAG_COUNTS = {}

        # Prefer the persisted stats cache and avoid recomputing.
        cached = _read_stats_cache()
        if cached is not None:
            _TAG_STATS = cached
            _LAST_RUN_HEADER = _stats_header("cache", len(_TAG_COUNTS), _TAG_STATS)
            return

        # No valid cache: compute once from available counts and persist so
        # future runs hit the cache path.
        if _TAG_COUNTS:
            _TAG_STATS = _compute_stats(_TAG_COUNTS)
            if _write_stats_cache(_TAG_STATS):
                _LAST_RUN_HEADER = _stats_header("init ", len(_TAG_COUNTS), _TAG_STATS)
        else:
            _TAG_STATS = _zero_stats()
            _LAST_RUN_HEADER = "[TagFrequencyWeighter] no-stats | tags=0 (CSV missing) using zeros"
        return

    # compute_stats=True: re-read the CSV and rebuild counts + stats.
    path = _csv_path()
    if not os.path.exists(path):
        print(f"[TagFrequencyWeighter] CSV file not found at: {path}")
        _TAG_COUNTS = {}
        # Fall back to cached stats even when the CSV is gone.
        cached = _read_stats_cache()
        if cached is not None:
            _TAG_STATS = cached
            _LAST_RUN_HEADER = _stats_header("cache", 0, _TAG_STATS)
        else:
            _TAG_STATS = _zero_stats()
        return

    _TAG_COUNTS = _read_counts_csv(path)
    _TAG_STATS = _compute_stats(_TAG_COUNTS)
    # Persist the result. On write failure we keep the freshly computed stats
    # in memory (bug fix: the previous code reset them to zeros when the
    # cache write raised).
    _write_stats_cache(_TAG_STATS)
    if _TAG_COUNTS:
        _LAST_RUN_HEADER = _stats_header("recompute", len(_TAG_COUNTS), _TAG_STATS)


 


def _format_tag_for_output(original: str, preserve_underscores: bool = False) -> str:
    # Prefer spaces in output for readability unless preserving underscores
    s = original.strip()
    return s if preserve_underscores else s.replace("_", " ")


def _clamp(v: float, lo: float, hi: float) -> float:
    return max(lo, min(hi, v))


def _compute_weight_from_x(
    x: float,
    x_min: float,
    x_max: float,
    x_pivot: float,
    min_w: float,
    max_w: float,
) -> float:
    """Piecewise linear around a pivot in log space and clamped to [min_w, max_w].
    The pivot maps to the midpoint mid_w = (min_w + max_w)/2 to ensure values spread across the full range,
    even when [min,max] doesn't include 1.0 (e.g., [2,5]).
    - x < x_pivot: interpolate from mid_w (at pivot) up to max_w (at x_min)
    - x > x_pivot: interpolate from mid_w (at pivot) down to min_w (at x_max)
    """
    lo, hi = (min(min_w, max_w), max(min_w, max_w))
    if x_min >= x_max:
        return (lo + hi) / 2.0

    mid_w = (lo + hi) / 2.0

    if x <= x_pivot:
        denom = (x_pivot - x_min)
        t = 0.0 if denom <= 0 else (x_pivot - x) / denom  # 0 at pivot, 1 at x_min
        w = mid_w + t * (hi - mid_w)
        return _clamp(w, lo, hi)
    else:
        denom = (x_max - x_pivot)
        t = 0.0 if denom <= 0 else (x - x_pivot) / denom  # 0 at pivot, 1 at x_max
        w = mid_w - t * (mid_w - lo)
        return _clamp(w, lo, hi)


def _weight_to_str(w: float) -> str:
    # 2 decimal places, trimmed
    s = f"{w:.2f}".rstrip("0").rstrip(".")
    return s or "1"


def _is_escaped(s: str, idx: int) -> bool:
    # Returns True if s[idx] is escaped by an odd number of backslashes immediately before it
    backslashes = 0
    j = idx - 1
    while j >= 0 and s[j] == '\\':
        backslashes += 1
        j -= 1
    return (backslashes % 2) == 1


def _extract_group(s: str, start: int) -> Tuple[str, int]:
    """Given s[start] == '(', return the full group text and the index just past ')'.

    Handles nested parentheses and backslash escapes. If the group is never
    closed, everything from *start* to the end of the string is returned.
    """
    depth = 0
    for pos in range(start, len(s)):
        if _is_escaped(s, pos):
            continue  # escaped characters never open/close groups
        ch = s[pos]
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth == 0:
                return s[start : pos + 1], pos + 1  # include the closing ')'
    return s[start:], len(s)


def _normalize_lookup_key(tag: str) -> str:
    # Lowercase, strip, unescape \( \) for lookup, spaces -> underscores
    t = tag.strip()
    # unescape only parentheses for lookup
    t = t.replace("\\(", "(").replace("\\)", ")")
    return t.lower().replace(" ", "_")


class TagFrequencyWeighter:
    """Node (ComfyUI-style INPUT_TYPES/RETURN_TYPES interface) that rewrites a
    prompt so that known tags are wrapped with frequency-derived weights,
    e.g. ``megumin`` -> ``(megumin:1.3)``.

    Tag counts come from the bundled CSV (loaded by ``_load_tag_data``); each
    tag's count is mapped in log10 space, piecewise-linearly around a pivot,
    so rare tags trend toward ``max_value`` and popular tags toward
    ``min_value``. Existing ``(...)`` groups, commas and whitespace are
    passed through untouched.
    """

    def __init__(self):
        # ensure counts are ready (lazy, use precomputed stats by default)
        _load_tag_data(compute_stats=False)

    @classmethod
    def INPUT_TYPES(cls):
        # All inputs are declared under "required"; tooltips describe each option.
        return {
            "required": {
                "input_prompt": ("STRING", {"multiline": True, "default": "2girls, megumin, from above", "tooltip": "Prompt to reweight; commas/spacing preserved."}),
                # FLOAT sliders
                "min_value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1, "tooltip": "Lower bound for output weights; popular tags trend toward this."}),
                "max_value": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 5.0, "step": 0.1, "tooltip": "Upper bound for output weights; rare tags trend toward this."}),
                # Single mode with pivoted scaling into [min,max]
                "calculate_average": ("BOOLEAN", {"default": False, "tooltip": "Recompute averages from CSV now and update cache."}),
                "preserve_underscores_in_output": ("BOOLEAN", {"default": False, "tooltip": "Keep underscores in wrapped tags (do not replace with spaces)."}),
                "pivot": (["log10(avg)", "x_avg"], {"default": "log10(avg)", "tooltip": "Neutral pivot (1.0): log10(avg) or mean of log10 counts (x_avg)."}),
                "ignore_below_tag_count": ("INT", {"default": 0, "min": 0, "max": 7000000, "tooltip": "Leave tags with counts below this unchanged."}),
                "ignore_above_tag_count": ("INT", {"default": 2000, "min": 0, "max": 7000000, "tooltip": "Leave tags with counts above this unchanged."}),
                "debug": ("BOOLEAN", {"default": False, "tooltip": "Print per-tag counts while processing (verbose)."}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("output_prompt",)
    FUNCTION = "weight_tags"
    CATEGORY = "prompt"

    def weight_tags(self, input_prompt: str, min_value: float, max_value: float, calculate_average: bool, preserve_underscores_in_output: bool, pivot: str, ignore_below_tag_count: int, ignore_above_tag_count: int, debug: bool):
        """Reweight *input_prompt* and return the result as a 1-tuple of str.

        Two passes: (1) tokenize the prompt into group/comma/whitespace/text
        elements, preserving all original spacing and existing ``(...)``
        groups verbatim; (2) rebuild the prompt, wrapping each recognized
        text token as ``(tag:weight)`` unless it is unknown, filtered out by
        the ignore thresholds, or its weight rounds to 1.0.
        """
        # Optionally recompute global stats and cache them
        _load_tag_data(compute_stats=bool(calculate_average))

        # Clamp and order
        min_v = float(min_value)
        max_v = float(max_value)
        if min_v > max_v:
            min_v, max_v = max_v, min_v

        s = input_prompt
        n = len(s)
        i = 0
        elements: List[Dict[str, Any]] = []

        # Pass 1: parse into elements while preserving whitespace and existing groups
        while i < n:
            ch = s[i]
            if ch == '(' and not _is_escaped(s, i):
                # Existing weighted group: copied through verbatim in pass 2.
                group_text, j = _extract_group(s, i)
                elements.append({"type": "group", "text": group_text})
                i = j
                continue
            if ch == ',' and not _is_escaped(s, i):
                elements.append({"type": "comma", "text": ","})
                i += 1
                # capture following whitespace (spaces/tabs only)
                ws_start = i
                while i < n and s[i] in " \t":
                    i += 1
                if i > ws_start:
                    elements.append({"type": "ws", "text": s[ws_start:i]})
                continue
            if ch in " \t\r\n":
                # accumulate plain whitespace outside comma context
                ws_start = i
                while i < n and s[i] in " \t\r\n":
                    i += 1
                elements.append({"type": "ws", "text": s[ws_start:i]})
                continue
            # regular text token up to next comma or group start
            start = i
            while i < n:
                if s[i] == ',' and not _is_escaped(s, i):
                    break
                if s[i] == '(' and not _is_escaped(s, i):
                    break
                i += 1
            elements.append({"type": "text", "text": s[start:i]})

        # Compute per-prompt x_min/x_max for scaling
        stats = _TAG_STATS or {"avg": 1.0, "x_avg": 0.0, "x_min": 0.0, "x_max": 0.0}

        # Collect log10(count) of every recognized tag so the weight range is
        # scaled relative to the tags actually present in this prompt.
        x_values: List[float] = []
        counts_map = _TAG_COUNTS or {}
        for idx, el in enumerate(elements):
            if el.get("type") == "text":
                token_text = el["text"]
                key = _normalize_lookup_key(token_text.strip())
                if not key:
                    continue
                cnt = counts_map.get(key)
                if cnt and cnt > 0:
                    x_values.append(math.log10(max(cnt, 1)))

        # fallback to global stats if no known tags outside groups
        if x_values:
            x_min_prompt = min(x_values)
            x_max_prompt = max(x_values)
        else:
            x_min_prompt = float(stats.get("x_min", 0.0))
            x_max_prompt = float(stats.get("x_max", 0.0))

        # Choose pivot from stats
        if pivot == "x_avg":
            x_pivot = float(stats.get("x_avg", 0.0))
        else:
            # log10 of the arithmetic-mean count; floored at 1 to keep log valid
            x_pivot = math.log10(max(float(stats.get("avg", 1.0)), 1.0))

        # Pass 2: build output using scaling based on prompt and preserving spaces
        out: List[str] = []
        modified = 0
        i = 0
        while i < len(elements):
            el = elements[i]
            typ = el["type"]
            if typ in ("group", "comma", "ws"):
                # Non-text elements are emitted unchanged.
                out.append(el["text"])
                i += 1
                continue

            # text element
            raw_text = el["text"]
            stripped = raw_text.strip()
            key = _normalize_lookup_key(stripped)
            cnt = counts_map.get(key)
            if debug and cnt is not None:
                print(f"[TagFrequencyWeighter][debug] tag='{stripped}' count={cnt}")

            # Detect if next element is a comma separator (to relocate into parens)
            next_is_comma = (i + 1 < len(elements) and elements[i + 1]["type"] == "comma")
            following_ws = ""
            if next_is_comma and (i + 2) < len(elements) and elements[i + 2]["type"] == "ws":
                following_ws = elements[i + 2]["text"]

            def append_original_text_with_sep():
                # Emit the token plus its trailing comma/whitespace unchanged.
                out.append(raw_text)
                if next_is_comma:
                    out.append(",")
                    if following_ws:
                        out.append(following_ws)

            if not stripped or cnt is None or cnt <= 0:
                # Unknown or empty: leave exactly as-is
                append_original_text_with_sep()
                # advance over comma and ws if present
                i += 1 + (1 if next_is_comma else 0) + (1 if next_is_comma and following_ws else 0)
                continue

            # Threshold-based ignoring: leave unmodified if outside desired count band
            if (ignore_below_tag_count and cnt < ignore_below_tag_count) or (
                ignore_above_tag_count and cnt > ignore_above_tag_count
            ):
                append_original_text_with_sep()
                i += 1 + (1 if next_is_comma else 0) + (1 if next_is_comma and following_ws else 0)
                continue

            x = math.log10(max(cnt, 1))
            w = _compute_weight_from_x(x, x_min_prompt, x_max_prompt, x_pivot, min_v, max_v)
            # If rounds to 1.0 at 2 decimals, do not modify the token at all
            if round(w, 2) == 1.0:
                append_original_text_with_sep()
                i += 1 + (1 if next_is_comma else 0) + (1 if next_is_comma and following_ws else 0)
                continue

            # Build wrapped tag, normalize formatting inside parentheses only
            tag_out = _format_tag_for_output(stripped, preserve_underscores_in_output)
            w_str = _weight_to_str(w)
            if next_is_comma:
                # Intentional: the trailing comma is moved inside the parens.
                out.append(f"({tag_out},:{w_str})")
                if following_ws:
                    out.append(following_ws)
                # skip the comma and ws elements
                i += 3 if following_ws else 2
            else:
                out.append(f"({tag_out}:{w_str})")
                i += 1
            modified += 1

        output = ''.join(out)
        # Single consolidated line: last run header + modified count
        try:
            hdr = _LAST_RUN_HEADER or "[TagFrequencyWeighter]"
            print(f"{hdr} | Tags modified = {modified}")
        except Exception:
            pass
        return (output,)