import os
import sys
import json
import math
from dataclasses import dataclass, asdict
from typing import List, Tuple, Optional, Dict, Any, Union

import cv2
import numpy as np


def imread_unicode(path: str, flags: int = cv2.IMREAD_COLOR) -> Optional[np.ndarray]:
    """Read an image from *path*, tolerating non-ASCII paths.

    cv2.imread cannot open unicode paths on some platforms, so the file is
    read as raw bytes via numpy and decoded in memory. Returns None on any
    failure (missing file, empty file, undecodable data).
    """
    try:
        raw = np.fromfile(path, dtype=np.uint8)
        if raw.size == 0:
            return None
        return cv2.imdecode(raw, flags)
    except Exception:
        return None


@dataclass
class AssetMatch:
    """One template-matching result: where an asset image was placed on the effect image."""
    name: str              # asset file name (also used as the layer identifier)
    path: str              # path of the asset file on disk
    x: int                 # top-left x of the placement, in effect-image pixels
    y: int                 # top-left y of the placement, in effect-image pixels
    width: int             # placed width (template width after scaling)
    height: int            # placed height (template height after scaling)
    score: float           # matching confidence; higher is better
    z_index: int = 0       # stacking order, bottom = 0
    matched: bool = True   # False => placeholder for an asset that was not found
    scale: float = 1.0     # scale applied to the template (horizontal scale for anisotropic matches)


@dataclass
class OcrText:
    """One OCR'd text box with its estimated visual style."""
    text: str    # recognized string
    left: int    # box bounds, in effect-image pixel coordinates
    top: int
    right: int
    bottom: int
    color_rgb: Tuple[int, int, int]  # estimated text color as (r, g, b)
    font_size_px: int                # estimated font size in pixels

def read_image_bgr_with_alpha(path: str) -> Tuple[np.ndarray, Optional[np.ndarray]]:
    """Load an image as a (BGR, alpha) pair; alpha is None when absent.

    Grayscale inputs are promoted to 3-channel BGR; BGRA inputs are split
    into their color plane and alpha plane.

    Raises:
        FileNotFoundError: if the file cannot be read or decoded.
    """
    raw = imread_unicode(path, cv2.IMREAD_UNCHANGED)
    if raw is None:
        raise FileNotFoundError(f"Failed to read image: {path}")
    if raw.ndim == 2:
        return cv2.cvtColor(raw, cv2.COLOR_GRAY2BGR), None
    if raw.shape[2] == 4:
        return raw[:, :, :3], raw[:, :, 3]
    return raw, None


def load_effect_image(effect_path: str) -> np.ndarray:
    """Read the target effect image as BGR; raise FileNotFoundError if unreadable."""
    img = imread_unicode(effect_path, cv2.IMREAD_COLOR)
    if img is None:
        raise FileNotFoundError(f"Failed to read effect image: {effect_path}")
    return img


def list_asset_images(assets_dir: str) -> List[Tuple[str, str]]:
    """Return (filename, full_path) pairs for image files in *assets_dir*.

    Only regular files with common raster extensions are included
    (directories named e.g. "foo.png" are skipped). Results are sorted by
    filename so downstream matching order is deterministic across
    platforms — os.listdir order is OS-dependent.
    """
    exts = (".png", ".jpg", ".jpeg", ".bmp")
    results: List[Tuple[str, str]] = []
    for fname in sorted(os.listdir(assets_dir)):
        full = os.path.join(assets_dir, fname)
        if fname.lower().endswith(exts) and os.path.isfile(full):
            results.append((fname, full))
    return results


def match_single_template(effect: np.ndarray, template_bgr: np.ndarray, mask: Optional[np.ndarray]) -> Tuple[int, int, float]:
    """Locate *template_bgr* in *effect*; return (x, y, score), higher = better.

    With an alpha-derived mask, masked TM_SQDIFF_NORMED is used — its best
    match is at the minimum, so the score is inverted to 1 - min. Without a
    mask, TM_CCOEFF_NORMED is used and the maximum is taken directly.
    """
    if mask is None:
        res = cv2.matchTemplate(effect, template_bgr, cv2.TM_CCOEFF_NORMED)
        _, peak_val, _, peak_loc = cv2.minMaxLoc(res)
        return int(peak_loc[0]), int(peak_loc[1]), float(peak_val)
    res = cv2.matchTemplate(effect, template_bgr, cv2.TM_SQDIFF_NORMED, mask=mask)
    trough_val, _, trough_loc, _ = cv2.minMaxLoc(res)
    # Invert so that callers can always treat larger scores as better.
    return int(trough_loc[0]), int(trough_loc[1]), 1.0 - float(trough_val)


def match_template_multiscale(effect: np.ndarray, template_bgr: np.ndarray, mask: Optional[np.ndarray], scales: List[float]) -> Tuple[int, int, float, float, int, int]:
    """Search *effect* for *template_bgr* over a list of uniform scales.

    For each scale the template (and mask, if any) is resized and matched
    both in color space and in Canny edge space; the higher-scoring result
    wins for that scale. Returns (x, y, score, scale, width, height) of the
    overall best placement.
    """
    best_score = -1.0
    best = (0, 0, 1.0, template_bgr.shape[1], template_bgr.shape[0])
    # The effect's edge map does not depend on the template scale — compute once.
    try:
        effect_edges = cv2.Canny(cv2.cvtColor(effect, cv2.COLOR_BGR2GRAY), 50, 150)
    except Exception:
        effect_edges = None
    for s in scales:
        if abs(s - 1.0) < 1e-6:
            tb = template_bgr
            mk = mask
        else:
            new_w = max(1, int(template_bgr.shape[1] * s))
            new_h = max(1, int(template_bgr.shape[0] * s))
            tb = cv2.resize(template_bgr, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
            mk = None
            if mask is not None:
                mk = cv2.resize(mask, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
        x, y, score = match_single_template(effect, tb, mk)
        # Edge-space backup matching to increase robustness to color differences.
        if effect_edges is not None:
            try:
                tmp_edges = cv2.Canny(cv2.cvtColor(tb, cv2.COLOR_BGR2GRAY), 50, 150)
                ex, ey, es = match_single_template(effect_edges, tmp_edges, mk)
                # Fix: compare BEFORE overwriting score. The original did
                # `score = max(score, es)` first, so `es > score` was never
                # true and the edge-match location could never be adopted.
                if es > score:
                    score = es
                    x, y = ex, ey
            except Exception:
                pass
        if score > best_score:
            best_score = score
            best = (x, y, s, tb.shape[1], tb.shape[0])
    x, y, s, w, h = best
    return x, y, best_score, s, w, h


def match_template_anisotropic(effect: np.ndarray, template_bgr: np.ndarray, mask: Optional[np.ndarray], sx_list: List[float], sy_list: List[float]) -> Tuple[int, int, float, float, float, int, int]:
    """Grid-search independent horizontal/vertical template scales.

    Returns (x, y, score, sx, sy, width, height) of the highest-scoring
    (sx, sy) combination — useful for UI bars that are stretched along
    one axis only.
    """
    base_w, base_h = template_bgr.shape[1], template_bgr.shape[0]
    top_score = -1.0
    top = (0, 0, 1.0, 1.0, base_w, base_h)
    for sy in sy_list:
        for sx in sx_list:
            w = max(1, int(base_w * sx))
            h = max(1, int(base_h * sy))
            scaled = cv2.resize(template_bgr, (w, h), interpolation=cv2.INTER_LINEAR)
            scaled_mask = None
            if mask is not None:
                scaled_mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_NEAREST)
            px, py, s = match_single_template(effect, scaled, scaled_mask)
            if s > top_score:
                top_score = s
                top = (px, py, sx, sy, scaled.shape[1], scaled.shape[0])
    x, y, sx, sy, w, h = top
    return x, y, top_score, sx, sy, w, h


def find_assets_positions(effect: np.ndarray, assets: List[Tuple[str, str]], score_threshold: float = 0.5) -> List[AssetMatch]:
    """Template-match every asset image against the effect image.

    Each asset is searched over a range of uniform scales. Bar-like assets
    (sliders/progress bars, identified by Chinese keywords in the filename)
    additionally get an anisotropic-scale fallback, since they are often
    stretched along one axis. Assets that never clear the threshold are
    kept as unmatched placeholders so callers can still report them.
    """
    matches: List[AssetMatch] = []
    for name, path in assets:
        template_bgr, alpha = read_image_bgr_with_alpha(path)
        # Crop the template to its alpha bounding box to remove empty transparent borders.
        if alpha is not None:
            ys, xs = np.where(alpha > 0)
            if len(xs) > 0 and len(ys) > 0:
                x1, y1, x2, y2 = np.min(xs), np.min(ys), np.max(xs) + 1, np.max(ys) + 1
                template_bgr = template_bgr[y1:y2, x1:x2]
                alpha = alpha[y1:y2, x1:x2]
        mask = None
        if alpha is not None:
            mask = (alpha > 0).astype(np.uint8) * 255
        # Multi-scale search to be robust to scaled placements.
        scales = [round(s, 2) for s in np.linspace(0.7, 1.3, 13)]
        x, y, score, scale, w_s, h_s = match_template_multiscale(effect, template_bgr, mask, scales)
        if not math.isfinite(score):
            # Masked SQDIFF can yield NaN/inf on degenerate masks; skip entirely.
            continue
        if score >= score_threshold:
            matches.append(AssetMatch(name=name, path=path, x=x, y=y, width=w_s, height=h_s, score=score, matched=True, scale=scale))
            continue
        # Fallback for bar-like assets: allow anisotropic scaling (e.g. width stretch).
        if ("滑动条" in name) or ("进度" in name) or ("背景" in name and ("滑动" in name or "条" in name)):
            sx_list = [round(s, 2) for s in np.linspace(0.5, 1.8, 14)]
            sy_list = [round(s, 2) for s in np.linspace(0.6, 1.2, 7)]
            ax, ay, ascore, sx, sy, aw, ah = match_template_anisotropic(effect, template_bgr, mask, sx_list, sy_list)
            if math.isfinite(ascore) and ascore >= (score_threshold * 0.9):
                # Only the horizontal scale is recorded — AssetMatch has a single scale field.
                matches.append(AssetMatch(name=name, path=path, x=ax, y=ay, width=aw, height=ah, score=ascore, matched=True, scale=sx))
                continue
        # Keep an unmatched placeholder; callers may still include it in the output tree.
        matches.append(AssetMatch(name=name, path=path, x=0, y=0, width=template_bgr.shape[1], height=template_bgr.shape[0], score=score, matched=False, scale=1.0))
    return matches


def rect_overlap(a: AssetMatch, b: AssetMatch) -> Optional[Tuple[int, int, int, int]]:
    """Return the intersection of two match rectangles as (x, y, w, h), or None if disjoint."""
    left = max(a.x, b.x)
    top = max(a.y, b.y)
    right = min(a.x + a.width, b.x + b.width)
    bottom = min(a.y + a.height, b.y + b.height)
    if right <= left or bottom <= top:
        return None
    return left, top, right - left, bottom - top


def crop(img: np.ndarray, x: int, y: int, w: int, h: int) -> np.ndarray:
    """Return the view of *img* covering the w-by-h window whose top-left is (x, y)."""
    x2, y2 = x + w, y + h
    return img[y:y2, x:x2]


def pairwise_top_on_overlap(effect: np.ndarray, a: AssetMatch, b: AssetMatch) -> Optional[Tuple[str, str]]:
    """Decide the stacking order of two overlapping matches.

    Returns (bottom_name, top_name) when one template explains the overlap
    region clearly better (its MSE against the effect is at least 10% lower
    than the other's), else None.
    """
    region = rect_overlap(a, b)
    if region is None:
        return None
    rx, ry, rw, rh = region
    effect_roi = crop(effect, rx, ry, rw, rh)

    def _load_placed(m: AssetMatch) -> np.ndarray:
        # Reproduce the preprocessing applied during matching: crop to the
        # alpha bounding box, then resize to the matched (placed) size.
        # The original code used the raw on-disk template, which misaligns
        # ROI coordinates whenever the match used scale != 1 or an
        # alpha-cropped template.
        bgr, alpha = read_image_bgr_with_alpha(m.path)
        if alpha is not None:
            ys, xs = np.where(alpha > 0)
            if len(xs) > 0 and len(ys) > 0:
                bgr = bgr[np.min(ys):np.max(ys) + 1, np.min(xs):np.max(xs) + 1]
        if bgr.shape[1] != m.width or bgr.shape[0] != m.height:
            bgr = cv2.resize(bgr, (m.width, m.height), interpolation=cv2.INTER_LINEAR)
        return bgr

    a_roi = crop(_load_placed(a), rx - a.x, ry - a.y, rw, rh)
    b_roi = crop(_load_placed(b), rx - b.x, ry - b.y, rw, rh)
    # Guard against degenerate crops that would make the MSE undefined.
    if a_roi.shape != effect_roi.shape or b_roi.shape != effect_roi.shape:
        return None

    # Compare which template is closer to the effect in the overlap region.
    def _mse(u: np.ndarray, v: np.ndarray) -> float:
        diff = u.astype(np.float32) - v.astype(np.float32)
        return float(np.mean(diff * diff))

    mse_a = _mse(a_roi, effect_roi)
    mse_b = _mse(b_roi, effect_roi)

    if mse_a < mse_b * 0.9:
        return b.name, a.name  # a explains the overlap better -> a on top
    if mse_b < mse_a * 0.9:
        return a.name, b.name  # b on top
    return None


def infer_z_order(effect: np.ndarray, matches: List[AssetMatch]) -> List[AssetMatch]:
    """Infer a bottom-to-top stacking order (z_index) for matched assets.

    Pairwise overlap comparisons yield bottom -> top edges; Kahn's
    topological sort turns them into z indices. If the pairwise votes are
    cyclic, fall back to sorting by area, largest first (bigger elements
    are assumed to sit under smaller ones).
    """
    # Build DAG edges: bottom -> top.
    edges: Dict[str, set] = {m.name: set() for m in matches}
    for i in range(len(matches)):
        for j in range(i + 1, len(matches)):
            verdict = pairwise_top_on_overlap(effect, matches[i], matches[j])
            if verdict is None:
                continue
            bottom, top = verdict
            edges[bottom].add(top)

    # Kahn topological sort. Match counts are tiny, so list.pop(0) is fine.
    in_deg: Dict[str, int] = {n: 0 for n in edges}
    for tos in edges.values():
        for t in tos:
            in_deg[t] += 1

    queue: List[str] = [n for n, d in in_deg.items() if d == 0]
    ordered: List[str] = []
    while queue:
        n = queue.pop(0)
        ordered.append(n)
        for t in edges[n]:
            in_deg[t] -= 1
            if in_deg[t] == 0:
                queue.append(t)

    if len(ordered) != len(matches):
        # Cycle detected: heuristic fallback, largest area first so bigger
        # elements end up under smaller ones.
        ordered = [m.name for m in sorted(matches, key=lambda m: m.width * m.height, reverse=True)]

    # Assign z_index according to order (bottom=0 ... top=n-1).
    name_to_z: Dict[str, int] = {name: idx for idx, name in enumerate(ordered)}
    result: List[AssetMatch] = [
        AssetMatch(**{**asdict(m), "z_index": name_to_z.get(m.name, 0)}) for m in matches
    ]
    # Sort by z for stable output.
    result.sort(key=lambda m: m.z_index)
    return result


def estimate_text_color_and_size(effect: np.ndarray, left: int, top: int, right: int, bottom: int) -> Tuple[Tuple[int, int, int], int]:
    """Estimate the (r, g, b) text color and pixel font size for an OCR box.

    The color is the median color of edge pixels inside the box — Sobel
    gradients plus an Otsu threshold highlight the text strokes. When too
    few edge pixels exist, the mean ROI color is used instead. The font
    size is a fixed 0.8 fraction of the box height, floored at 12 px.
    """
    x1, y1, x2, y2 = max(0, left), max(0, top), right, bottom
    roi = effect[y1:y2, x1:x2]
    if roi.size == 0:
        # Empty/invalid box: default to white and a height-derived size.
        return (255, 255, 255), max(12, bottom - top)

    # Edge-based mask highlights text strokes.
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3, 3), 0)
    grad_x = cv2.Sobel(blur, cv2.CV_16S, 1, 0)
    grad_y = cv2.Sobel(blur, cv2.CV_16S, 0, 1)
    grad = cv2.convertScaleAbs(cv2.addWeighted(cv2.convertScaleAbs(grad_x), 0.5, cv2.convertScaleAbs(grad_y), 0.5, 0))
    _, edge_mask = cv2.threshold(grad, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    ys, xs = np.where(edge_mask > 0)
    font_size = max(12, int(0.8 * (y2 - y1)))
    if len(xs) < 10:
        # Too few stroke pixels: fall back to the mean ROI color (BGR -> RGB).
        # (The original also computed an unused median-based `color` here;
        # that dead statement has been removed.)
        bgr_mean = tuple(int(v) for v in np.mean(roi.reshape(-1, 3), axis=0))
        return (bgr_mean[2], bgr_mean[1], bgr_mean[0]), font_size

    sampled = roi[ys, xs]
    bgr = np.median(sampled, axis=0)
    b, g, r = int(bgr[0]), int(bgr[1]), int(bgr[2])
    return (r, g, b), font_size


def run_ocr_with_weixin(effect_path: str) -> List[OcrText]:
    """Run OCR on the effect image via WeChat's OCR engine.

    Tries the `wcocr` binding first, then falls back to the project's
    `OCR.wechat_ocr` wrapper; on total failure it returns an empty list
    rather than raising. Each raw OCR box is enriched with an estimated
    text color and font size sampled from the effect image.

    Side effect: inserts the sibling `wei-xin-msg` directory into sys.path
    so the fallback import can resolve.
    """
    # Prefer direct wcocr to avoid extra deps; fallback to OCR.wechat_ocr
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    wx_dir = os.path.join(root_dir, 'wei-xin-msg')  # sibling project that ships the OCR binding
    if wx_dir not in sys.path:
        sys.path.insert(0, wx_dir)

    ocr_raw: List[Dict[str, Any]] = []
    # 1) Try wcocr directly
    try:
        import wcocr  # type: ignore
        wechat_path = os.path.join(wx_dir, 'path')
        wechatocr_path = os.path.join(wechat_path, 'WeChatOCR', 'WeChatOCR.exe')
        wcocr.init(wechatocr_path, wechat_path)
        res = wcocr.ocr(effect_path)
        # NOTE(review): assumes wcocr returns {'ocr_response': [...]} where each
        # item is a dict with text/left/top/right/bottom keys — confirm against
        # the wcocr version bundled in wei-xin-msg.
        ocr_raw = res.get('ocr_response', []) if isinstance(res, dict) else []
    except Exception as exc:  # pylint: disable=broad-except
        print(f"wcocr path failed: {exc}")
        # 2) Fallback to OCR.wechat_ocr if available
        try:
            from OCR import wechat_ocr  # type: ignore
            ocr_raw = wechat_ocr(effect_path)
        except Exception as exc2:  # pylint: disable=broad-except
            print(f"Failed to import OCR from wei-xin-msg: {exc2}")
            ocr_raw = []

    effect = load_effect_image(effect_path)
    texts: List[OcrText] = []
    for item in ocr_raw:
        # Missing coordinates degrade to a zero-area box at (left, top).
        left = int(item.get('left', 0))
        top = int(item.get('top', 0))
        right = int(item.get('right', left))
        bottom = int(item.get('bottom', top))
        color_rgb, font_size = estimate_text_color_and_size(effect, left, top, right, bottom)
        texts.append(OcrText(
            text=str(item.get('text', '')),
            left=left,
            top=top,
            right=right,
            bottom=bottom,
            color_rgb=color_rgb,
            font_size_px=font_size,
        ))
    return texts


def visualize_preview(effect: np.ndarray, matches: List[AssetMatch], texts: List[OcrText]) -> np.ndarray:
    """Return a copy of the effect image annotated with asset boxes (green) and OCR boxes (orange)."""
    canvas = effect.copy()
    # Asset placements with name / z-index / score captions.
    for m in matches:
        top_left = (m.x, m.y)
        bottom_right = (m.x + m.width, m.y + m.height)
        cv2.rectangle(canvas, top_left, bottom_right, (0, 255, 0), 2)
        caption = f"{m.name} z={m.z_index} s={m.score:.2f}"
        cv2.putText(canvas, caption, (m.x, max(0, m.y - 6)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 200, 0), 1, cv2.LINE_AA)
    # OCR boxes with (truncated) recognized text.
    for t in texts:
        cv2.rectangle(canvas, (t.left, t.top), (t.right, t.bottom), (0, 128, 255), 2)
        cv2.putText(canvas, t.text[:12], (t.left, max(0, t.top - 6)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 128, 255), 1, cv2.LINE_AA)
    return canvas


def export_json(output_dir: str, canvas_w: int, canvas_h: int, matches: List[AssetMatch], texts: List[OcrText]) -> str:
    """Write the reconstruction result to <output_dir>/result.json and return its path.

    The JSON contains the canvas size, flat layer/text lists, the z stack
    (bottom -> top), and a containment-based hierarchy tree rooted at the
    full canvas. Non-finite match scores are nulled so the file stays
    strictly JSON-compliant.
    """
    # Build z_stack (bottom -> top)
    z_stack = [{"name": m.name, "z_index": m.z_index} for m in sorted(matches, key=lambda m: m.z_index)]

    # Build hierarchy tree by containment
    def rect_of(obj: Union[AssetMatch, OcrText]) -> Tuple[int, int, int, int]:
        # Normalize both node kinds to (x1, y1, x2, y2).
        if isinstance(obj, AssetMatch):
            return obj.x, obj.y, obj.x + obj.width, obj.y + obj.height
        return obj.left, obj.top, obj.right, obj.bottom

    def contains(a: Tuple[int, int, int, int], b: Tuple[int, int, int, int], tol: int = 1) -> bool:
        # True when rect a contains rect b, with tol pixels of slack per edge.
        ax1, ay1, ax2, ay2 = a
        bx1, by1, bx2, by2 = b
        return ax1 - tol <= bx1 and ay1 - tol <= by1 and ax2 + tol >= bx2 and ay2 + tol >= by2

    nodes: List[Dict[str, Any]] = []
    for m in matches:
        nodes.append({
            "id": f"asset::{m.name}",
            "type": "asset",
            "name": m.name,
            "bounds": {"x": m.x, "y": m.y, "w": m.width, "h": m.height},
            "z_index": m.z_index,
            "ref": m,  # kept for geometry lookups below; stripped before serializing
        })
    for i, t in enumerate(texts):
        nodes.append({
            "id": f"text::{i}",
            "type": "text",
            "name": t.text,
            "bounds": {"x": t.left, "y": t.top, "w": t.right - t.left, "h": t.bottom - t.top},
            "style": {"color_rgb": t.color_rgb, "font_size_px": t.font_size_px},
            "ref": t,
        })

    # Parent = the smallest asset rect that contains the node (assets and texts alike).
    root_id = "root"
    parent: Dict[str, str] = {}
    children: Dict[str, List[str]] = {root_id: []}

    asset_nodes = [n for n in nodes if n["type"] == "asset"]
    for n in nodes:
        rb = rect_of(n["ref"])  # type: ignore[index]
        candidates = []
        for a in asset_nodes:
            if a["id"] == n["id"]:
                continue
            ab = rect_of(a["ref"])  # type: ignore[index]
            if contains(ab, rb):
                area = (ab[2] - ab[0]) * (ab[3] - ab[1])
                candidates.append((area, a))
        if candidates:
            # choose smallest containing area
            _, p = min(candidates, key=lambda x: x[0])
            pid = p["id"]
        else:
            pid = root_id
        parent[n["id"]] = pid
        children.setdefault(pid, []).append(n["id"])
        children.setdefault(n["id"], [])

    def serialize(node_id: str) -> Dict[str, Any]:
        # Recursively convert the parent/children maps into a nested dict tree,
        # dropping the non-serializable "ref" objects. Children are sorted by id
        # for deterministic output.
        if node_id == root_id:
            node_info = {"type": "root", "name": "root", "bounds": {"x": 0, "y": 0, "w": canvas_w, "h": canvas_h}}
        else:
            n = next(x for x in nodes if x["id"] == node_id)
            node_info = {k: v for k, v in n.items() if k not in ("ref",)}
        node_info["children"] = [serialize(cid) for cid in sorted(children.get(node_id, []))]
        return node_info

    tree = serialize(root_id)

    # Ensure finite scores for JSON (avoid Infinity); clamp the rest to [0, 1].
    safe_layers = []
    for m in matches:
        d = asdict(m)
        sc = d.get("score", 0.0)
        if not math.isfinite(sc):
            d["score"] = None
        else:
            d["score"] = max(0.0, min(1.0, float(sc)))
        safe_layers.append(d)

    data = {
        "canvas": {"width": canvas_w, "height": canvas_h},
        "layers": safe_layers,
        "texts": [asdict(t) for t in texts],
        "z_stack": z_stack,
        "tree": tree,
    }
    os.makedirs(output_dir, exist_ok=True)
    out_path = os.path.join(output_dir, 'result.json')
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    return out_path


def save_image(path: str, image: np.ndarray) -> None:
    """Write *image* to *path*, creating parent directories as needed.

    Fix: os.makedirs('') raises FileNotFoundError, so the parent directory
    is only created when the path actually has a directory component
    (the original crashed for bare filenames like 'preview.png').
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    # NOTE(review): cv2.imwrite may fail on non-ASCII paths on Windows;
    # consider cv2.imencode + ndarray.tofile for symmetry with imread_unicode.
    cv2.imwrite(path, image)


def rebuild_ui(effect_path: str, assets_dir: str, output_dir: str) -> Dict[str, str]:
    """End-to-end pipeline: match assets, infer z-order, OCR the text, export JSON + preview.

    Returns a dict with the paths of the written 'json' and 'preview' files.
    """
    effect = load_effect_image(effect_path)
    canvas_h, canvas_w = effect.shape[:2]
    # Collect candidate asset images.
    assets_all = list_asset_images(assets_dir)
    # Exclude the effect image itself if it resides in the same folder.
    assets = [(n, p) for (n, p) in assets_all if os.path.abspath(p) != os.path.abspath(effect_path)]

    matches_all = find_assets_positions(effect, assets)
    # Separate matched vs unmatched: z-order is only meaningful for matched assets.
    matched = [m for m in matches_all if m.matched]
    unmatched = [m for m in matches_all if not m.matched]
    matches = infer_z_order(effect, matched)

    texts = run_ocr_with_weixin(effect_path)

    preview = visualize_preview(effect, matches, texts)

    # Unmatched assets are still exported so the JSON records every asset.
    json_path = export_json(output_dir, canvas_w, canvas_h, matches + unmatched, texts)
    preview_path = os.path.join(output_dir, 'preview.png')
    save_image(preview_path, preview)

    return {"json": json_path, "preview": preview_path}


