from typing import Optional, Tuple
from dataclasses import dataclass
import io
import time
import base64
import difflib
import re

# Lazy imports to keep optional deps from breaking import
try:
    import mss
    from PIL import Image
except Exception:  # pragma: no cover
    mss = None
    Image = None

try:
    import httpx
except Exception:  # pragma: no cover
    httpx = None

try:
    import pyautogui
    pyautogui.FAILSAFE = False
except Exception:  # pragma: no cover
    pyautogui = None

try:
    from paddleocr import PaddleOCR
    import numpy as np
except BaseException:  # broaden to catch SystemExit from broken deps
    PaddleOCR = None
    np = None

from .config import (
    VISUAL_ENABLE,
    VISUAL_ENGINE,
    ALIYUN_BASE_URL,
    ALIYUN_VISION_MODEL,
    DASHSCOPE_API_KEY,
    CV_OCR_FALLBACK,
    CV_CLICK_RETRY,
    CV_SEARCH_TIMEOUT_SEC,
    CV_SAMPLE_DOWNSCALE,
)


@dataclass
class BBox:
    """Axis-aligned rectangle in pixel coordinates (top-left origin)."""

    x: int  # left edge
    y: int  # top edge
    w: int  # width
    h: int  # height

    def center(self) -> Tuple[int, int]:
        """Return the integer midpoint of the box (floor division)."""
        cx = self.x + self.w // 2
        cy = self.y + self.h // 2
        return (cx, cy)


class VisionEngine:
    """
    Vision-driven UI automation: capture screen, locate target, click/type.

    Primary flow:
      1) capture_screen -> PIL.Image (optionally downscaled for speed)
      2) find_text_bbox(text) via Aliyun vision (preferred) or OCR fallback
      3) click_bbox + optional type_text

    All bounding boxes returned by the public locate/list methods are mapped
    back to full-resolution screen coordinates, even when the captured frame
    was downscaled via CV_SAMPLE_DOWNSCALE.

    Note: `Image.Image` annotations are quoted throughout so the module still
    imports when PIL is unavailable (`Image` is None then, and an unquoted
    annotation would raise at class-definition time).
    """

    def __init__(self) -> None:
        self.enabled = VISUAL_ENABLE
        self.engine = VISUAL_ENGINE
        # Downscale factor actually applied to the most recent capture
        # (1.0 means the frame is at native screen resolution).
        self._last_scale: float = 1.0
        self.ocr = None
        if CV_OCR_FALLBACK and PaddleOCR is not None:
            try:
                # Chinese-friendly OCR; adjust `lang` as needed.
                self.ocr = PaddleOCR(use_angle_cls=True, lang="ch")
            except BaseException:  # broaden to catch SystemExit from paddle/protobuf conflicts
                self.ocr = None

    # ---------- internal helpers ----------
    @staticmethod
    def _configured_downscale() -> float:
        """Sanitized CV_SAMPLE_DOWNSCALE: a factor in (0, 1), else 1.0."""
        try:
            s = float(CV_SAMPLE_DOWNSCALE or 0.0)
        except (TypeError, ValueError):
            return 1.0
        return s if 0.0 < s < 1.0 else 1.0

    def _to_screen_bbox(self, bb: BBox) -> BBox:
        """Map a bbox measured on the captured (possibly downscaled) frame
        back to full-resolution screen coordinates."""
        s = self._last_scale
        if s >= 1.0:
            return bb
        inv = 1.0 / s
        return BBox(int(bb.x * inv), int(bb.y * inv), int(bb.w * inv), int(bb.h * inv))

    # ---------- screen capture ----------
    def capture_screen(self) -> Optional["Image.Image"]:
        """Grab the full virtual screen as a PIL RGB image.

        Returns None when mss/PIL are unavailable or the grab fails.
        Records the downscale factor actually applied in ``self._last_scale``
        so bboxes measured on this frame can be mapped back to screen space.
        """
        if mss is None or Image is None:
            return None
        self._last_scale = 1.0
        try:
            with mss.mss() as sct:
                # monitors[0] is the combined virtual screen spanning all displays.
                raw = sct.grab(sct.monitors[0])
                pil = Image.frombytes("RGB", (raw.width, raw.height), raw.rgb)
                # Optionally downscale for faster downstream processing.
                scale = self._configured_downscale()
                if scale < 1.0:
                    try:
                        w = max(1, int(pil.width * scale))
                        h = max(1, int(pil.height * scale))
                        pil = pil.resize((w, h))
                        self._last_scale = scale
                    except Exception:
                        # Resize failed: keep the full-resolution frame
                        # (and _last_scale stays 1.0, so mapping is a no-op).
                        pass
                return pil
        except Exception:
            return None

    def save_screen_debug(self, path: str) -> bool:
        """Capture the screen and save it as a PNG at `path`; return success."""
        img = self.capture_screen()
        if img is None:
            return False
        try:
            img.save(path, format="PNG")
            return True
        except Exception:
            return False

    def _image_to_bytes(self, img: "Image.Image") -> bytes:
        """Encode a PIL image as PNG bytes."""
        buf = io.BytesIO()
        img.save(buf, format="PNG")
        return buf.getvalue()

    # ---------- detectors ----------
    def find_text_bbox(self, text: str) -> Optional[BBox]:
        """Locate `text` on screen; return a BBox in screen coordinates, or None."""
        if not self.enabled:
            return None
        img = self.capture_screen()
        if img is None:
            return None

        # Prefer the Aliyun vision model when selected.
        if self.engine == "aliyun":
            bbox = self._aliyun_locate_text(img, text)
            if bbox:
                # The model saw the (possibly downscaled) frame; rescale.
                return self._to_screen_bbox(bbox)

        # Fall back to local OCR.
        if CV_OCR_FALLBACK:
            bbox = self._ocr_locate_text(img, text)
            if bbox:
                return self._to_screen_bbox(bbox)
        return None

    def _aliyun_locate_text(self, img: "Image.Image", text: str) -> Optional[BBox]:
        """
        Use the Aliyun DashScope OpenAI-compatible chat API to locate `text`.

        Returns a BBox in the coordinate space of `img` (the frame that was
        sent), or None on any failure. Best-effort: all errors are swallowed.
        """
        if httpx is None or not DASHSCOPE_API_KEY:
            return None
        try:
            url = ALIYUN_BASE_URL.rstrip('/') + "/chat/completions"
            headers = {
                "Authorization": f"Bearer {DASHSCOPE_API_KEY}",
                "Content-Type": "application/json",
            }
            img_b64 = base64.b64encode(self._image_to_bytes(img)).decode("utf-8")
            instruction_text = (
                f"Locate the UI region containing text '{text}'. "
                "Return JSON object with 'bbox' having integer fields 'x','y','w','h'. "
                "Return only the JSON, no extra text."
            )
            # OpenAI-compatible multimodal content parts: "text" plus
            # "image_url" carrying a base64 data URL. (The previous
            # "input_text"/"input_image" part types are not part of the
            # compatible-mode schema and were rejected.)
            payload = {
                "model": ALIYUN_VISION_MODEL,
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": instruction_text},
                            {
                                "type": "image_url",
                                "image_url": {"url": f"data:image/png;base64,{img_b64}"},
                            },
                        ],
                    }
                ],
            }
            r = httpx.post(url, json=payload, headers=headers, timeout=15.0)
            if r.status_code != 200:
                return None
            data = r.json()
            # Expect an assistant message whose content is (mostly) JSON.
            msg = None
            try:
                choices = data.get("choices", [])
                for c in choices:
                    if c.get("finish_reason") == "stop":
                        msg = c.get("message", {}).get("content")
                        break
                if not msg and choices:
                    # Some backends report other finish reasons (e.g. length);
                    # fall back to the first answer rather than giving up.
                    msg = choices[0].get("message", {}).get("content")
            except Exception:
                msg = None
            if not msg:
                return None
            return self._parse_bbox_json(msg)
        except Exception:
            return None

    @staticmethod
    def _parse_bbox_json(msg: str) -> Optional[BBox]:
        """Parse ``{"bbox": {"x","y","w","h"}}`` from a model reply.

        Tolerates a surrounding markdown code fence; falls back to a regex
        scan when strict JSON parsing fails.
        """
        import json
        cleaned = msg.strip()
        # Models frequently wrap JSON in ```json ... ``` despite instructions.
        cleaned = re.sub(r"^```(?:json)?\s*|\s*```$", "", cleaned)
        try:
            obj = json.loads(cleaned)
            b = obj["bbox"]
            return BBox(int(b["x"]), int(b["y"]), int(b["w"]), int(b["h"]))
        except Exception:
            # Best-effort regex extraction from the raw reply.
            m = re.search(
                r"\{\s*\"bbox\"\s*:\s*\{\s*\"x\"\s*:\s*(\d+)\s*,\s*\"y\"\s*:\s*(\d+)\s*,\s*\"w\"\s*:\s*(\d+)\s*,\s*\"h\"\s*:\s*(\d+)\s*\}\s*\}",
                msg or "",
            )
            if m:
                x, y, w, h = map(int, m.groups())
                return BBox(x, y, w, h)
            return None

    def _ocr_candidates(self, img: "Image.Image", limit: Optional[int] = None,
                        offset: Tuple[int, int] = (0, 0)):
        """
        Run PaddleOCR on a PIL image and return a list of (text, BBox) pairs.

        BBoxes are in the image's own coordinate space, shifted by `offset`.
        Shared by the locate/list helpers (this parsing was previously
        duplicated three times).
        """
        if self.ocr is None or np is None:
            return []
        arr = np.array(img)
        if arr.ndim == 3 and arr.shape[2] == 3:
            bgr = arr[:, :, ::-1]  # PIL gives RGB; Paddle expects BGR-ordered ndarray
        else:
            bgr = np.dstack([arr] * 3)  # promote single-channel to 3 channels
        result = self.ocr.ocr(bgr, det=True, rec=True)
        ox, oy = offset
        items = []
        for line in (result or []):
            for item in line:
                pts = item[0]             # 4-point detection polygon
                txt = (item[1][0] or "")  # recognized string
                xs = [p[0] for p in pts]
                ys = [p[1] for p in pts]
                x, y = int(min(xs)), int(min(ys))
                w, h = int(max(xs) - x), int(max(ys) - y)
                items.append((txt, BBox(x + ox, y + oy, w, h)))
                if limit is not None and len(items) >= limit:
                    return items
        return items

    def _ocr_locate_text(self, img: "Image.Image", text: str) -> Optional[BBox]:
        """Fuzzy-match `text` against OCR results; bbox is in `img` coordinates."""
        try:
            candidates = self._ocr_candidates(img)
            if not candidates:
                return None

            def _norm(s: str) -> str:
                s = (s or "").lower().strip()
                return re.sub(r"\s+", "", s)

            target = _norm(text)
            best: Optional[BBox] = None
            best_score = -1.0
            for txt, bb in candidates:
                tn = _norm(txt)
                # Similarity dominates; the tiny area term breaks ties toward
                # larger (more clickable) regions.
                score = (
                    difflib.SequenceMatcher(None, tn, target).ratio() * 0.7
                    + (bb.w * bb.h) * 1e-6
                )
                # Exact containment must beat any fuzzy-only candidate. (The
                # previous logic selected a containment match without raising
                # best_score, so a later fuzzy candidate could override it;
                # it also treated an empty target as contained in everything.)
                if tn and target and target in tn:
                    score += 1.0
                if score > best_score:
                    best_score = score
                    best = bb
            return best
        except Exception:
            return None

    # ---------- actions ----------
    def click_bbox(self, bb: BBox) -> bool:
        """Move to the bbox center (screen coordinates) and click; return success."""
        if pyautogui is None:
            return False
        try:
            x, y = bb.center()
            pyautogui.moveTo(x, y, duration=0.05)
            pyautogui.click(x, y)
            return True
        except Exception:
            return False

    def click_text(self, text: str) -> bool:
        """Find `text` on screen and click it.

        Retries up to CV_CLICK_RETRY attempts within CV_SEARCH_TIMEOUT_SEC
        (these config values were imported but previously unused). With a
        retry count of 1 or a zero timeout this degenerates to the original
        single-attempt behavior.
        """
        try:
            attempts = max(1, int(CV_CLICK_RETRY))
        except Exception:
            attempts = 1
        try:
            deadline = time.time() + max(0.0, float(CV_SEARCH_TIMEOUT_SEC))
        except Exception:
            deadline = time.time()
        for attempt in range(attempts):
            bb = self.find_text_bbox(text)
            if bb and self.click_bbox(bb):
                return True
            if attempt + 1 >= attempts or time.time() >= deadline:
                break
            time.sleep(0.2)  # brief pause before re-capturing the screen
        return False

    def type_text(self, text: str, press_enter: bool = False) -> bool:
        """Type `text` via pyautogui; optionally press Enter afterwards.

        NOTE(review): pyautogui.typewrite only emits characters it has key
        mappings for; non-ASCII text (e.g. Chinese) may be dropped — confirm
        against callers if that matters here.
        """
        if pyautogui is None:
            return False
        try:
            pyautogui.typewrite(text, interval=0.02)
            if press_enter:
                pyautogui.press("enter")
            return True
        except Exception:
            return False

    # ---------- OCR list helpers ----------
    def ocr_list_texts(self, limit: int = 50):
        """Return up to `limit` (text, BBox) pairs for the whole screen.

        BBoxes are mapped back to screen coordinates even when the capture
        was downscaled.
        """
        if self.ocr is None or np is None:
            return []
        img = self.capture_screen()
        if img is None:
            return []
        try:
            items = self._ocr_candidates(img, limit=limit)
            return [(txt, self._to_screen_bbox(bb)) for txt, bb in items]
        except Exception:
            return []

    # ---------- ROI helpers ----------
    def ocr_list_texts_in_roi(self, rect, limit: int = 50):
        """
        Perform OCR within `rect` (pywinauto-style left/top/right/bottom, in
        screen coordinates) and return (text, BBox) pairs in global screen space.
        """
        if self.ocr is None or np is None:
            return []
        img = self.capture_screen()
        if img is None:
            return []
        try:
            left = max(0, int(getattr(rect, 'left', 0)))
            top = max(0, int(getattr(rect, 'top', 0)))
            right = max(left + 1, int(getattr(rect, 'right', 0)))
            bottom = max(top + 1, int(getattr(rect, 'bottom', 0)))
            # The capture may be downscaled: convert the screen-space rect
            # into image space before cropping. (The previous code cropped
            # with raw screen coordinates, selecting the wrong region
            # whenever CV_SAMPLE_DOWNSCALE was active.)
            s = self._last_scale
            cl, ct = int(left * s), int(top * s)
            crop_box = (cl, ct, max(cl + 1, int(right * s)), max(ct + 1, int(bottom * s)))
            crop = img.crop(crop_box)
            items = self._ocr_candidates(crop, limit=limit)
            out = []
            for txt, bb in items:
                # Crop coords -> screen coords: unscale, then shift by the ROI origin.
                sb = self._to_screen_bbox(bb)
                out.append((txt, BBox(sb.x + left, sb.y + top, sb.w, sb.h)))
            return out
        except Exception:
            return []