import ctypes
import os
import platform
import threading
import tkinter as tk

import easyocr
import keyboard
import mss
import numpy as np
import pytesseract
import win32api
import win32con
from PIL import Image

from DeepSeek import DeepSeek


# Opt out of DWM bitmap scaling on Windows so that Tk window geometry and
# mss capture coordinates agree with physical pixels on high-DPI displays.
if platform.system() == "Windows":
    try:
        ctypes.windll.user32.SetProcessDPIAware()
    except Exception:
        # Best effort: very old Windows builds may not expose this API.
        pass

# Absolute path to the Tesseract binary; pytesseract shells out to it.
pytesseract.pytesseract.tesseract_cmd = r'D:\software_w\ORC\tesseract.exe'

# DeepSeek API key: prefer the environment variable so the secret is never
# committed to source control; the original literal remains as the fallback
# for backward compatibility.
api_key = os.environ.get("DEEPSEEK_API_KEY", "DeepSeek_api_key")


class MultiMonitorCapture:
    """Fullscreen selection overlay: drag a rectangle, OCR it, translate it.

    Creating an instance opens a semi-transparent borderless Tk overlay
    spanning the whole Windows virtual desktop (all monitors) and blocks in
    its mainloop until the session ends (Escape, or dismissing the
    translation window).
    """

    # Selections narrower/shorter than this (px) are ignored: mss cannot grab
    # a zero-sized region, and tiny crops yield no usable OCR input.
    MIN_SELECTION = 5

    # Cached EasyOCR reader -- model initialisation is expensive, so build it
    # once per process instead of once per capture.
    _easyocr_reader = None

    def __init__(self):
        # Virtual-screen origin and size. The origin can be negative when a
        # secondary monitor sits left of / above the primary one.
        self.vx = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        self.vy = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
        self.vw = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        self.vh = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)

        self.start_x = self.start_y = self.end_x = self.end_y = 0
        self.rect = None

        self.root = tk.Tk()
        self.root.geometry(f"{self.vw}x{self.vh}+{self.vx}+{self.vy}")
        self.root.overrideredirect(True)  # borderless, covers taskbars too
        self.root.attributes('-alpha', 0.3)
        self.root.config(bg='black')

        self.canvas = tk.Canvas(self.root, cursor="cross", bg="gray", highlightthickness=0)
        self.canvas.pack(fill=tk.BOTH, expand=True)

        self.canvas.bind('<ButtonPress-1>', self.on_mouse_press)
        self.canvas.bind('<B1-Motion>', self.on_mouse_drag)
        self.canvas.bind('<ButtonRelease-1>', self.on_mouse_release)
        self.root.bind('<Escape>', lambda e: self.root.destroy())
        self.translation_window = None
        self.root.mainloop()  # blocks until this capture session is over

    def on_mouse_press(self, event):
        """Anchor the selection rectangle at the press position."""
        self.start_x = self.canvas.canvasx(event.x)
        self.start_y = self.canvas.canvasy(event.y)
        if self.rect:
            self.canvas.delete(self.rect)
        self.rect = self.canvas.create_rectangle(self.start_x, self.start_y,
                                                 self.start_x, self.start_y,
                                                 outline='red', width=2)

    def on_mouse_drag(self, event):
        """Live-resize the rubber-band rectangle while the button is held."""
        cur_x = self.canvas.canvasx(event.x)
        cur_y = self.canvas.canvasy(event.y)
        self.canvas.coords(self.rect, self.start_x, self.start_y, cur_x, cur_y)

    def on_mouse_release(self, event):
        """Finish the selection and kick off capture + OCR off the UI thread."""
        self.end_x = self.canvas.canvasx(event.x)
        self.end_y = self.canvas.canvasy(event.y)

        # Ignore accidental clicks / degenerate rectangles: a zero-width or
        # zero-height region would make sct.grab() raise.
        if (abs(self.end_x - self.start_x) < self.MIN_SELECTION
                or abs(self.end_y - self.start_y) < self.MIN_SELECTION):
            return

        # Hide the tinted overlay BEFORE grabbing the screen, otherwise the
        # 30%-alpha gray veil is baked into the screenshot and degrades OCR.
        self.root.withdraw()
        # Small delay gives the compositor time to actually remove the
        # overlay before the grab happens on the worker thread.
        self.root.after(
            150,
            lambda: threading.Thread(target=self.capture_and_ocr, daemon=True).start(),
        )

    def capture_and_ocr(self):
        """Grab the selected region, OCR it twice, translate, and display.

        Runs on a daemon worker thread; the UI update is marshalled back to
        the Tk mainloop via root.after.
        """
        # Canvas coords are relative to the overlay window, which is placed
        # at the virtual-screen origin -- add (vx, vy) for absolute desktop px.
        x1 = int(min(self.start_x, self.end_x) + self.vx)
        y1 = int(min(self.start_y, self.end_y) + self.vy)
        x2 = int(max(self.start_x, self.end_x) + self.vx)
        y2 = int(max(self.start_y, self.end_y) + self.vy)

        try:
            with mss.mss() as sct:
                monitor = {
                    "top": y1,
                    "left": x1,
                    "width": x2 - x1,
                    "height": y2 - y1
                }
                sct_img = sct.grab(monitor)
                img = Image.frombytes("RGB", sct_img.size, sct_img.rgb)

            gray = img.convert("L")

            config = r'--oem 3 --psm 6 -l jpn+chi_sim+eng'
            raw_text = pytesseract.image_to_string(gray, config=config).strip()

            # Lazily build and cache the EasyOCR reader (Japanese + English).
            if MultiMonitorCapture._easyocr_reader is None:
                MultiMonitorCapture._easyocr_reader = easyocr.Reader(['ja', 'en'])
            result = MultiMonitorCapture._easyocr_reader.readtext(np.array(img), detail=0)
            ocr_text = "\n".join(result)

            # Both OCR engines are printed for side-by-side comparison.
            print("\n🧾 pytesseract识别结果：", raw_text)
            print("\n🧾 easyocr识别结果：", ocr_text)

            dpAI = DeepSeek(api_key)
            translated_text = dpAI.translate(raw_text)
            print(f"翻译：{translated_text}")
        except Exception as exc:
            # Without this, a worker-thread failure dies silently and leaves
            # the withdrawn root mainloop running forever.
            print(f"capture/OCR failed: {exc}")
            self.root.after(0, self.root.destroy)
            return

        self.root.after(0, lambda: self.show_translation_window(
            text=translated_text,
            left=x1,
            top=y2 + 10,
            width=x2 - x1
        ))

    def show_translation_window(self, text, left, top, width):
        """Show the translation in a borderless topmost tooltip near the selection.

        The window is placed below the selection when it fits on the virtual
        screen, otherwise above it. Click it (or press Escape) to dismiss,
        which ends this capture session.
        """
        top_win = tk.Toplevel(self.root)
        top_win.overrideredirect(True)  # no title bar / close button
        top_win.attributes('-topmost', True)
        top_win.configure(bg='white')

        padding = 10
        wrap_len = max(200, int(width))  # never wrap narrower than 200 px
        label = tk.Label(top_win, text=text, bg='white', fg='black',
                         font=('Microsoft YaHei', 14), justify='left', wraplength=wrap_len)
        label.pack(padx=padding, pady=padding)

        # The borderless window is otherwise undismissable: close on click or
        # Escape. Destroying the (hidden) root ends this session's mainloop.
        top_win.bind('<Button-1>', lambda e: self.root.destroy())
        top_win.bind('<Escape>', lambda e: self.root.destroy())
        top_win.focus_set()

        # Measure the rendered window so it can be centred on the selection.
        top_win.update_idletasks()
        win_w = top_win.winfo_width()
        win_h = top_win.winfo_height()
        screen_bottom = self.vh + self.vy  # bottom edge of the virtual screen
        pos_x = int(left + width / 2 - win_w / 2)

        pos_y_below = int(top + 10)
        pos_y_above = int(top - 10 - win_h)

        # Prefer below the selection; fall back to above when it would clip.
        if pos_y_below + win_h > screen_bottom:
            pos_y = max(pos_y_above, 0)
        else:
            pos_y = pos_y_below

        top_win.geometry(f"{win_w}x{win_h}+{pos_x}+{pos_y}")
        self.translation_window = top_win


def start_capture_thread():
    """Launch a new selection overlay on its own thread.

    Each invocation constructs a fresh MultiMonitorCapture, whose __init__
    runs its own Tk mainloop until the session ends. The thread is a daemon
    so a lingering overlay can never keep the process alive after the
    hotkey listener exits.
    """
    threading.Thread(target=MultiMonitorCapture, daemon=True).start()


def listen_hotkey():
    """Register the global Ctrl+P hotkey and block forever serving it.

    keyboard.add_hotkey installs a system-wide hook that fires
    start_capture_thread on every Ctrl+P press; keyboard.wait() with no
    argument never returns, keeping the process alive to service the hook.
    """
    print("监听快捷键 Ctrl+p，按下开始截图...")
    keyboard.add_hotkey('ctrl+p', start_capture_thread)
    keyboard.wait() 

if __name__ == "__main__":
    listen_hotkey()
