"""
llama-server 托盘守护（支持多模型切换）
"""

import os, sys, time, signal, socket, subprocess, threading, logging
from pathlib import Path
from functools import lru_cache

import psutil, requests, webbrowser, keyboard, pystray
from PIL import Image

# ---------------- 日志 ----------------
# Root logger: INFO level, timestamped one-line records, written to stdout so
# output is visible when the (otherwise windowless) tray app is run from a console.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)]
)

# ---------------- 单实例 ----------------
# Optional Windows-only dependency (pywin32) used to enforce a single running
# instance via a named mutex. On other platforms, or when pywin32 is missing,
# the check is disabled (_WINDOWS stays False).
try:
    import win32event, win32api, winerror
    _WINDOWS = True
    _SINGLE_MUTEX = None  # will hold the mutex handle for the process lifetime
except ImportError:
    _WINDOWS = False
    _SINGLE_MUTEX = None

def single_instance_check() -> bool:
    """Return True when another instance already owns the named mutex.

    On platforms without pywin32 the check is unavailable and this always
    reports "no other instance". The created handle is parked in the
    module-global _SINGLE_MUTEX so it stays alive for the process lifetime.
    """
    global _SINGLE_MUTEX
    if not _WINDOWS:
        return False
    mutex_name = "llama_Manager_SingleInstance"
    _SINGLE_MUTEX = win32event.CreateMutex(None, False, mutex_name)
    already_running = win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS
    return already_running

# ---------------- 路径工具 ----------------
@lru_cache
def resource_path(rel: str) -> str:
    """Resolve *rel* against the application's base directory; return a string path.

    Absolute inputs are only normalised, never re-based. When frozen by
    PyInstaller (sys._MEIPASS present) the base is the parent of the
    extraction directory; otherwise it is the directory of this source file.
    Cached because the same resources are looked up repeatedly.
    """
    if os.path.isabs(rel):
        return os.path.normpath(rel)
    if hasattr(sys, "_MEIPASS"):
        # NOTE(review): `.parent` (not _MEIPASS itself) looks deliberate for a
        # layout with resources beside the bundle — confirm against the build.
        base = Path(sys._MEIPASS).parent
    else:
        base = Path(__file__).resolve().parent
    return str((base / rel).resolve())

# ---------------- 局域网 IP 自动选择 ----------------
def pick_access_host() -> str:
    """Best-effort pick of a LAN IPv4 address for building the access URL.

    Strategy, in order:
      1. Ask the OS routing table via a connected UDP socket (no packet is
         actually sent for UDP connect).
      2. Resolve the local hostname, restricted to IPv4.
      3. Fall back to loopback.

    Never raises: this runs at module import time (ACCESS_HOST below), and an
    unhandled socket.gaierror from getaddrinfo on an offline or misconfigured
    host previously crashed the whole program at startup.
    """
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            ip = s.getsockname()[0]
            if not ip.startswith("127."):
                return ip
    except Exception:
        pass
    try:
        # Restrict to AF_INET: an IPv6 literal here would break the plain
        # f"http://{host}:{port}" URL assembled from this value.
        for *_, sockaddr in socket.getaddrinfo(
                socket.gethostname(), None, socket.AF_INET):
            ip = sockaddr[0]
            if not ip.startswith("127."):
                return ip
    except socket.gaierror:
        # Hostname not resolvable (e.g. missing /etc/hosts entry) — fall through.
        pass
    return "127.0.0.1"

# ---------------- 模型管理 ----------------
# Directory that holds the llama-server binary and the .gguf model files.
MODEL_DIR      = Path(resource_path("llama"))
CURRENT_MODEL  = None          # filename of the currently selected model (set in main())
ALL_MODELS     = []            # .gguf filenames discovered by scan_models()

def scan_models():
    """Populate the ALL_MODELS global with the .gguf files found in MODEL_DIR.

    The names are sorted so that both the tray menu order and the default
    model chosen by main() (ALL_MODELS[0]) are deterministic — glob()'s raw
    order depends on filesystem enumeration. Exits the process when no model
    is present, since the server cannot start without one.
    """
    global ALL_MODELS
    ALL_MODELS = sorted(f.name for f in MODEL_DIR.glob("*.gguf"))
    if not ALL_MODELS:
        logging.error("未在 llama/ 目录下发现 .gguf 模型，程序即将退出")
        sys.exit(1)

# ---------------- Constants ----------------
HOST        = "0.0.0.0"                       # bind address: all interfaces
ACCESS_HOST = pick_access_host()              # LAN IP used in the URL we show/open
PORT        = 8080
SERVER_URL  = f"http://{ACCESS_HOST}:{PORT}"
CPU_COUNT   = psutil.cpu_count(logical=True)  # thread count passed to llama-server (-t)

# ---------------- Process management ----------------
server_proc = None   # subprocess.Popen handle of the running llama-server, or None
SERVER_CMD  = []     # last command line produced by build_cmd()

def build_cmd():
    """Assemble the llama-server command line for the currently selected model."""
    binary = "llama/llama-server.exe" if os.name == "nt" else "llama/llama-server"
    model_path = MODEL_DIR / CURRENT_MODEL

    cmd = [resource_path(binary)]
    cmd += ["--model", str(model_path)]
    cmd += ["--host", HOST]
    cmd += ["--port", str(PORT)]
    cmd += ["-c", "4096"]                  # context window size
    cmd += ["-ngl", "99"]                  # GPU layer offload count
    cmd += ["-t", str(CPU_COUNT)]          # CPU threads
    cmd += ["--mlock", "--no-mmap"]
    cmd += ["--temp", "0.5"]
    cmd += ["--cont-batching"]
    cmd += ["--prio", "2"]
    cmd += ["--log-disable"]
    cmd += ["--chat-template", "chatml"]
    return cmd

def kill_port_occupant(port):
    """Force-kill any process whose inet socket is bound to *port*.

    Best-effort cleanup used to free the server port before (re)starting
    llama-server; permission errors and vanished processes are swallowed.
    """
    for conn in psutil.net_connections(kind="inet"):
        if not conn.laddr or conn.laddr.port != port:
            continue
        # BUG FIX: conn.pid can be None (psutil reports None when it cannot
        # determine the owner). psutil.Process(None) refers to the CURRENT
        # process, so the old code could taskkill the tray daemon itself.
        if conn.pid is None:
            continue
        try:
            p = psutil.Process(conn.pid)
            if p.is_running():
                if os.name == "nt":
                    # /T kills the whole tree — the server may have children.
                    subprocess.run(
                        ["taskkill", "/F", "/T", "/PID", str(p.pid)],
                        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                        creationflags=subprocess.CREATE_NO_WINDOW,
                    )
                else:
                    p.kill()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            pass

def start_server():
    """Build the command line for CURRENT_MODEL and launch llama-server.

    Windows: the child's console window is hidden (STARTF_USESHOWWINDOW with
    the default wShowWindow, i.e. SW_HIDE). POSIX: the child gets its own
    session/process group so stop_server() can kill the whole group.
    Stores the Popen handle in the module-global server_proc.
    """
    global server_proc, SERVER_CMD
    SERVER_CMD = build_cmd()
    logging.info("启动服务：%s", " ".join(SERVER_CMD))
    kwargs = {}
    if os.name == "nt":
        si = subprocess.STARTUPINFO()
        si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        kwargs["startupinfo"] = si
    else:
        # Equivalent to preexec_fn=os.setsid, but preexec_fn is documented as
        # unsafe in multithreaded programs — and this app runs tray + hotkey
        # threads. start_new_session performs setsid() safely in the child.
        kwargs["start_new_session"] = True
    server_proc = subprocess.Popen(SERVER_CMD, **kwargs)

def stop_server():
    """Terminate the running llama-server (and its children), then free the port.

    Escalating, best-effort shutdown: taskkill /T on Windows (whole process
    tree), SIGKILL to the process group on POSIX, then a direct kill as a
    last resort. Finally sweeps the port for any surviving occupant.
    No-op when no server is tracked.
    """
    global server_proc
    if not server_proc:
        return
    try:
        if os.name == "nt":
            if server_proc.poll() is None:
                # /T also kills any child processes the server spawned.
                subprocess.run(
                    ["taskkill", "/F", "/T", "/PID", str(server_proc.pid)],
                    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                    creationflags=subprocess.CREATE_NO_WINDOW,
                )
        else:
            try:
                # Kill the whole session created by start_server (setsid).
                os.killpg(os.getpgid(server_proc.pid), signal.SIGKILL)
            except (ProcessLookupError, OSError):
                server_proc.kill()
        server_proc.wait(timeout=5)
    except subprocess.TimeoutExpired:
        server_proc.kill()
        server_proc.wait()
    finally:
        # Last-resort direct kill if anything above failed to reap it.
        if server_proc.poll() is None:
            server_proc.kill()
            server_proc.wait()
        server_proc = None
        time.sleep(0.5)  # give the OS a moment to release the listening socket
        kill_port_occupant(PORT)

def wait_server_ready(timeout=30) -> bool:
    """Poll the server's /health endpoint until it answers 200 or *timeout* elapses.

    Args:
        timeout: maximum seconds to wait (polled once per second).

    Returns:
        True when the server reported healthy, False on timeout.

    Uses time.monotonic() for the deadline so the wait is immune to wall-clock
    jumps (NTP corrections, manual changes), which time.time() is not.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if requests.get(f"{SERVER_URL}/health", timeout=1).status_code == 200:
                return True
        except requests.RequestException:
            # Server not up yet (connection refused / timed out) — keep polling.
            pass
        time.sleep(1)
    return False

# ---------------- 托盘 ----------------
def open_browser():
    """Open the server's web UI in the user's default browser."""
    url = SERVER_URL
    webbrowser.open(url, autoraise=True)

def on_exit(icon, item):
    """Tray-menu callback: remove the global hotkey, then stop the tray loop."""
    keyboard.unhook_all_hotkeys()  # removes the F3 hook registered in setup_tray()
    icon.stop()                    # makes tray.run() in setup_tray() return

def switch_model(icon, item):
    """Tray-menu callback: stop the server, swap in the chosen model, restart.

    The menu item's text is the model filename. Selecting the model that is
    already active is a no-op.
    """
    global CURRENT_MODEL
    selected = str(item.text)
    if selected == CURRENT_MODEL:
        return
    logging.info("切换模型 => %s", selected)
    stop_server()
    CURRENT_MODEL = selected
    start_server()
    if wait_server_ready():
        open_browser()
    else:
        logging.error("新模型启动失败")

def model_submenu():
    """Build the model-selection submenu; the active model gets a check mark."""
    items = []
    for name in ALL_MODELS:
        # Bind `name` as a default argument so each lambda captures its own
        # model instead of the loop's final value (late-binding closure).
        items.append(
            pystray.MenuItem(name, switch_model,
                             checked=lambda _, name=name: name == CURRENT_MODEL)
        )
    return pystray.Menu(*items)

def setup_tray():
    """Build the tray icon and menu, register the F3 hotkey, run the tray loop.

    Blocks until the icon is stopped (the "退出" menu item via on_exit).
    """
    img = Image.open(resource_path("Heart.ico"))
    menu = pystray.Menu(
        pystray.MenuItem("打开浏览器", open_browser, default=True),  # default = double-click action
        pystray.Menu.SEPARATOR,
        pystray.MenuItem("切换模型", model_submenu()),
        pystray.MenuItem("退出", on_exit)
    )
    tray = pystray.Icon("llama-server", img, "llama-server", menu)
    # Global F3 hotkey opens the browser. keyboard.wait() keeps the hook thread
    # alive indefinitely; daemon=True lets the process exit without joining it.
    threading.Thread(target=lambda: keyboard.add_hotkey("F3", open_browser) or keyboard.wait(),
                     daemon=True).start()
    tray.run()

# ---------------- 主流程 ----------------
def main():
    """Entry point: enforce single instance, pick a model, run server + tray.

    Exits with status 1 if the server fails its initial health check.
    """
    # Another instance already holds the mutex: just surface its UI and leave.
    if single_instance_check():
        open_browser()
        return

    scan_models()               # populate ALL_MODELS (exits if none found)
    global CURRENT_MODEL
    CURRENT_MODEL = ALL_MODELS[0]   # default to the first model so it is never None

    kill_port_occupant(PORT)    # free the port from any stale server
    start_server()
    if not wait_server_ready():
        stop_server()
        sys.exit(1)             # server never became healthy — bail out
    open_browser()
    setup_tray()                # blocks until the tray "退出" action fires
    stop_server()               # clean shutdown once the tray loop returns

if __name__ == "__main__":
    main()
