import cv2
import pytest
import torch
from iopaint.model.utils import torch_gc
from iopaint.model_manager import ModelManager
from iopaint.schema import HDStrategy
from iopaint.schema import LDMSampler, InpaintRequest, SDSampler

# Model under test. Other names seen in this project's manager include:
# ['lama', 'cv2', 'runwayml/stable-diffusion-inpainting', 'runwayml/stable-diffusion-v1-5']
model_name: str = "lama"


def check_device(device: str) -> int:
    """Skip the current pytest test when the requested accelerator is absent.

    Returns a step count suited to the device: a minimal 2 steps on CPU
    (to keep tests fast), 20 steps otherwise.
    """
    if device == "cuda":
        if not torch.cuda.is_available():
            pytest.skip("CUDA is not available, skip test on cuda")
    elif device == "mps":
        if not torch.backends.mps.is_available():
            pytest.skip("mps is not available, skip test on mps")
    return 2 if device == "cpu" else 20


def get_data(
        img_p="image.png",
        mask_p="mask.png",
):
    """Load an (image, mask) pair from disk for an inpainting run.

    Args:
        img_p: path to the input image; read as color and returned as RGB.
        mask_p: path to the mask image; read as single-channel grayscale.

    Returns:
        Tuple of (img, mask) numpy arrays: img is HxWx3 RGB, mask is HxW.

    Raises:
        FileNotFoundError: if either file cannot be read (cv2.imread
        returns None on failure instead of raising).
    """
    img = cv2.imread(str(img_p))
    if img is None:
        raise FileNotFoundError(f"Failed to read image: {img_p}")
    # imread with default flags always yields 3-channel BGR, so the correct
    # conversion code is BGR2RGB (the previous BGRA2RGB only worked because
    # OpenCV treats the BGR<->RGB family as a plain channel swap for 3-channel
    # input).
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    mask = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE)
    if mask is None:
        raise FileNotFoundError(f"Failed to read mask: {mask_p}")
    return img, mask


def get_config(**kwargs):
    """Build an InpaintRequest with test-friendly defaults.

    Any keyword argument overrides the defaults and is forwarded verbatim
    to InpaintRequest (including the ``strategy`` alias, which also seeds
    ``hd_strategy``).
    """
    defaults = {
        "sd_sampler": kwargs.get("sd_sampler", SDSampler.uni_pc),
        "ldm_steps": 1,
        "ldm_sampler": LDMSampler.plms,
        "hd_strategy": kwargs.get("strategy", HDStrategy.ORIGINAL),
        "hd_strategy_crop_margin": 32,
        "hd_strategy_crop_trigger_size": 200,
        "hd_strategy_resize_limit": 200,
    }
    defaults.update(kwargs)
    return InpaintRequest(image="", mask="", **defaults)


def clear_water(img_file, mask_file, save_path, device):
    """Inpaint ``img_file`` with ``mask_file`` and write the result to disk.

    Returns cv2.imwrite's success flag, or False when inference raised a
    RuntimeError (e.g. CUDA OOM). GPU memory is released in all cases.
    """
    steps = check_device(device)
    manager = ModelManager(name=model_name, device=device, disable_nsfw=True)
    request = get_config(strategy=HDStrategy.ORIGINAL, sd_steps=steps)
    image, mask = get_data(img_p=img_file, mask_p=mask_file)
    print(f"Input image shape: {image.shape}")
    # Max JPEG quality / no PNG compression, so output artifacts don't mask
    # model artifacts when inspecting results.
    write_params = [
        int(cv2.IMWRITE_JPEG_QUALITY), 100,
        int(cv2.IMWRITE_PNG_COMPRESSION), 0,
    ]
    try:
        result = manager(image, mask, request)
        return cv2.imwrite(save_path, result, write_params)
    except RuntimeError as err:
        message = str(err)
        if "CUDA out of memory. " in message:
            # NOTE: the string may change?
            # return "CUDA out of memory", 500
            print("CUDA out of memory", 500)
        else:
            # return f"{str(e)}", 500
            print(message, 500)
    finally:
        torch_gc()
    return False
