import os
import time

# Record wall-clock start so total startup time can be reported at the end.
start_time = time.time()
print("Start time:", time.strftime(r"%Y-%m-%d %H:%M:%S", time.localtime(start_time)))
# Point cache/temp locations at local folders BEFORE the ML libraries below
# are imported so HuggingFace/torch/gradio pick them up; pre-existing
# environment values always win over these defaults.
os.environ.update(
    {
        "TMP": os.environ.get("TMP", "TEMP"),
        "TEMP": os.environ.get("TEMP", "TEMP"),
        "HF_HOME": os.environ.get("HF_HOME", "models"),
        "ENV_PATH": os.environ.get("ENV_PATH", "birefnet"),
        "TORCH_HOME": os.environ.get("TORCH_HOME", "models"),
        "GRADIO_TEMP_DIR": os.environ.get("GRADIO_TEMP_DIR", "TEMP/Gradio"),
        "HF_ENDPOINT": os.environ.get("HF_ENDPOINT", "https://hf-mirror.com"),
    }
)

import cv2
import torch
import gradio
import psutil
import shutil
import zipfile
import warnings
import threading
import numpy as np
from PIL import Image
from glob import glob
from torchvision import transforms
from transformers import AutoModelForImageSegmentation

# Globals
latest_model = ""  # repo id of the model currently loaded into `birefnet`
identity = lambda x: x  # pass-through used when wiring gradio components
model_lock = threading.Lock()  # serializes model loading and inference
save_file_path: dict[str, str] = {}  # source path/key -> saved result path
device = "cuda" if torch.cuda.is_available() else "cpu"
# Configuration
warnings.filterwarnings("ignore")
torch.set_float32_matmul_precision("high")
# Clear cached result files from previous runs
SAVE_DIR = "TEMP/preds-BiRefNet"
shutil.rmtree("TEMP", ignore_errors=True)
os.makedirs(SAVE_DIR, exist_ok=True)


# Model loading
def load_model(model):
    """Load `model` into the global `birefnet`, skipping if already active.

    Thread-safe: the whole check-and-swap happens under `model_lock`, the
    same lock `predict` holds during inference.
    """
    global birefnet, latest_model
    with model_lock:
        if model == latest_model:
            return
        t0 = time.time()
        print(f"Using model: {model}.")

        net = AutoModelForImageSegmentation.from_pretrained(
            model, trust_remote_code=True, local_files_only=True
        )
        net.to(device)
        net.eval()
        birefnet = net
        latest_model = model
        print(f"Load [{model}] time: {time.time()-t0:.1f}s.")


# Warm the default model in the background so the UI can start immediately.
threading.Thread(target=load_model, args=("zhengpeng7/BiRefNet",)).start()

# Print startup info (device plus available VRAM/RAM)
print("Starting at: ", time.strftime(r"%Y-%m-%d %H:%M:%S", time.localtime()))
gpu = torch.cuda.get_device_properties(0) if torch.cuda.is_available() else None
print(f'Device: {gpu.name if gpu else "No GPU"}')
print(f"Total VRAM: {gpu.total_memory // (1024 * 1024)} MB" if gpu else "")
print(f"Total RAM: {psutil.virtual_memory().total // (1024 * 1024)} MB\n")

# (display name, HF repo id) choices for the model radio button
name_model_tuple = (
    ("General", "zhengpeng7/BiRefNet"),
    ("General-HR", "zhengpeng7/BiRefNet_HR"),
    ("General-Lite", "zhengpeng7/BiRefNet_lite"),
    ("General-dynamic", "zhengpeng7/BiRefNet_dynamic"),
    ("Matting", "zhengpeng7/BiRefNet-matting"),
    ("Matting-HR", "zhengpeng7/BiRefNet_HR-matting"),
    ("Portrait", "zhengpeng7/BiRefNet-portrait"),
)

# repo-id substring -> recommended resolution preset for that model variant
model_resolution_map = {
    "_HR": "2048x2048",
    "_2K": "2560x1440",
    "512": "512x512",
    "_dynamic": "auto",
}


def transform_image(size, image) -> torch.Tensor:
    """Resize `image` to `size`, tensorize, and ImageNet-normalize it."""
    pipeline = transforms.Compose(
        [
            transforms.Resize(size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    return pipeline(image)


def get_resolution(resolution, size) -> tuple[int, int]:
    """Resolve the working resolution for the model.

    Args:
        resolution: "WxH" string; empty/falsy or "auto" means "use `size`".
        size: fallback (width, height), e.g. the source image size.

    Returns:
        (width, height) floored to multiples of 32, but never below 32 —
        the original could return 0 for inputs smaller than 32 px, which
        would produce a degenerate resize downstream.
    """
    if resolution and resolution != "auto":
        parts = resolution.strip().split("x")
        w, h = int(parts[0]), int(parts[1])
    else:
        w, h = int(size[0]), int(size[1])
    return (max(32, w // 32 * 32), max(32, h // 32 * 32))


## CPU version refinement
def FB_blur_fusion_foreground_estimator_cpu(image, FG, B, alpha, r=90):
    """One blur-fusion pass of approximate foreground estimation (CPU).

    Local means of the premultiplied foreground FG*alpha and background
    B*(1-alpha) are un-premultiplied and corrected with the compositing
    residual. Arrays are floats in [0, 1]; alpha is passed by the caller
    as (H, W, 1) so it broadcasts over color channels.

    Returns (FG, blurred_B) where FG is clipped to [0, 1].
    """
    if isinstance(image, Image.Image):
        # Accept a raw PIL image; normalize to float [0, 1].
        image = np.array(image) / 255.0
    blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]

    # Un-premultiply the blurred foreground; the epsilon avoids division
    # by zero in fully-background regions.
    blurred_FGA = cv2.blur(FG * alpha, (r, r))
    blurred_FG = blurred_FGA / (blurred_alpha + 1e-5)

    blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
    blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
    # Correct the smooth estimate with the compositing residual.
    FG = blurred_FG + alpha * (image - alpha * blurred_FG - (1 - alpha) * blurred_B)
    FG = np.clip(FG, 0, 1)
    return FG, blurred_B


def FB_blur_fusion_foreground_estimator_cpu_2(image, alpha, r=90):
    """Two-pass blur-fusion foreground estimation on CPU.

    First pass uses a wide radius `r`, second pass refines with r=6.
    Credit: https://github.com/Photoroom/fast-foreground-estimation
    """
    alpha = alpha[:, :, None]
    coarse_fg, coarse_bg = FB_blur_fusion_foreground_estimator_cpu(
        image, image, image, alpha, r
    )
    refined_fg, _ = FB_blur_fusion_foreground_estimator_cpu(
        image, coarse_fg, coarse_bg, alpha, r=6
    )
    return refined_fg


## GPU version refinement
def mean_blur(x, kernel_size):
    """
    equivalent to cv.blur
    x:  [B, C, H, W]
    """
    half = kernel_size // 2
    if kernel_size % 2:
        # Odd kernel: symmetric padding on every side.
        pads = (half, half, half, half)
    else:
        # Even kernel: the extra row/column of the window sits on the
        # right/bottom, matching cv2's anchor convention.
        pads = (half - 1, half, half - 1, half)

    padded = torch.nn.functional.pad(x, pads, mode="replicate")

    return torch.nn.functional.avg_pool2d(
        padded,
        kernel_size=(kernel_size, kernel_size),
        stride=1,
        count_include_pad=False,
    )


def FB_blur_fusion_foreground_estimator_gpu(image, FG, B, alpha, r=90):
    """One blur-fusion pass of foreground estimation on [B, C, H, W] tensors.

    Computes in float32 to avoid overflow, then casts the results back to
    the input dtype. Returns (foreground clipped to [0, 1], blurred B).
    """
    input_dtype = image.dtype

    def to_f32(t):
        # Only convert when needed to skip redundant copies.
        return t if t.dtype == torch.float32 else t.to(torch.float32)

    image, FG, B, alpha = (to_f32(t) for t in (image, FG, B, alpha))

    alpha_blur = mean_blur(alpha, kernel_size=r)

    # Un-premultiply local means; epsilon guards against division by zero.
    fg_blur = mean_blur(FG * alpha, kernel_size=r) / (alpha_blur + 1e-5)
    bg_blur = mean_blur(B * (1 - alpha), kernel_size=r) / ((1 - alpha_blur) + 1e-5)

    # Correct the smooth estimate with the compositing residual.
    fg_out = fg_blur + alpha * (image - alpha * fg_blur - (1 - alpha) * bg_blur)
    fg_out = torch.clamp(fg_out, 0, 1)

    def restore(t):
        return t if t.dtype == input_dtype else t.to(input_dtype)

    return restore(fg_out), restore(bg_blur)


def FB_blur_fusion_foreground_estimator_gpu_2(image, alpha, r=90):
    """Two-pass blur-fusion foreground estimation on GPU tensors.

    Credit: https://github.com/ZhengPeng7/BiRefNet/issues/226#issuecomment-3016433728
    """
    coarse_fg, coarse_bg = FB_blur_fusion_foreground_estimator_gpu(
        image, image, image, alpha, r
    )
    refined_fg, _ = FB_blur_fusion_foreground_estimator_gpu(
        image, coarse_fg, coarse_bg, alpha, r=6
    )
    return refined_fg


def refine_foreground(image, mask, r=90, device="cuda"):
    """Refine matte edges via two-pass blur-fusion foreground estimation.

    Args:
        image: PIL image (RGB).
        mask: PIL mask; resized to image.size when they differ. Both are
            interpreted as values in [0, 1] after conversion.
        r: blur radius of the first fusion pass.
        device: "cuda" for the GPU path; anything else uses the numpy path.

    Returns:
        PIL image of the estimated foreground (uint8).

    Bugfix: the GPU branch used to run whenever device == "cuda" (the
    default), crashing with .cuda() on CPU-only machines; it now falls
    back to the CPU path when CUDA is unavailable.
    """
    if mask.size != image.size:
        mask = mask.resize(image.size)

    if device == "cuda" and torch.cuda.is_available():
        image = transforms.functional.to_tensor(image).float().cuda()
        mask = transforms.functional.to_tensor(mask).float().cuda()
        image = image.unsqueeze(0)
        mask = mask.unsqueeze(0)

        estimated_foreground = FB_blur_fusion_foreground_estimator_gpu_2(
            image, mask, r=r
        )

        # Back to a uint8 HWC numpy array for PIL.
        estimated_foreground = estimated_foreground.squeeze()
        estimated_foreground = (estimated_foreground.mul(255.0)).to(torch.uint8)
        estimated_foreground = (
            estimated_foreground.permute(1, 2, 0)
            .contiguous()
            .cpu()
            .numpy()
            .astype(np.uint8)
        )
    else:
        image = np.array(image, dtype=np.float32) / 255.0
        mask = np.array(mask, dtype=np.float32) / 255.0
        estimated_foreground = FB_blur_fusion_foreground_estimator_cpu_2(
            image, mask, r=r
        )
        estimated_foreground = (estimated_foreground * 255.0).astype(np.uint8)

    return Image.fromarray(np.ascontiguousarray(estimated_foreground))


def predict(images: str | list[str] | Image.Image, resolution, model: str):
    """Run BiRefNet matting on one image or a batch.

    Args:
        images: a file path, a list of file paths, or a PIL image.
        resolution: "WxH" string, "auto", or "" (use the image's own size).
        model: HF repo id of the BiRefNet variant to use.

    Returns:
        For list input: the list of saved RGBA PNG paths (a zip of them is
        also written and recorded under save_file_path["zip"]).
        For single input: (saved RGBA PNG path, original input), the pair
        shown by the gradio ImageSlider.
    """
    assert images is not None, "请先上传图片！"
    predict_time = time.time()

    global birefnet, save_file_path, zip_file_path, latest_model
    if model != latest_model:
        load_model(model)

    processed_images = []

    for image_src in images if isinstance(images, list) else [images]:
        orig_image = Image.open(image_src) if isinstance(image_src, str) else image_src
        w, h = orig_image.size
        image = orig_image.convert("RGB")
        # Preprocess the image
        reso = get_resolution(resolution, image.size)
        print(f"Using resolution: {reso}.")
        im_tensor = transform_image(reso, image)
        im_tensor = im_tensor.unsqueeze(0)
        im_tensor = im_tensor.to(device)
        # Inference (model_lock serializes access to the shared model)
        with torch.no_grad(), model_lock:
            result = birefnet(im_tensor)[-1].sigmoid().cpu()
        result = torch.squeeze(torch.nn.functional.interpolate(result, size=(h, w)))
        torch.cuda.empty_cache()
        # Refine edges with blur-fusion foreground estimation
        pred_pil = transforms.ToPILImage()(result)
        image_masked = refine_foreground(image, pred_pil)
        image_masked.putalpha(pred_pil.resize(image.size))

        if isinstance(image_src, str):
            file_path = os.path.join(
                SAVE_DIR, f"{os.path.splitext(os.path.basename(image_src))[0]}.png"
            )
            save_file_path[image_src] = file_path
        else:
            # Non-path inputs (e.g. the URL tab's PIL image) share one key.
            file_path = os.path.join(SAVE_DIR, "image.png")
            save_file_path["image"] = file_path

        image_masked.save(file_path)
        processed_images.append(file_path)

    print(f"Predict time: {time.time()-predict_time:.1f}s.")
    if isinstance(images, list):
        zip_file_path = os.path.join(SAVE_DIR, f"images.zip")
        with zipfile.ZipFile(zip_file_path, "w") as zipf:
            for file in processed_images:
                zipf.write(file, os.path.basename(file))
        save_file_path["zip"] = zip_file_path
        return processed_images

    return processed_images[0], images


# Build example rows from the bundled images.
examples = [[_] for _ in glob("examples/*")][:]
# Add the option of resolution in a text box.
for idx_example, example in enumerate(examples):
    examples[idx_example].append("1024x1024" if "My_" in example[0] else "2048x2048")
# Duplicate the last example with a low-resolution preset.
examples.append(examples[-1].copy())
examples[-1][1] = "512x512"

# Examples for the URL tab: (image URL, resolution preset).
examples_url = [
    ["https://img2.baidu.com/it/u=4257123520,1345811573&fm=253", "512x512"],
]

with gradio.Blocks(
    title="BiRefNet抠图",
    css=r".block.fullscreen{width: 100% !important}",  # fix: buttons hidden behind the scrollbar when an image is zoomed
) as demo:
    gradio.Markdown(r"#### 上传一张图片，模型将从中提取出主体的高精度分割图.")

    # Shared controls: resolution preset radio + free-form resolution textbox.
    with gradio.Row():
        with gradio.Column():
            resolution_radio = gradio.Radio(
                ("1024x1024",)
                + tuple(v for v in model_resolution_map.values() if v and v != "auto")
                + (("图像原大小", "auto"), ("自定义", "")),
                value="1024x1024",
                label="分辨率",
                info="推荐使用1024x1024分辨率！对于 BiRefNet_HR，建议使用2048x2048分辨率.",
            )
            resolution = gradio.Textbox(
                value="1024x1024",
                visible=False,
                show_label=False,
                placeholder="留空则为图片原大小(图片越大所需性能越高)",
            )

        model = gradio.Radio(
            name_model_tuple,
            value="zhengpeng7/BiRefNet",
            label="算法",
            info="HR：高分辨率；General：通用抠图；Matting：镂空或透明物体；Portrait: 人像抠图.",
        )
        # Show the textbox only for the "custom" preset (empty value).
        resolution_radio.change(
            lambda reso: (gradio.Textbox(visible=not reso, value=reso)),
            inputs=resolution_radio,
            outputs=resolution,
        )
        # Selecting a model switches the radio to its recommended resolution.
        model.change(
            lambda model: next(
                (v for k, v in model_resolution_map.items() if k in model), "1024x1024"
            ),
            inputs=model,
            outputs=resolution_radio,
        )

    # Tab 1: single local image.
    with gradio.Tab("本地图像", elem_id="image"):
        with gradio.Row(equal_height=True):
            with gradio.Column(min_width=500):
                with gradio.Row(scale=0):
                    image_clear_btn = gradio.Button("清除", variant="huggingface")
                    image_upload_btn = gradio.UploadButton(
                        "上传图片", file_types=["image"], variant="primary"
                    )
                with gradio.Row(max_height=600):
                    image_input = gradio.Image(
                        label="图像", height="100%", show_label=False, type="filepath"
                    )
            with gradio.Column(min_width=500):
                with gradio.Row(scale=0):
                    image_submit_btn = gradio.Button("开始抠图", variant="primary")
                    image_download_btn = gradio.DownloadButton(label="下载")
                image_output = gradio.ImageSlider(
                    label="抠图结果",
                    format="png",
                    height=600,
                    interactive=False,
                    show_download_button=False,
                )

        image_clear_btn.click(lambda: None, outputs=image_input)
        image_upload_btn.upload(identity, image_upload_btn, image_input)
        image_submit_btn.click(
            predict,
            inputs=[image_input, resolution, model],
            outputs=image_output,
        )
        # After a result appears, point the download button at the saved file
        # recorded by predict() under the input image's path.
        image_output.change(
            lambda img: gradio.DownloadButton(
                value=save_file_path[img], variant="primary"
            ),
            inputs=image_input,
            outputs=image_download_btn,
        )
        gradio.Examples(examples, inputs=[image_input, resolution_radio], label="示例")

    # Tab 2: image fetched from a URL.
    with gradio.Tab("网络图像"):
        with gradio.Row(equal_height=True):
            with gradio.Column(min_width=500):
                url_submit_btn = gradio.Button("开始抠图", variant="primary")
                url_input = gradio.Textbox(label="粘贴图像的URL", scale=0, max_lines=1)
                url_image = gradio.Image(
                    label="网络图像",
                    height=500,
                    type="pil",
                    interactive=False,
                )
            with gradio.Column(min_width=500):
                url_download_btn = gradio.DownloadButton(label="下载")
                url_output = gradio.ImageSlider(
                    label="抠图结果",
                    height=600,
                    format="png",
                    interactive=False,
                    show_download_button=False,
                )

            url_submit_btn.click(
                predict, inputs=[url_image, resolution, model], outputs=url_output
            )
            url_input.change(identity, inputs=url_input, outputs=url_image)
            # PIL inputs are saved under the fixed "image" key by predict().
            url_output.change(
                lambda: gradio.DownloadButton(
                    variant="primary", value=save_file_path["image"]
                ),
                outputs=url_download_btn,
            )
        gradio.Examples(
            examples_url, inputs=[url_input, resolution_radio], label="示例"
        )

    # Tab 3: batch processing; results are zipped for download.
    with gradio.Tab("批量抠图"):
        with gradio.Row(equal_height=True):
            with gradio.Column(min_width=500):
                with gradio.Row(scale=0):
                    batch_clear_btn = gradio.Button("清除", variant="huggingface")
                    batch_upload_btn = gradio.UploadButton(
                        "上传图片",
                        file_types=["image"],
                        file_count="multiple",
                        variant="primary",
                    )
                batch_input = gradio.Files(
                    label="图片",
                    height=600,
                    type="filepath",
                    show_label=False,
                    file_types=["image"],
                )
            with gradio.Column(min_width=500):
                with gradio.Row(scale=0):
                    batch_submit_btn = gradio.Button("开始抠图", variant="primary")
                    batch_download_btn = gradio.DownloadButton(label="下载")
                batch_output = gradio.Gallery(
                    label="抠图结果", height=600, format="png"
                )

            batch_clear_btn.click(lambda: None, outputs=batch_input)
            batch_upload_btn.upload(identity, batch_upload_btn, batch_input)
            batch_submit_btn.click(
                predict,
                inputs=[batch_input, resolution, model],
                outputs=batch_output,
            )
            batch_output.change(
                lambda: gradio.DownloadButton(
                    variant="primary", value=save_file_path["zip"]
                ),
                outputs=batch_download_btn,
            )

print(f"Startup time: {time.time()-start_time:.1f}s\n")
demo.launch(debug=True, inbrowser=True, favicon_path="logo.ico")
