from fastapi import FastAPI, UploadFile, Form, File
from hivision import IDCreator
from hivision.error import FaceError
from hivision.creator.layout_calculator import (
    generate_layout_array,
    generate_layout_image,
)
from hivision.creator.choose_handler import choose_handler
from hivision.utils import (
    add_background,
    resize_image_to_kb,
    bytes_2_base64,
    base64_2_numpy,
    hex_to_rgb,
    add_watermark,
    save_image_dpi_to_bytes,
)
import numpy as np
import cv2
import os
import logging
import asyncio
import requests
from starlette.middleware.cors import CORSMiddleware
from starlette.formparsers import MultiPartParser
from tqdm import tqdm

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Model download configuration: for each model name, the download URL, the
# weight-file extension, and the repo-relative directory the file is saved to.
MODEL_CONFIGS = {
    "hivision_modnet": {
        "url": "https://github.com/Zeyi-Lin/HivisionIDPhotos/releases/download/pretrained-model/hivision_modnet.onnx",
        "format": "onnx",
        "location": "hivision/creator/weights",
    },
    "modnet_photographic_portrait_matting": {
        "url": "https://github.com/Zeyi-Lin/HivisionIDPhotos/releases/download/pretrained-model/modnet_photographic_portrait_matting.onnx",
        "format": "onnx",
        "location": "hivision/creator/weights",
    },
    "rmbg-1.4": {
        "url": "https://huggingface.co/briaai/RMBG-1.4/resolve/main/onnx/model.onnx?download=true",
        "format": "onnx",
        "location": "hivision/creator/weights",
    },
    "birefnet-v1-lite": {
        "url": "https://github.com/ZhengPeng7/BiRefNet/releases/download/v1/BiRefNet-general-bb_swin_v1_tiny-epoch_232.onnx",
        "format": "onnx",
        "location": "hivision/creator/weights",
    },
    "retinaface-resnet50": {
        "url": "https://github.com/Zeyi-Lin/HivisionIDPhotos/releases/download/pretrained-model/retinaface-resnet50.onnx",
        "format": "onnx",
        "location": "hivision/creator/retinaface/weights",
    },
}

# Starlette form-field size limit
MultiPartParser.max_part_size = 10 * 1024 * 1024  # 10MB
# Starlette file-upload size limit
MultiPartParser.max_file_size = 20 * 1024 * 1024  # 20MB


class ModelManager:
    """Manages model weights: checks local presence and downloads missing files.

    Weight-file paths are resolved relative to this source file using the
    ``location`` and ``format`` entries of the module-level ``MODEL_CONFIGS``.
    """

    def __init__(self):
        # Root directory against which the relative model locations resolve.
        self.base_path = os.path.dirname(os.path.abspath(__file__))

    def _model_path(self, model_name):
        """Return the absolute weight-file path for ``model_name``.

        Assumes ``model_name`` is a key of ``MODEL_CONFIGS``.
        """
        config = MODEL_CONFIGS[model_name]
        file_name = f"{model_name}.{config['format']}"
        return os.path.join(self.base_path, config["location"], file_name)

    def download_file(self, url, save_path):
        """Stream-download ``url`` to ``save_path`` with a progress bar.

        Returns True on success, False on failure.  On failure any
        partially written file is removed: ``check_model_exists`` only
        tests for existence and non-zero size, so a truncated download
        would otherwise be mistaken for a complete model.
        """
        try:
            logger.info(f"开始下载: {url}")
            # A timeout keeps a stalled connection from hanging forever;
            # the context manager releases the connection even when an
            # error occurs mid-stream.
            with requests.get(url, stream=True, timeout=(10, 60)) as response:
                response.raise_for_status()

                # Total size for the progress bar (0 when the server
                # sends no Content-Length header).
                total_size = int(response.headers.get("content-length", 0))

                # Make sure the destination directory exists.
                os.makedirs(os.path.dirname(save_path), exist_ok=True)

                # Write the payload in chunks while updating the bar.
                with open(save_path, "wb") as file, tqdm(
                    total=total_size,
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    desc=os.path.basename(save_path),
                ) as bar:
                    for chunk in response.iter_content(chunk_size=8192):
                        file.write(chunk)
                        bar.update(len(chunk))
            logger.info(f"下载完成: {save_path}")
            return True
        except Exception as e:
            logger.error(f"下载失败 {url}: {e}")
            # Delete the partial file so the next existence check does
            # not treat it as a valid model.
            try:
                os.remove(save_path)
            except OSError:
                pass
            return False

    def check_model_exists(self, model_name):
        """Return True if the model's weight file exists and is non-empty."""
        if model_name not in MODEL_CONFIGS:
            return False

        model_path = self._model_path(model_name)
        return os.path.exists(model_path) and os.path.getsize(model_path) > 0

    def download_model(self, model_name):
        """Download one model unless already present.  Returns True on success."""
        if model_name not in MODEL_CONFIGS:
            logger.error(f"未知模型: {model_name}")
            return False

        # Skip the download when a complete file is already on disk.
        if self.check_model_exists(model_name):
            logger.info(f"模型 {model_name} 已存在，跳过下载")
            return True

        logger.info(f"开始下载模型: {model_name}")
        success = self.download_file(
            MODEL_CONFIGS[model_name]["url"], self._model_path(model_name)
        )

        if success:
            logger.info(f"模型 {model_name} 下载成功")
        else:
            logger.error(f"模型 {model_name} 下载失败")

        return success

    def download_all_models(self):
        """Download every configured model.  Returns True only if all succeed."""
        logger.info("开始下载所有模型...")
        total_count = len(MODEL_CONFIGS)
        success_count = sum(
            1 for model_name in MODEL_CONFIGS if self.download_model(model_name)
        )

        logger.info(f"模型下载完成: {success_count}/{total_count}")
        return success_count == total_count

    def get_missing_models(self, required_models):
        """Return the subset of ``required_models`` not present on disk."""
        return [
            model_name
            for model_name in required_models
            if not self.check_model_exists(model_name)
        ]

    async def ensure_models_available(self, required_models):
        """Ensure every model in ``required_models`` is on disk.

        Missing models are downloaded in a worker thread so the event
        loop is not blocked.  Returns True when all models are available.
        """
        missing_models = self.get_missing_models(required_models)

        if not missing_models:
            logger.info("所有必需模型已存在")
            return True

        logger.warning(f"缺失模型: {missing_models}")

        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() is deprecated in this context since Python 3.10.
        loop = asyncio.get_running_loop()

        for model_name in missing_models:
            logger.info(f"正在下载模型: {model_name}")

            # Run the blocking download in the default executor thread.
            success = await loop.run_in_executor(None, self.download_model, model_name)

            if not success:
                logger.error(f"模型 {model_name} 下载失败")
                return False

        logger.info("所有必需模型下载完成")
        return True


# Global model-manager instance shared by all endpoints
model_manager = ModelManager()

app = FastAPI()
creator = IDCreator()

# Add CORS middleware so cross-origin browser requests are allowed
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allowed request origins
    allow_credentials=True,  # allow cookies to be sent
    allow_methods=[
        "*"
    ],  # allowed HTTP methods, e.g. GET, POST; can also be ["GET", "POST"]
    allow_headers=["*"],  # allowed request headers
)


# Intelligent ID-photo creation endpoint
@app.post("/idphoto")
async def idphoto_inference(
    input_image: UploadFile = File(None),
    input_image_base64: str = Form(None),
    height: int = Form(413),
    width: int = Form(295),
    human_matting_model: str = Form("modnet_photographic_portrait_matting"),
    face_detect_model: str = Form("mtcnn"),
    hd: bool = Form(True),
    dpi: int = Form(300),
    face_align: bool = Form(False),
    whitening_strength: int = Form(0),
    head_measure_ratio: float = Form(0.2),
    head_height_ratio: float = Form(0.45),
    top_distance_max: float = Form(0.12),
    top_distance_min: float = Form(0.10),
    brightness_strength: float = Form(0),
    contrast_strength: float = Form(0),
    sharpen_strength: float = Form(0),
    saturation_strength: float = Form(0),
):
    """Create a standard (and optionally HD) ID photo from the input image.

    The image may be supplied as a multipart file or a base64 string;
    base64 takes precedence when both are present.  Returns a dict with
    ``status`` and, on success, base64-encoded PNG results.
    """
    # Decode the input image (base64 takes precedence over the file upload).
    if input_image_base64:
        img = base64_2_numpy(input_image_base64)
    elif input_image is not None:
        image_bytes = await input_image.read()
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # Guard before cvtColor: imdecode returns None for corrupt data,
        # and cv2.cvtColor(None, ...) raises instead of falling through
        # to the validation below.
        if img is not None:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    else:
        # Neither input was provided; previously this crashed with an
        # AttributeError on input_image.read().
        return {"status": False, "error": "No input image provided"}

    # Validate the decoded image.
    if img is None or img.size == 0:
        return {"status": False, "error": "Invalid input image"}

    # Ensure the required models are on disk (downloading if necessary).
    required_models = [human_matting_model]
    if face_detect_model not in ["face++ (联网Online API)", "mtcnn"]:
        required_models.append(face_detect_model)

    models_available = await model_manager.ensure_models_available(required_models)
    if not models_available:
        return {"status": False, "error": "Failed to download required models"}

    # ------------- select matting and face-detection handlers -------------
    choose_handler(creator, human_matting_model, face_detect_model)

    # Target size as a (height, width) tuple.
    size = (int(height), int(width))
    try:
        result = creator(
            img,
            size=size,
            head_measure_ratio=head_measure_ratio,
            head_height_ratio=head_height_ratio,
            head_top_range=(top_distance_max, top_distance_min),
            face_alignment=face_align,
            whitening_strength=whitening_strength,
            brightness_strength=brightness_strength,
            contrast_strength=contrast_strength,
            sharpen_strength=sharpen_strength,
            saturation_strength=saturation_strength,
        )
    except FaceError:
        # Face detection did not find exactly one usable face.
        result_message = {"status": False}
    # Exactly one face detected: return the standard photo (4-channel PNG).
    else:
        # NOTE(review): unlike /idphoto_crop, no RGBA->BGRA conversion is
        # applied here — confirm the channel order save_image_dpi_to_bytes
        # expects.
        result_image_standard_bytes = save_image_dpi_to_bytes(
            result.standard, None, dpi
        )

        result_message = {
            "status": True,
            "image_base64_standard": bytes_2_base64(result_image_standard_bytes),
        }

        # Optionally include the HD result (4-channel PNG).
        if hd:
            result_image_hd_bytes = save_image_dpi_to_bytes(result.hd, None, dpi)
            result_message["image_base64_hd"] = bytes_2_base64(result_image_hd_bytes)

    return result_message


# Portrait matting endpoint
@app.post("/human_matting")
async def human_matting_inference(
    input_image: UploadFile = File(None),
    input_image_base64: str = Form(None),
    human_matting_model: str = Form("hivision_modnet"),
    dpi: int = Form(300),
):
    """Cut the person out of the input image, returning a transparent PNG.

    The image may be supplied as a multipart file or a base64 string;
    base64 takes precedence when both are present.
    """
    # Decode the input image.
    if input_image_base64:
        img = base64_2_numpy(input_image_base64)
    elif input_image is not None:
        image_bytes = await input_image.read()
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    else:
        # Previously this path crashed with AttributeError on None.read().
        return {"status": False, "error": "No input image provided"}

    # Validate the decoded image.
    if img is None or img.size == 0:
        return {"status": False, "error": "Invalid input image"}

    # Ensure the matting model is on disk (downloading if necessary).
    models_available = await model_manager.ensure_models_available(
        [human_matting_model]
    )
    if not models_available:
        return {"status": False, "error": "Failed to download required models"}

    # ------------- select the matting handler (no face detection) -------------
    choose_handler(creator, human_matting_model, None)

    try:
        result = creator(
            img,
            change_bg_only=True,
        )
    except FaceError:
        result_message = {"status": False}

    else:
        # RGBA -> BGRA so OpenCV encodes the channels in the expected order.
        result_image_standard_bytes = save_image_dpi_to_bytes(
            cv2.cvtColor(result.standard, cv2.COLOR_RGBA2BGRA), None, dpi
        )
        result_message = {
            "status": True,
            "image_base64": bytes_2_base64(result_image_standard_bytes),
        }
    return result_message


# Add a solid/gradient background to a transparent image
@app.post("/add_background")
async def photo_add_background(
    input_image: UploadFile = File(None),
    input_image_base64: str = Form(None),
    color: str = Form("000000"),
    kb: int = Form(None),
    dpi: int = Form(300),
    render: int = Form(0),
):
    """Composite a transparent (alpha) photo onto a rendered background.

    ``render`` selects the background style: 0 = pure color,
    1 = up/down gradient, 2 = center gradient.
    """
    render_choice = ["pure_color", "updown_gradient", "center_gradient"]

    # Validate the render mode up front; an out-of-range index previously
    # raised an unhandled IndexError (HTTP 500).
    if not 0 <= render < len(render_choice):
        return {"status": False, "error": "Invalid render mode"}

    # Decode the input image, keeping the alpha channel.
    if input_image_base64:
        img = base64_2_numpy(input_image_base64)
    elif input_image is not None:
        image_bytes = await input_image.read()
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
    else:
        # Previously this path crashed with AttributeError on None.read().
        return {"status": False, "error": "No input image provided"}

    # Validate the decoded image.
    if img is None or img.size == 0:
        return {"status": False, "error": "Invalid input image"}

    # Hex string -> RGB, then swap to BGR for OpenCV.
    color = hex_to_rgb(color)
    color = (color[2], color[1], color[0])

    result_image = add_background(
        img,
        bgr=color,
        mode=render_choice[render],
    ).astype(np.uint8)

    result_image = cv2.cvtColor(result_image, cv2.COLOR_RGB2BGR)
    # Either compress to a target file size in KB or just embed the DPI.
    if kb:
        result_image_bytes = resize_image_to_kb(result_image, None, int(kb), dpi=dpi)
    else:
        result_image_bytes = save_image_dpi_to_bytes(result_image, None, dpi=dpi)

    result_message = {
        "status": True,
        "image_base64": bytes_2_base64(result_image_bytes),
    }

    return result_message


# Six-inch layout-sheet generation endpoint
@app.post("/generate_layout_photos")
async def generate_layout_photos(
    input_image: UploadFile = File(None),
    input_image_base64: str = Form(None),
    height: int = Form(413),
    width: int = Form(295),
    kb: int = Form(None),
    dpi: int = Form(300),
):
    """Tile the input photo onto a six-inch print layout sheet.

    The image may be supplied as a multipart file or a base64 string;
    base64 takes precedence when both are present.
    """
    # Decode the input image.
    if input_image_base64:
        img = base64_2_numpy(input_image_base64)
    elif input_image is not None:
        image_bytes = await input_image.read()
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    else:
        # Previously this path crashed with AttributeError on None.read().
        return {"status": False, "error": "No input image provided"}

    # Validate the decoded image.
    if img is None or img.size == 0:
        return {"status": False, "error": "Invalid input image"}

    # Photo size as (height, width).
    size = (int(height), int(width))

    # Compute the tiling positions and whether the photo must be rotated.
    typography_arr, typography_rotate = generate_layout_array(
        input_height=size[0], input_width=size[1]
    )

    result_layout_image = generate_layout_image(
        img, typography_arr, typography_rotate, height=size[0], width=size[1]
    ).astype(np.uint8)

    result_layout_image = cv2.cvtColor(result_layout_image, cv2.COLOR_RGB2BGR)
    # Either compress to a target file size in KB or just embed the DPI.
    if kb:
        result_layout_image_bytes = resize_image_to_kb(
            result_layout_image, None, int(kb), dpi=dpi
        )
    else:
        result_layout_image_bytes = save_image_dpi_to_bytes(
            result_layout_image, None, dpi=dpi
        )

    result_layout_image_base64 = bytes_2_base64(result_layout_image_bytes)

    result_message = {
        "status": True,
        "image_base64": result_layout_image_base64,
    }

    return result_message


# Text watermark endpoint
@app.post("/watermark")
async def watermark(
    input_image: UploadFile = File(None),
    input_image_base64: str = Form(None),
    text: str = Form("Hello"),
    # NOTE(review): the five parameters below lack Form(...), so FastAPI
    # reads them as query parameters, unlike the rest of this API.  Kept
    # as-is to avoid breaking existing callers — confirm whether intended.
    size: int = 20,
    opacity: float = 0.5,
    angle: int = 30,
    color: str = "#000000",
    space: int = 25,
    kb: int = Form(None),
    dpi: int = Form(300),
):
    """Tile a semi-transparent text watermark across the input image."""
    # Decode the input image.
    if input_image_base64:
        img = base64_2_numpy(input_image_base64)
    elif input_image is not None:
        image_bytes = await input_image.read()
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    else:
        # Previously this path crashed with AttributeError on None.read().
        return {"status": False, "error": "No input image provided"}

    # Validate the decoded image.
    if img is None or img.size == 0:
        return {"status": False, "error": "Invalid input image"}

    try:
        result_image = add_watermark(img, text, size, opacity, angle, color, space)

        result_image = cv2.cvtColor(result_image, cv2.COLOR_RGB2BGR)
        # Either compress to a target file size in KB or just embed the DPI.
        if kb:
            result_image_bytes = resize_image_to_kb(
                result_image, None, int(kb), dpi=dpi
            )
        else:
            result_image_bytes = save_image_dpi_to_bytes(result_image, None, dpi=dpi)
        result_image_base64 = bytes_2_base64(result_image_bytes)

        result_message = {
            "status": True,
            "image_base64": result_image_base64,
        }
    except Exception as e:
        result_message = {
            "status": False,
            "error": str(e),
        }

    return result_message


# Set target file size in KB for an RGB photo
@app.post("/set_kb")
async def set_kb(
    input_image: UploadFile = File(None),
    input_image_base64: str = Form(None),
    dpi: int = Form(300),
    kb: int = Form(50),
):
    """Re-encode the input image so its file size approaches ``kb`` kilobytes."""
    # Decode the input image.
    if input_image_base64:
        img = base64_2_numpy(input_image_base64)
    elif input_image is not None:
        image_bytes = await input_image.read()
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    else:
        # Previously this path crashed with AttributeError on None.read().
        return {"status": False, "error": "No input image provided"}

    # Validate the decoded image.
    if img is None or img.size == 0:
        return {"status": False, "error": "Invalid input image"}

    try:
        result_image = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        result_image_bytes = resize_image_to_kb(result_image, None, int(kb), dpi=dpi)
        result_image_base64 = bytes_2_base64(result_image_bytes)

        result_message = {
            "status": True,
            "image_base64": result_image_base64,
        }
    except Exception as e:
        # str(e): the raw exception object is not JSON-serializable and
        # broke the response; also matches the /watermark error format.
        result_message = {
            "status": False,
            "error": str(e),
        }

    return result_message


# Intelligent ID-photo cropping endpoint
@app.post("/idphoto_crop")
async def idphoto_crop_inference(
    input_image: UploadFile = File(None),
    input_image_base64: str = Form(None),
    height: int = Form(413),
    width: int = Form(295),
    face_detect_model: str = Form("mtcnn"),
    hd: bool = Form(True),
    dpi: int = Form(300),
    head_measure_ratio: float = Form(0.2),
    head_height_ratio: float = Form(0.45),
    top_distance_max: float = Form(0.12),
    top_distance_min: float = Form(0.10),
):
    """Crop an already-matted (4-channel) photo to ID-photo framing.

    Only face detection is performed; no matting.  The image may be
    supplied as a multipart file or a base64 string; base64 takes
    precedence when both are present.
    """
    # Decode the input image, keeping all 4 channels.
    if input_image_base64:
        img = base64_2_numpy(input_image_base64)
    elif input_image is not None:
        image_bytes = await input_image.read()
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)  # read 4-channel image
    else:
        # Previously this path crashed with AttributeError on None.read().
        return {"status": False, "error": "No input image provided"}

    # Validate the decoded image.
    if img is None or img.size == 0:
        return {"status": False, "error": "Invalid input image"}

    # Ensure the face-detection model is on disk (downloading if needed);
    # mtcnn and the Face++ online API need no local weights.
    required_models = []
    if face_detect_model not in ["face++ (联网Online API)", "mtcnn"]:
        required_models.append(face_detect_model)

    if required_models:
        models_available = await model_manager.ensure_models_available(required_models)
        if not models_available:
            return {"status": False, "error": "Failed to download required models"}

    # ------------- select the face-detection handler -------------
    choose_handler(creator, face_detect_option=face_detect_model)

    # Target size as a (height, width) tuple.
    size = (int(height), int(width))
    try:
        result = creator(
            img,
            size=size,
            head_measure_ratio=head_measure_ratio,
            head_height_ratio=head_height_ratio,
            head_top_range=(top_distance_max, top_distance_min),
            crop_only=True,
        )
    except FaceError:
        # Face detection did not find exactly one usable face.
        result_message = {"status": False}
    # Exactly one face detected: return the standard photo (4-channel PNG).
    else:
        result_image_standard_bytes = save_image_dpi_to_bytes(
            cv2.cvtColor(result.standard, cv2.COLOR_RGBA2BGRA), None, dpi
        )

        result_message = {
            "status": True,
            "image_base64_standard": bytes_2_base64(result_image_standard_bytes),
        }

        # Optionally include the HD result (4-channel PNG).
        if hd:
            result_image_hd_bytes = save_image_dpi_to_bytes(
                cv2.cvtColor(result.hd, cv2.COLOR_RGBA2BGRA), None, dpi
            )
            result_message["image_base64_hd"] = bytes_2_base64(result_image_hd_bytes)

    return result_message


if __name__ == "__main__":
    import uvicorn

    # Serve the inference API on port 8080, reachable from any interface.
    host, port = "0.0.0.0", 8080
    uvicorn.run(app, host=host, port=port)
