# Copyright (c) 2024-present AI-Labs

import time
import requests, json


from comps import (
    CustomLogger,
    SDInpaintingInputs,
    SDOutputs,
    ServiceType,
    opea_microservices,
    register_microservice,
    register_statistics,
    statistics_dict,
)

from configs import config

# Module-level logger for this microservice.
logger = CustomLogger("opea_service@inpainting")
# Whether verbose request/response logging is enabled, driven by service config.
logflag = config.opea_service.inpainting.logs

"""
注册微服务
"""
@register_microservice(
    name="opea_service@inpainting",
    service_type=ServiceType.IMAGE2IMAGE,
    host=config.opea_service.inpainting.host,
    port=config.opea_service.inpainting.port,
    endpoint="/v1/inpainting",
    input_datatype=SDInpaintingInputs,
    output_datatype=SDOutputs,
)

# 微服务的具体处理逻辑
@register_statistics(names=["opea_service@inpainting"])
def inpainting(input: SDInpaintingInputs):
    start = time.time()

    prompt = input.prompt
    init_image = input.image
    mask_image = input.mask_image
    num_images_per_prompt = input.num_images_per_prompt
    if logflag:
        logger.info(f"接收到用户请求：{prompt}")

    url = f"{config.opea_service.inpainting.endpoint}/image/stable_diffusion_xl/inpainting"

    headers = {'Content-Type': 'application/json; charset=utf-8'}
    data=json.dumps({'prompt': prompt, 'init_image': init_image, 'mask_image': mask_image, 'samples': num_images_per_prompt, 'output_format': 'base64'})

    # 请求底层基础功能进行图片处理
    response = requests.post(url=url, headers=headers, data=data)

    # 统计耗时
    statistics_dict["opea_service@inpainting"].append_latency(time.time() - start, None)

    if logflag:
        logger.info(f"接收到处理结果：{response}")

    # 返回响应结果
    return SDOutputs(images=[i["b64_json"] for i in response.json()["data"]])

"""
启动微服务
"""
def start():
    opea_microservices["opea_service@inpainting"].start()

if __name__ == "__main__":
    start()
