# Copyright (c) 2024-present AI-Labs

import json
import time

import requests

from comps import (
    CustomLogger,
    VisionChatDoc,
    ServiceType,
    TextDoc,
    opea_microservices,
    register_microservice,
    register_statistics,
    statistics_dict,
)

from configs import config

logger = CustomLogger("opea_service@vlm")
logflag = config.opea_service.vlm.logs

"""
注册微服务
"""
@register_microservice(
    name="opea_service@vlm",
    service_type=ServiceType.VLM,
    host=config.opea_service.vlm.host,
    port=config.opea_service.vlm.port,
    endpoint="/v1/vision/chat",
    input_datatype=VisionChatDoc,
    output_datatype=TextDoc,
)
# Concrete request-handling logic for the microservice.
@register_statistics(names=["opea_service@vlm"])
async def chat_to_media(input: VisionChatDoc) -> TextDoc:
    """Proxy a vision-chat request to the backing Qwen2-VL service.

    Args:
        input: Incoming request carrying the media payload, its type,
            the original filename and the user's text prompt.

    Returns:
        A ``TextDoc`` wrapping the ``"text"`` field of the backend's
        JSON reply.

    Raises:
        requests.HTTPError: if the backend answers with a non-2xx status.
        requests.Timeout: if the backend does not reply within the limit.
    """
    start = time.time()
    payload = {
        "media": input.media,
        "media_type": input.media_type,
        "filename": input.filename,
        "text": input.text,
    }
    if logflag:
        logger.info(f"接收到用户请求：{payload}")

    # Call the underlying multimodal-understanding service.
    # `json=` serializes the payload AND sets the Content-Type header
    # (the original `data=json.dumps(...)` sent no content type).
    # A timeout keeps a stuck backend from hanging this handler forever,
    # and both proxy schemes are bypassed, not just plain http.
    response = requests.post(
        url=f"{config.opea_service.vlm.endpoint}/vision_language/qwen2vl/v1/chat",
        json=payload,
        proxies={"http": None, "https": None},
        timeout=600,
    )
    # Fail loudly on a backend error instead of raising an opaque
    # KeyError/JSONDecodeError while parsing an error page below.
    response.raise_for_status()

    # Record end-to-end latency for this request.
    statistics_dict["opea_service@vlm"].append_latency(time.time() - start, None)

    # Parse once; log the body rather than the Response object's repr.
    result = response.json()
    if logflag:
        logger.info(f"接收到处理结果：{result}")

    # Wrap the backend's answer in the service's output datatype.
    return TextDoc(text=result["text"])

"""
启动微服务
"""
def start():
    """Launch the registered VLM microservice and begin serving requests."""
    vlm_service = opea_microservices["opea_service@vlm"]
    vlm_service.start()

if __name__ == "__main__":
    start()
