# Copyright (c) 2024-present AI-Labs

import json
import time

import requests

from comps import (
    Base64ByteStrDoc,
    CustomLogger,
    LLMParamsDoc,
    ServiceType,
    opea_microservices,
    register_microservice,
    register_statistics,
    statistics_dict,
)

from configs import config

# Module-level logger for this service; the tag matches the registered service name.
logger = CustomLogger("opea_service@asr")
# When truthy, request/response payloads are logged (driven by service config).
logflag = config.opea_service.asr.logs

"""
注册微服务
"""
@register_microservice(
    name="opea_service@asr",
    service_type=ServiceType.ASR,
    host=config.opea_service.asr.host,
    port=config.opea_service.asr.port,
    endpoint="/v1/audio/transcriptions",
    input_datatype=Base64ByteStrDoc,
    output_datatype=LLMParamsDoc,
)

# 微服务的具体处理逻辑
@register_statistics(names=["opea_service@asr"])
async def audio_to_text(audio: Base64ByteStrDoc):
    start = time.time()
    byte_str = audio.byte_str
    inputs = {"audio": byte_str}
    if logflag:
        logger.info(f"接收到用户请求：{inputs}")

    # 请求底层基础功能进行语音识别处理
    response = requests.post(url=f"{config.opea_service.asr.endpoint}/asr/funasr/v1", data=json.dumps(inputs), proxies={"http": None})

    # 统计耗时
    statistics_dict["opea_service@asr"].append_latency(time.time() - start, None)
    
    if logflag:
        logger.info(f"接收到处理结果：{response}")

    # 返回响应结果
    return LLMParamsDoc(query=response.json()["asr_result"])

"""
启动微服务
"""
def start():
    opea_microservices["opea_service@asr"].start()

if __name__ == "__main__":
    start()
