# import time
# import json
# import base64
# from pathlib import Path
# from sanic import Websocket
# from sanic.log import logger

# from apps.base.hunter import AppError
# from modules.xfyun.auth import XFYunAuth
# from aiohttp import ClientSession, ClientTimeout, WSMsgType

# STATUS_FIRST_FRAME = 0  # marks the first audio frame
# STATUS_CONTINUE_FRAME = 1  # marks an intermediate frame
# STATUS_LAST_FRAME = 2  # marks the final frame

# AUTH_URL = "wss://iat.xf-yun.com/v1"
# class SparkAuthExt(XFYunAuth):
#     """
#     Auth holder extended with the request parameters specific to the
#     Spark large-model speech-recognition (iat) endpoint.
#     """
#     def __init__(self, AudioFile: Path):
#         """
#         AudioFile: path of the audio file to be recognised
#         """
#         super().__init__(XFYun_URL=AUTH_URL)
#         self.AudioFile = AudioFile
#         # Recognition parameters sent in the "parameter.iat" section of the
#         # first frame. NOTE(review): "dwa": "wpgs" presumably enables
#         # streaming/partial results — confirm against the XFYun iat docs.
#         self.iat_params = {
#             "domain": "slm",
#             "language": "zh_cn",
#             "accent": "mandarin",
#             "dwa": "wpgs",
#             "result": {"encoding": "utf8", "compress": "raw", "format": "plain"},
#         }
#         self.status = STATUS_FIRST_FRAME  # frame state: first, continue or last frame



# async def send_data(ws: Websocket, wsParam: SparkAuthExt):
#     """
#     Stream the audio file to Spark over the websocket, frame by frame.
#
#     ws: open websocket connection to the Spark iat service
#     wsParam: auth/parameter holder; wsParam.status tracks which frame
#              kind (first / continue / last) is being sent
#     """
#     frameSize = 1280  # audio bytes per frame
#     intervel = 0.04  # pause between frames in seconds (sic: typo for "interval")

#     with open(wsParam.AudioFile, "rb") as fp:
#         while True:
#             buf = fp.read(frameSize)
#             audio = str(base64.b64encode(buf), "utf-8")
#             # Empty read = end of file: switch to the last-frame state so a
#             # final frame (with empty audio) and header status 2 is sent below.
#             if not buf:
#                 wsParam.status = STATUS_LAST_FRAME
#             # First frame: header status 0, carries the iat parameters.
#             if wsParam.status == STATUS_FIRST_FRAME:
#                 d = {
#                     "header": {"status": 0, "app_id": wsParam.APP_ID},
#                     "parameter": {"iat": wsParam.iat_params},
#                     "payload": {
#                         "audio": {
#                             "audio": audio,
#                             "sample_rate": 16000,
#                             "encoding": "raw",
#                         }
#                     },
#                 }
#                 d = json.dumps(d)
#                 await ws.send_str(d)
#                 wsParam.status = STATUS_CONTINUE_FRAME
#             # Intermediate frames: header status 1.
#             elif wsParam.status == STATUS_CONTINUE_FRAME:
#                 d = {
#                     "header": {"status": 1, "app_id": wsParam.APP_ID},
#                     "parameter": {"iat": wsParam.iat_params},
#                     "payload": {
#                         "audio": {
#                             "audio": audio,
#                             "sample_rate": 16000,
#                             "encoding": "raw",
#                         }
#                     },
#                 }
#                 d = json.dumps(d)
#                 await ws.send_str(d)
#             # Last frame: header status 2, then stop sending.
#             # NOTE(review): an earlier comment here claimed this branch was
#             # never reached; from the code it runs once fp.read() returns
#             # b"" — confirm with a live trace before re-enabling.
#             elif wsParam.status == STATUS_LAST_FRAME:
#                 d = {
#                     "header": {"status": 2, "app_id": wsParam.APP_ID},
#                     "parameter": {"iat": wsParam.iat_params},
#                     "payload": {
#                         "audio": {
#                             "audio": audio,
#                             "sample_rate": 16000,
#                             "encoding": "raw",
#                         }
#                     },
#                 }
#                 d = json.dumps(d)
#                 await ws.send_str(d)
#                 break

#             # Simulate the audio sampling interval between frames.
#             # NOTE(review): time.sleep() blocks the event loop inside an
#             # async function — this should be "await asyncio.sleep(intervel)"
#             # when the module is re-enabled.
#             time.sleep(intervel)


# async def rcev_data(ws: Websocket, message: str) -> str:
#     """
#     Parse one message returned by Spark and extract the recognised text.
#
#     Returns the concatenated words from payload.result.text, or None when
#     the message carries no payload (despite the "-> str" annotation).
#     Closes the websocket and raises AppError when Spark reports a
#     non-zero error code.
#
#     NOTE(review): the name looks like a typo for "recv_data"; renaming it
#     also requires updating the call site in process_file_and_send_to_spark.
#     """
#     message = json.loads(message)
#     code = message["header"]["code"]
#     status = message["header"]["status"]
#     if code != 0:
#         await ws.close()
#         raise AppError(10002, F"spark返回错误code:{code}")
#     else:
#         payload = message.get("payload")
#         if payload:
#             # result.text is base64-encoded JSON; its "ws" key holds the
#             # word segments.
#             text = payload["result"]["text"]
#             text = json.loads(str(base64.b64decode(text), "utf8"))
#             text_ws = text['ws']
#             # print(text_ws)
#             # Concatenate every candidate word ("w") of every segment.
#             result = ''
#             for i in text_ws:
#                 for j in i["cw"]:
#                     w = j["w"]
#                     result += w
#             return result
#         # Header status 2 marks the final message of the session.
#         if status == 2:
#             await ws.close()
#             logger.info("spark连接已关闭.")


# async def process_file_and_send_to_spark(file_path, logger):
#     """
#     Send an audio file to Spark and collect the recognised text.
#
#     file_path: path of the audio file to transcribe
#     logger: logger instance to use (note: this parameter shadows the
#             module-level sanic logger import)
#
#     Returns the accumulated transcription string, or None on any error.
#     """
#     async with ClientSession(timeout=ClientTimeout(total=30)) as session:
#         # Build the auth/parameter holder and the signed connection URL.
#         wsParam = SparkAuthExt(AudioFile=file_path)
#         url = wsParam.create_url()

#         try:
#             async with session.ws_connect(url) as spark_ws:
#                 logger.info(f"data sending to spark: {file_path}")
#                 # Push all audio frames to Spark.
#                 await send_data(spark_ws, wsParam)

#                 received_data = ""
#                 # Drain the recognition results until the server closes.
#                 async for msg in spark_ws:
#                     if msg.type == WSMsgType.TEXT:
#                         spark_data = await rcev_data(spark_ws, msg.data)
#                         # Skip empty results and a bare full stop ("。").
#                         if spark_data and spark_data != "。":
#                             logger.info(f"spark data: {spark_data}")
#                             received_data += spark_data
#                             # Optional: per-chunk handling could go here.

#             return received_data

#         except Exception as e:
#             # NOTE(review): the broad catch turns every failure (including
#             # AppError from rcev_data) into a silent None return; consider
#             # narrowing or re-raising when this module is re-enabled.
#             logger.error(f"Error processing file and sending to Spark: {e}")
#             return None

