import logging
import os
import shutil
import uuid
from io import BytesIO
from typing import Annotated

import numpy as np
from fastapi import Request, APIRouter, UploadFile, File, Form, WebSocket, Cookie, status, Query, Depends, WebSocketException, WebSocketDisconnect
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse, Response
from starlette.datastructures import FormData

from ai import ModelMap, NlpAbsModel, VitsAbsModel, ASRTranslate
from api import WebSocketConnectionManager
from models.core import SystemUser
from models.stream import StreamChatConfig, StreamSpeechConfig, StreamRecognitionConfig
from services import AiModelCache, save_wav_tourl, chat_stream_txt, tts_stream
from services.deps import get_current_user
from services.storage import BaseStorage
from services.utils import calc_timeLine
router = APIRouter()

logger = logging.getLogger(__name__)
ws_pool = WebSocketConnectionManager()

@router.post("/translation/{model}")
async def translation(model: str, request: Request, user: SystemUser = Depends(get_current_user)) -> Response:
    # 翻译/nllb|m2m100
    # 输出语种, 提示词
    fd: FormData = await request.form()
    prompt = fd.get("prompt")
    # English|Thai|Arabic
    language = fd.get("language")
    logger.info("task:translation, post form prompt:%s, language:%s", prompt, language)
    model_dir = ModelMap.get(model, "facebook/nllb-200-distilled-600M")
    # ---------------------------------------------------------------
    cacheModel: AiModelCache = request.app.state.cache
    modelIns: NlpAbsModel = await cacheModel.get_nlpAbsModel(model_dir, model)
    # ---------------------------------------------------------------
    content = jsonable_encoder({"result": modelIns.translation(prompt, language), "language": language})
    return JSONResponse(content)

@router.post("/translation/batch/{model}")
async def batchTranslation(model: str, request: Request, user: SystemUser = Depends(get_current_user)) -> Response:
    fd: FormData = await request.form()
    # 用逗号分隔
    prompts = fd.get("content")
    # English|Thai|Arabic
    language = fd.get("language")
    logger.info("task:batch translation, post form prompt:%s, language:%s", prompts, language)
    model_dir = ModelMap.get(model, "facebook/nllb-200-distilled-600M")
    # ---------------------------------------------------------------
    cacheModel: AiModelCache = request.app.state.cache
    modelIns: NlpAbsModel = await cacheModel.get_nlpAbsModel(model_dir, model)
    # ---------------------------------------------------------------
    # { result: [
    #     {prompt:x, result:'?'},
    #     {prompt:x, result:'?'},
    #     ...
    #   ],
    #   language: '?'
    # }
    promptList:list[str] = prompts.split(',')
    resultList:list[str] = modelIns.batch_translation(promptList, language)
    if len(promptList) == len(resultList):
        rsList:list[dict] = [{'prompt': prompt, 'result': result} for prompt, result in zip(promptList, resultList)]
        content:dict = {"result": rsList, "language": language}
    else:
        content:dict = {'result':[], "language": language}
    
    return JSONResponse(jsonable_encoder(content))

@router.post("/speech")
async def speech(request: Request, user: SystemUser = Depends(get_current_user)) -> Response:
    # TTS/tts-{nod|eng|tha}
    # 输出语种, 提示词
    fd: FormData = await request.form()
    prompt = fd.get("prompt")
    # English|Thai|Arabic
    language = fd.get("language")
    logger.info("task:speech, post form prompt:%s, language:%s", prompt, language)
    model_dir = ModelMap.get(f"mms-{language}", "facebook/mms-tts-eng")
    # ---------------------------------------------------------------
    cacheModel: AiModelCache = request.app.state.cache
    modelIns: VitsAbsModel = await cacheModel.get_vitsAbsModel(model_dir, "mms")
    # ---------------------------------------------------------------
    nyArr: np.ndarray = modelIns.generateNpArr(prompt)
    #
    storage: BaseStorage = request.app.state.storage
    wav_url, filename = save_wav_tourl(nyArr, storage)
    #
    content = jsonable_encoder({"text": prompt, "language": language, "path": wav_url, "filename": filename})
    return JSONResponse(content)

@router.post("/recognition")
async def automaticSpeechRecognition(request: Request, audio: UploadFile = File(...), model:str=Form(...), user: SystemUser = Depends(get_current_user))-> Response:
    # ASR/whisper
    # 语音识别
    if not audio:
        return {"message": "No upload file sent"}
    
    logger.info("task:asr, upload form file:%s, size:%s", audio.filename, audio.size)
    model_dir = ModelMap.get(model or 'faster-whisper', 'Systran/faster-whisper-large-v3')
    #
    cache_path = f"voice/tmp_{audio.filename}"
    with open(cache_path, 'w+b') as file:
        shutil.copyfileobj(audio.file, file)
    # ---------------------------------------------------------------
    cacheModel: AiModelCache = request.app.state.cache
    modelIns: ASRTranslate = await cacheModel.get_asrAbsModel(model_dir, model)
    # ---------------------------------------------------------------
    result = modelIns.transcribe(cache_path)
    return {"result": result}

@router.post("/chat/create")
async def createStreamChatRoom(request: Request, user: SystemUser = Depends(get_current_user))-> Response:
    fd: FormData = await request.form()
    language:str = fd.get('language', 'English')
    translation:str = fd.get('translation', 'nllb')
    speech:str = fd.get('speech', 'mms')
    expire:int = int(fd.get('ttl', '3600'))
    # 返回一下token
    token = str(uuid.uuid4())
    sc = StreamChatConfig(
        language=language,
        translation=translation,
        speech=speech,
        expire=expire,
        nlp_model_dir=ModelMap.get(translation, "facebook/nllb-200-distilled-600M"),
        tts_model_dir=ModelMap.get(f"mms-{language}", "facebook/mms-tts-eng"),
        token=token)
    cacheModel: AiModelCache = request.app.state.cache
    await cacheModel.get_cache_client().set_key(token, sc, expire)
    return {'result': token, 'expire': expire}

@router.post("/speech/create")
async def createStreamSpeechPipe(request: Request, user: SystemUser = Depends(get_current_user))-> Response:
    fd: FormData = await request.form()
    language:str = fd.get('language', 'English')
    speech:str = fd.get('model', 'mms')
    expire:int = int(fd.get('ttl', '3600'))
    #
    sampleRate:int = int(fd.get('rate', '16000'))
    audioChannel:int = int(fd.get('channel', '1'))
    # 返回一下token
    token = str(uuid.uuid4())
    sc = StreamSpeechConfig(
        language=language,
        model=speech,
        expire=expire,
        sample_rate=sampleRate,
        channel=audioChannel,
        model_dir=ModelMap.get(f"mms-{language}", "facebook/mms-tts-eng"),
        token=token)
    cacheModel: AiModelCache = request.app.state.cache
    await cacheModel.get_cache_client().set_key(token, sc, expire)
    return {'result': token, 'expire': expire}

@router.post("/recognition/create")
async def createStreamRecognitionPipe(request: Request, user: SystemUser = Depends(get_current_user))-> Response:
    fd: FormData = await request.form()
    model:str = fd.get('model', 'faster-whisper')
    expire:int = int(fd.get('ttl', '3600'))
    date_type = fd.get('type', 'wav')
    # 返回一下token
    token = str(uuid.uuid4())
    sc = StreamRecognitionConfig(
        model=model,
        expire=expire,
        date_type=date_type,
        model_dir=ModelMap.get(model, "Systran/faster-whisper-large-v3"),
        token=token)
    cacheModel: AiModelCache = request.app.state.cache
    await cacheModel.get_cache_client().set_key(token, sc, expire)
    return {'result': token, 'expire': expire}

# websocket token deps
async def get_cookie_or_token(websocket: WebSocket, session: Annotated[str | None, Cookie()] = None, token: Annotated[str | None, Query()] = None, user: SystemUser = Depends(get_current_user)):
    if user is None:
        raise WebSocketException(code=status.WS_1008_POLICY_VIOLATION, reason='lost authorized ')
    if token is None and session is None:
        raise WebSocketException(code=status.WS_1008_POLICY_VIOLATION)
    return token or session

@router.websocket("/stream/chat")
async def chatRoomStream(websocket: WebSocket, cookie_or_token: Annotated[str, Depends(get_cookie_or_token)],)->Response:
    # 流式: 文本翻译+语音生成(wav url)
    # prompt, language=english, translation=nllb, speech=mms
    # {"text": 翻译后的文件, "language": 翻译后的语种, "path": tts后的语音路径}
    # ---------------------------------------------------------------
    storage: BaseStorage = websocket.app.state.storage
    cacheModel: AiModelCache = websocket.app.state.cache
    sc: StreamChatConfig = await cacheModel.get_cache_client().get_key(cookie_or_token)
    nlpModel: NlpAbsModel = await cacheModel.get_nlpAbsModel(sc.nlp_model_dir, sc.translation)
    ttsModel: VitsAbsModel = await cacheModel.get_vitsAbsModel(sc.tts_model_dir, sc.speech)
    # ---------------------------------------------------------------
    language:str = sc.language
    await ws_pool.connect(websocket)
    try:
        while True:
            prompt = await websocket.receive_text()
            if await cacheModel.get_cache_client().exist_key(cookie_or_token):
                res_content:dict = chat_stream_txt(prompt, language, nlpModel, ttsModel, storage)
                res_content['status'] = 'connected'
            else:
                res_content:dict = {'language': language, 'status': 'disconnected'}

            logger.info("task:stream.chat, send json:%s, rev word:%s", res_content, prompt)
            await websocket.send_json(res_content)
    except WebSocketDisconnect:
        ws_pool.disconnect(websocket)
        await ws_pool.broadcast(f"connection #{cookie_or_token} discontinue")

@router.websocket("/stream/speech")
async def speechPipeStream(websocket: WebSocket, cookie_or_token: Annotated[str, Depends(get_cookie_or_token)],)->Response:
    # 流式: 文本到语音(bytes):
    # ---------------------------------------------------------------
    cacheModel: AiModelCache = websocket.app.state.cache
    sc: StreamSpeechConfig = await cacheModel.get_cache_client().get_key(cookie_or_token)
    ttsModel: VitsAbsModel = await cacheModel.get_vitsAbsModel(sc.model_dir, sc.model)
    # ---------------------------------------------------------------
    await ws_pool.connect(websocket)
    try:
        while True:
            prompt = await websocket.receive_text()
            byData: bytes = b''
            if await cacheModel.get_cache_client().exist_key(cookie_or_token):
                byData = tts_stream(prompt, ttsModel)

            logger.info("task:stream.speech, send byte length:%d, rev word:%s", len(byData), prompt)
            await websocket.send_bytes(byData)
    except WebSocketDisconnect:
        ws_pool.disconnect(websocket)
        await ws_pool.broadcast(f"connection #{cookie_or_token} discontinue")

@router.websocket("/stream/recognition") 
async def recognitionPipStream(websocket: WebSocket, cookie_or_token: Annotated[str, Depends(get_cookie_or_token)],)->Response:
    # 流式: 语音识别(bytes):
    # ---------------------------------------------------------------
    cacheModel: AiModelCache = websocket.app.state.cache
    sc:StreamRecognitionConfig = await cacheModel.get_cache_client().get_key(cookie_or_token)
    modelIns: ASRTranslate = await cacheModel.get_asrAbsModel(sc.model_dir, sc.model)
    # ---------------------------------------------------------------
    await ws_pool.connect(websocket)
    try:
        while True:
            
            bytesData = await websocket.receive_bytes()
            sendict: dict = {'status': 'disconnected', 'result': ''}
            if await cacheModel.get_cache_client().exist_key(cookie_or_token):
                #wav file的bytes
                if sc.date_type == 'wav':
                    audio_buffer: BytesIO = BytesIO()
                    audio_buffer.write(bytesData)
                    audio_buffer.seek(0)
                    words = modelIns.transcribe_bytes(audio_buffer)
                    sendict = {'status': 'connected', 'result': words}
                #dtype=float32 数组的bytes
                elif sc.date_type == 'numpy':
                    np_audio_arr = np.frombuffer(bytesData, dtype=np.float32)
                    words = modelIns.transcribe_array(np_audio_arr)
                    sendict = {'status': 'connected', 'result': words}
                
            await websocket.send_json(sendict)
    except WebSocketDisconnect:
        ws_pool.disconnect(websocket)
        await ws_pool.broadcast(f"connection #{cookie_or_token} discontinue")