import os
import tempfile

from fastapi import APIRouter, UploadFile, File, HTTPException
from pydantic import BaseModel
import torchaudio

from model import model, processor

# Shared router for the ASR endpoints in this module; presumably included
# into the main FastAPI app elsewhere — confirm against the app entry point.
router = APIRouter()

# Request payload for /recognize.
# NOTE: no docstring on purpose — pydantic would surface it as the OpenAPI
# schema description, which would change the served API docs.
class ASRRequest(BaseModel):
    # Language code forwarded to the model as `tgt_lang` for text output
    # (expected format depends on the model family — TODO confirm).
    src_lang: str

@router.post("/recognize")
async def automatic_speech_recognition(file: UploadFile = File(...), request: ASRRequest = None):
    """Transcribe an uploaded audio file to text.

    Parameters:
        file: Uploaded audio file in any format torchaudio can decode.
        request: Carries ``src_lang``, passed to the model as ``tgt_lang``
            for text-only generation. Defaults to None, so it must be
            validated before use.

    Returns:
        dict with a single key ``transcribed_text``.

    Raises:
        HTTPException: 400 when ``src_lang`` is missing, 503 when the model
            is not initialized, 500 on any other processing failure.

    NOTE(review): a Pydantic JSON body alongside a multipart ``File(...)``
    upload generally does not parse in FastAPI — ``src_lang`` may need to be
    a ``Form(...)`` field instead; confirm against the client.
    """
    # Validate BEFORE the try block so these deliberate errors are not
    # swallowed by the generic handler below. The old code dereferenced
    # `request.src_lang` while `request` could be None (AttributeError -> 500
    # instead of the intended 400).
    if request is None or not request.src_lang:
        raise HTTPException(status_code=400, detail="Source language is required")

    if model is None or processor is None:
        raise HTTPException(status_code=503, detail="Model not initialized")

    tmp_path = None
    try:
        audio_content = await file.read()

        # Unique per-request temp file: the old fixed "temp_audio.wav" path
        # raced between concurrent requests and was never deleted.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            tmp.write(audio_content)
            tmp_path = tmp.name

        audio, orig_freq = torchaudio.load(tmp_path)
        # The model expects 16 kHz input; resample from whatever rate the
        # upload was encoded at.
        audio = torchaudio.functional.resample(audio, orig_freq=orig_freq, new_freq=16_000)
        audio_inputs = processor(audios=audio, return_tensors="pt")

        # generate_speech=False requests text-only output; tgt_lang selects
        # the transcription language.
        output_tokens = model.generate(**audio_inputs, tgt_lang=request.src_lang, generate_speech=False)
        transcribed_text = processor.decode(output_tokens[0].tolist(), skip_special_tokens=True)

        return {"transcribed_text": transcribed_text}
    except HTTPException:
        # Preserve any HTTPException raised mid-processing instead of
        # flattening it into a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Always remove the temp file — the original leaked it on every call.
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)