import contextlib
import os
import tempfile

import numpy as np
import scipy.io.wavfile
import torchaudio
from fastapi import APIRouter, UploadFile, File, HTTPException
from pydantic import BaseModel

from model import model, processor

router = APIRouter()

class S2STRequest(BaseModel):
    src_lang: str
    tgt_lang: str

@router.post("/translate")
async def speech_to_speech(file: UploadFile = File(...), request: S2STRequest = None):
    try:
        # Validate languages
        if not request.src_lang or not request.tgt_lang:
            raise HTTPException(status_code=400, detail="Source and target languages are required")

        # Check if model and processor are loaded
        if model is None or processor is None:
            raise HTTPException(status_code=503, detail="Model not initialized")

        # Read and process audio file
        audio_content = await file.read()
        with open("temp_audio.wav", "wb") as f:
            f.write(audio_content)
        
        audio, orig_freq = torchaudio.load("temp_audio.wav")
        audio = torchaudio.functional.resample(audio, orig_freq=orig_freq, new_freq=16_000)
        audio_inputs = processor(audios=audio, return_tensors="pt")

        # Generate translation
        audio_array = model.generate(**audio_inputs, tgt_lang=request.tgt_lang)[0].cpu().numpy().squeeze()
        
        # Save output audio
        output_path = "output_s2st.wav"
        scipy.io.wavfile.write(output_path, rate=model.config.sampling_rate, data=audio_array)

        return {"message": "Speech-to-speech translation completed", "output_file": output_path}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))