package com.yang.chattranscription.controller;

import jakarta.annotation.Resource;

import org.springframework.ai.openai.OpenAiAudioTranscriptionClient;
import org.springframework.ai.openai.OpenAiAudioTranscriptionOptions;
import org.springframework.ai.openai.api.OpenAiAudioApi;
import org.springframework.ai.openai.audio.transcription.AudioTranscriptionPrompt;
import org.springframework.ai.openai.audio.transcription.AudioTranscriptionResponse;
import org.springframework.core.io.ClassPathResource;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;


/**
 * REST endpoint that transcribes an audio file from the classpath using the
 * OpenAI Whisper model via Spring AI's transcription client.
 */
@RestController
public class TranscriptionController {

    /** Auto-configured Spring AI client for OpenAI's audio transcription API. */
    @Resource
    private OpenAiAudioTranscriptionClient client;

    /**
     * Transcribes a classpath audio resource and returns the plain-text result.
     *
     * @param file classpath location of the audio file to transcribe; defaults to
     *             the original sample so existing callers are unaffected
     * @return the transcribed text as returned by the whisper-1 model
     */
    @GetMapping("/ai/transcript")
    public String transcription(
            @RequestParam(name = "file", defaultValue = "半生雪_out.flac") String file) {
        ClassPathResource audio = new ClassPathResource(file);
        OpenAiAudioTranscriptionOptions options = OpenAiAudioTranscriptionOptions.builder()
                .withResponseFormat(OpenAiAudioApi.TranscriptResponseFormat.TEXT)
                // Temperature 0 for deterministic transcription output.
                .withTemperature(0f)
                .withModel("whisper-1")
                .build();
        AudioTranscriptionResponse response =
                client.call(new AudioTranscriptionPrompt(audio, options));
        // Removed the stray System.out debug print and the duplicate
        // getResult().getOutput() call; the transcript is simply returned.
        return response.getResult().getOutput();
    }
}
