<template>
  <div>
    <h1>MP3 to Text Conversion</h1>
    <!-- Visible label for the file picker (screen-reader + click target);
         accept both the extension and the MIME type so drag/drop and
         OS pickers filter consistently -->
    <label for="mp3-input">Choose an MP3 file</label>
    <input id="mp3-input" type="file" @change="handleFileUpload" accept=".mp3,audio/mpeg">
    <!-- aria-live so assistive tech announces the transcript when it is set asynchronously -->
    <p v-if="transcript" aria-live="polite">Transcript: {{ transcript }}</p>
  </div>
</template>

<script>
import { createFFmpeg, fetchFile } from '@ffmpeg/ffmpeg';

export default {
  data() {
    return {
      // Latest recognized text; rendered by the template.
      transcript: '',
      // ffmpeg.wasm (0.x API) instance, created in created().
      ffmpeg: null,
      // Promise that resolves once the wasm core is loaded. Uploads that
      // arrive before loading finishes await this instead of failing.
      ffmpegReady: null,
    };
  },
  created() {
    // Kick off the FFmpeg wasm load once. Vue does not await lifecycle
    // hooks, so we keep the promise rather than relying on `async created`
    // having finished before the user picks a file (that was a race).
    this.ffmpeg = createFFmpeg({ log: true });
    this.ffmpegReady = this.ffmpeg.load().catch((err) => {
      console.error('Failed to load FFmpeg:', err);
      throw err;
    });
  },
  methods: {
    /**
     * Change handler for the file input: convert the selected MP3 to WAV
     * and hand the result to the speech recognizer. Errors are logged,
     * not thrown, so the UI stays responsive.
     */
    async handleFileUpload(event) {
      const file = event.target.files[0];
      if (!file) return;
      try {
        await this.convertMp3ToWav(file);
      } catch (err) {
        console.error('MP3 conversion failed:', err);
      }
    },
    /**
     * Transcode an MP3 File/Blob to WAV via ffmpeg.wasm and pass the
     * resulting Blob to transcribeAudio().
     */
    async convertMp3ToWav(file) {
      // Guard against the load race: wait for the wasm core first.
      await this.ffmpegReady;
      const ffmpeg = this.ffmpeg;
      ffmpeg.FS('writeFile', 'input.mp3', await fetchFile(file));
      try {
        await ffmpeg.run('-i', 'input.mp3', 'output.wav');
        const data = ffmpeg.FS('readFile', 'output.wav');
        const wavBlob = new Blob([data.buffer], { type: 'audio/wav' });
        this.transcribeAudio(wavBlob);
      } finally {
        // Free MEMFS space — without this every upload leaks both files
        // in wasm memory. unlink may throw if run() failed before
        // output.wav existed; that is fine to ignore.
        try { ffmpeg.FS('unlink', 'input.mp3'); } catch (e) { /* ignore */ }
        try { ffmpeg.FS('unlink', 'output.wav'); } catch (e) { /* ignore */ }
      }
    },
    /**
     * Play the WAV blob out loud and run speech recognition while it plays.
     *
     * NOTE(review): the Web Speech API only listens to the default
     * microphone — SpeechRecognition.start() takes no arguments, so the
     * MediaStream the original code built and passed to start() was
     * silently ignored (it has been removed here as dead code). This
     * approach only "works" when the speaker output is picked up by the
     * mic; reliably transcribing a file needs a server-side or offline
     * STT engine instead.
     */
    transcribeAudio(blob) {
      // Feature-detect: a bare `webkitSpeechRecognition` reference throws
      // a ReferenceError in browsers without the prefixed global.
      const Recognition =
        window.SpeechRecognition || window.webkitSpeechRecognition;
      if (!Recognition) {
        console.error('Speech recognition is not supported in this browser.');
        return;
      }

      const recognizer = new Recognition();
      recognizer.lang = 'en-US';
      recognizer.continuous = false;
      recognizer.interimResults = false;

      recognizer.onresult = (event) => {
        this.transcript = event.results[0][0].transcript;
      };
      recognizer.onerror = (event) => {
        console.error('Recognition error:', event.error);
      };
      recognizer.onaudiostart = () => {
        console.log('Audio started');
      };
      recognizer.onaudioend = () => {
        console.log('Audio ended');
      };

      // Decode the WAV and start playback + recognition together
      // (see the NOTE in the doc comment for why this is best-effort).
      const reader = new FileReader();
      reader.onload = (e) => {
        const audioContext =
          new (window.AudioContext || window.webkitAudioContext)();
        audioContext.decodeAudioData(
          e.target.result,
          (buffer) => {
            const source = audioContext.createBufferSource();
            source.buffer = buffer;
            source.connect(audioContext.destination);
            source.start(0);
            recognizer.start();
          },
          (err) => {
            // The original had no error callback, so a corrupt or
            // unsupported WAV failed with no indication at all.
            console.error('decodeAudioData failed:', err);
          }
        );
      };
      reader.readAsArrayBuffer(blob);
    },
  },
};
</script>
