import { NextRequest, NextResponse } from 'next/server'
import { getUserFromRequest } from '@/lib/auth'
import { prisma } from '@/lib/db'
import AliyunSenseVoiceService from '@/lib/aliyunSenseVoice'

export async function POST(request: NextRequest) {
  try {
    const user = await getUserFromRequest(request)
    
    if (!user) {
      return NextResponse.json(
        { error: '未授权访问' },
        { status: 401 }
      )
    }

    const body = await request.json()
    const { videoId, videoIds, audioUrls, languageHints = ['auto'], disfluencyRemovalEnabled = false } = body

    // 初始化SenseVoice服务
    const senseVoiceService = new AliyunSenseVoiceService()

    if (videoId) {
      // 单个视频语音识别
      return await handleSingleVideoTranscription(videoId, languageHints, disfluencyRemovalEnabled, senseVoiceService)
    } else if (videoIds && Array.isArray(videoIds)) {
      // 批量视频语音识别
      return await handleBatchVideoTranscription(videoIds, languageHints, disfluencyRemovalEnabled, senseVoiceService)
    } else if (audioUrls && Array.isArray(audioUrls)) {
      // 直接音频URL识别
      return await handleDirectAudioTranscription(audioUrls, languageHints, disfluencyRemovalEnabled, senseVoiceService)
    } else {
      return NextResponse.json(
        { error: '请提供 videoId、videoIds 或 audioUrls' },
        { status: 400 }
      )
    }

  } catch (error) {
    console.error('Transcription error:', error)
    return NextResponse.json(
      { error: '语音识别失败' },
      { status: 500 }
    )
  }
}

// 处理单个视频的语音识别
/**
 * Transcribes a single stored video's audio track via SenseVoice.
 *
 * Flow: look up the video, validate it has an audio URL, short-circuit
 * when a completed `aliyun_sensevoice` extraction already exists, then
 * create a `processing` record, run recognition, and persist either the
 * completed text or a `failed` status.
 *
 * @param videoId - primary key of the `tikTokVideo` row to transcribe
 * @param languageHints - language hints forwarded to SenseVoice
 * @param disfluencyRemovalEnabled - forwarded SenseVoice option
 * @param senseVoiceService - recognition service instance
 * @returns a NextResponse: 404 unknown video, 400 missing audio / failed
 *          recognition, 200 cached or fresh result, 500 unexpected error
 */
async function handleSingleVideoTranscription(
  videoId: number,
  languageHints: string[],
  disfluencyRemovalEnabled: boolean,
  senseVoiceService: AliyunSenseVoiceService
) {
  try {
    // 获取视频信息
    const video = await prisma.tikTokVideo.findUnique({
      where: { id: videoId }
    })

    if (!video) {
      return NextResponse.json(
        { error: '视频不存在' },
        { status: 404 }
      )
    }

    if (!video.audioUrl) {
      return NextResponse.json(
        { error: '该视频没有音频URL' },
        { status: 400 }
      )
    }

    // 检查是否已经有识别记录 — avoid re-running (and re-billing) a
    // recognition that already completed for this video.
    const existingExtraction = await prisma.textExtraction.findFirst({
      where: {
        videoId: videoId,
        method: 'aliyun_sensevoice',
        status: 'completed'
      }
    })

    if (existingExtraction) {
      return NextResponse.json({
        success: true,
        message: '该视频已有语音识别结果',
        extraction: existingExtraction
      })
    }

    // 创建识别任务记录 (status tracks progress; updated below)
    const extraction = await prisma.textExtraction.create({
      data: {
        videoId: videoId,
        method: 'aliyun_sensevoice',
        status: 'processing',
        extractedText: '',
        confidence: 0
      }
    })

    try {
      // 执行语音识别
      const results = await senseVoiceService.transcribeAudio(
        [video.audioUrl],
        languageHints,
        disfluencyRemovalEnabled
      )

      // Guard against an empty result array: the original `results[0]`
      // access would throw a TypeError on `.success` and surface as a
      // generic 500 instead of a clean per-video failure.
      const result = results?.[0]

      if (result?.success && result.cleanText) {
        // 更新识别结果
        await prisma.textExtraction.update({
          where: { id: extraction.id },
          data: {
            status: 'completed',
            extractedText: result.cleanText,
            confidence: 0.95, // SenseVoice通常有较高的准确率
            language: senseVoiceService.detectLanguage(result.cleanText)
          }
        })

        return NextResponse.json({
          success: true,
          message: '语音识别完成',
          extraction: {
            id: extraction.id,
            extractedText: result.cleanText,
            confidence: 0.95,
            method: 'aliyun_sensevoice'
          }
        })
      } else {
        // 识别失败 — mark the record so it is not left in 'processing'
        await prisma.textExtraction.update({
          where: { id: extraction.id },
          data: {
            status: 'failed'
          }
        })

        return NextResponse.json({
          success: false,
          error: result?.error || '语音识别失败'
        }, { status: 400 })
      }
    } catch (transcriptionError) {
      // 更新任务状态为失败, then rethrow so the outer catch builds the 500
      await prisma.textExtraction.update({
        where: { id: extraction.id },
        data: {
          status: 'failed'
        }
      })

      throw transcriptionError
    }

  } catch (error) {
    console.error('Single video transcription error:', error)
    return NextResponse.json({
      success: false,
      error: error instanceof Error ? error.message : '语音识别失败'
    }, { status: 500 })
  }
}

// 处理批量视频的语音识别
async function handleBatchVideoTranscription(
  videoIds: number[],
  languageHints: string[],
  disfluencyRemovalEnabled: boolean,
  senseVoiceService: AliyunSenseVoiceService
) {
  try {
    const results = []
    
    for (const videoId of videoIds) {
      try {
        const result = await handleSingleVideoTranscription(
          videoId,
          languageHints,
          disfluencyRemovalEnabled,
          senseVoiceService
        )
        
        const resultData = await result.json()
        results.push({
          videoId,
          success: resultData.success,
          extraction: resultData.extraction,
          error: resultData.error
        })
      } catch (error) {
        results.push({
          videoId,
          success: false,
          error: error instanceof Error ? error.message : 'Unknown error'
        })
      }
    }

    const successCount = results.filter(r => r.success).length
    const failedCount = results.length - successCount

    return NextResponse.json({
      success: true,
      message: `批量识别完成: ${successCount} 成功, ${failedCount} 失败`,
      results,
      summary: {
        total: results.length,
        success: successCount,
        failed: failedCount
      }
    })

  } catch (error) {
    console.error('Batch video transcription error:', error)
    return NextResponse.json({
      success: false,
      error: error instanceof Error ? error.message : '批量语音识别失败'
    }, { status: 500 })
  }
}

// 处理直接音频URL的语音识别
async function handleDirectAudioTranscription(
  audioUrls: string[],
  languageHints: string[],
  disfluencyRemovalEnabled: boolean,
  senseVoiceService: AliyunSenseVoiceService
) {
  try {
    // 执行语音识别
    const results = await senseVoiceService.transcribeAudio(
      audioUrls,
      languageHints,
      disfluencyRemovalEnabled
    )

    return NextResponse.json({
      success: true,
      message: '语音识别完成',
      results: results.map(result => ({
        fileUrl: result.fileUrl,
        success: result.success,
        text: result.cleanText,
        originalText: result.text,
        duration: result.duration,
        error: result.error
      }))
    })

  } catch (error) {
    console.error('Direct audio transcription error:', error)
    return NextResponse.json({
      success: false,
      error: error instanceof Error ? error.message : '语音识别失败'
    }, { status: 500 })
  }
}
