import { getRandomEnabledApiKey } from '../dao/apiKeys'
import fs, { promises as fsPromises } from 'fs'
import path from 'path'
import { app } from 'electron'
import { TextToSpeechClient } from '@google-cloud/text-to-speech'
import { SpeechClient } from '@google-cloud/speech'
import OpenAI from 'openai'
import { getAudioDuration } from '../util/ffmpeg'
import { getProxyConfig } from '../dao/proxyConfig'

// WARNING(security): disables TLS certificate verification for the ENTIRE
// process — every HTTPS call below (Google Cloud, OpenAI, proxy endpoint)
// becomes vulnerable to man-in-the-middle attacks.
// NOTE(review): presumably a workaround for a self-signed/corporate proxy
// certificate — prefer NODE_EXTRA_CA_CERTS with that CA and remove this line.
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'

// Language-code map: normalizes loose identifiers (ISO 639-1 codes, BCP-47
// tags, English names) to the BCP-47 codes passed to Google Speech-to-Text.
// Unknown keys fall back to 'zh-CN' in speechToText().
const LANGUAGE_MAP = {
  zh: 'cmn-Hans-CN',
  'cmn-CN': 'cmn-Hans-CN',
  Chinese: 'cmn-Hans-CN',
  en: 'en-US',
  'en-US': 'en-US',
  English: 'en-US',
  vi: 'vi-VN',
  'vi-VN': 'vi-VN',
  Vietnamese: 'vi-VN',
  th: 'th-TH',
  'th-TH': 'th-TH',
  Thai: 'th-TH'
}

// Per-environment path to the Google service-account key file. In production
// the key ships unpacked ('app.asar.unpacked') so the Google SDK can read it
// from disk; in development it is read from the source tree.
const keyFilename = {
  'development-win32': path.join(__dirname, '../../resources/demo_text_to_speech.json'),
  'production-win32': path.join(
    process.resourcesPath,
    'app.asar.unpacked',
    'resources',
    'demo_text_to_speech.json'
  )
}

// NOTE(review): only the two win32 combinations above are mapped — on any
// other NODE_ENV/platform this is undefined and the clients below are
// constructed without an explicit key file (presumably falling back to
// Application Default Credentials); verify on macOS/Linux builds.
const keyFilenamePathValue = keyFilename[`${process.env.NODE_ENV}-${process.platform}`]

// Google Cloud Text-to-Speech client (speech synthesis).
const client = new TextToSpeechClient({
  keyFilename: keyFilenamePathValue
})

// Google Cloud Speech-to-Text client (transcription).
const speechClient = new SpeechClient({
  keyFilename: keyFilenamePathValue
})

// Resolve (and create if needed) the directory that holds synthesized audio
// files, under Electron's per-user data path. Returns the audio directory.
const getSynthesizedAudioDir = () => {
  const userDataPath = app.getPath('userData')
  const audioDir = path.join(userDataPath, 'synthesized_audio')
  const captionDir = path.join(userDataPath, 'synthesized_caption')

  // mkdirSync with { recursive: true } is a no-op when the directory already
  // exists, so the former existsSync-then-mkdir pattern (a TOCTOU race) is
  // unnecessary.
  fs.mkdirSync(audioDir, { recursive: true })
  // The caption directory is created here too, preserving the original
  // behavior (getSynthesizedCaptionDir also creates it on demand).
  fs.mkdirSync(captionDir, { recursive: true })

  return audioDir
}

// Resolve (and create if needed) the directory that holds generated caption
// files, under Electron's per-user data path.
const getSynthesizedCaptionDir = () => {
  const userDataPath = app.getPath('userData')
  const captionDir = path.join(userDataPath, 'synthesized_caption')

  // mkdirSync with { recursive: true } is a no-op when the directory already
  // exists, so the former existsSync-then-mkdir pattern (a TOCTOU race) is
  // unnecessary.
  fs.mkdirSync(captionDir, { recursive: true })

  return captionDir
}

/**
 * Synthesize speech from text via Google Cloud Text-to-Speech and save it as
 * an MP3 file in the synthesized-audio directory.
 * @param {string} text - Text to synthesize.
 * @param {string} [languageCode='zh-CN'] - BCP-47 language code for the voice.
 * @param {string} [ssmlGender='NEUTRAL'] - Requested voice gender.
 * @param {string} [name] - Specific voice name (optional; API chooses when omitted).
 * @param {number} [speakingRate=1.0] - Speaking-speed multiplier.
 * @returns {Promise<{path: string, duration: number}>} Saved file path and
 *   audio duration in seconds (probed via ffmpeg).
 * @throws {Error} When synthesis, file writing, or duration probing fails.
 */
export async function google_textToSpeech(
  text,
  languageCode = 'zh-CN',
  ssmlGender = 'NEUTRAL',
  name,
  speakingRate = 1.0
) {
  const request = {
    input: { text },
    voice: { languageCode, ssmlGender, name },
    audioConfig: {
      audioEncoding: 'MP3',
      speakingRate
    }
  }
  try {
    const [response] = await client.synthesizeSpeech(request)

    // Unique file name: timestamp plus a 4-digit random suffix.
    const timestamp = Date.now()
    const randomNum = Math.floor(1000 + Math.random() * 9000)
    const fileName = `speech_${timestamp}_${randomNum}.mp3`
    const filePath = path.join(getSynthesizedAudioDir(), fileName)

    // audioContent is raw bytes (Uint8Array/Buffer); the previous 'binary'
    // encoding argument was ignored for buffers and misleading, so drop it.
    await fsPromises.writeFile(filePath, response.audioContent)

    console.log(`音频内容已写入文件：${fileName}`)

    // Probe the written file for its playback duration.
    const duration = await getAudioDuration(filePath)

    return {
      path: filePath,
      duration
    }
  } catch (err) {
    console.error(`发生错误：${err}`)
    // Keep the original error as the cause so its stack trace is preserved.
    throw new Error(`音频生成失败: ${err}`, { cause: err })
  }
}

/**
 * List Google TTS voices for the supported target languages, grouped first
 * by language code and then by SSML gender.
 * @returns {Promise<Object>} { [languageCode]: { [ssmlGender]: Voice[] } }
 */
export async function listVoices() {
  const targetLanguages = new Set(['th-TH', 'vi-VN', 'en-US', 'cmn-CN'])

  const [result] = await client.listVoices({})

  const groupedVoices = {}

  // A voice may advertise several language codes; it is filed once under
  // each supported code, bucketed by its SSML gender.
  for (const voice of result.voices) {
    for (const lang of voice.languageCodes) {
      if (!targetLanguages.has(lang)) continue

      if (!groupedVoices[lang]) {
        groupedVoices[lang] = {}
      }
      const byGender = groupedVoices[lang]

      if (!byGender[voice.ssmlGender]) {
        byGender[voice.ssmlGender] = []
      }
      byGender[voice.ssmlGender].push(voice)
    }
  }

  return groupedVoices
}

/**
 * Extract key terms (keywords, jargon, personal/place/brand names) from the
 * original script via an OpenAI chat completion; the terms are later fed to
 * Speech-to-Text as adaptation hints.
 * @param {string} scriptContent - Original script text.
 * @returns {Promise<string[]>} Extracted terms (empty on missing input or
 *   API failure — failures are logged, not propagated).
 * @throws {Error} When no enabled API key is configured in the local DB.
 */
async function extractKeyTerms(scriptContent) {
  if (!scriptContent) return []

  // A random enabled key is drawn from the local database.
  const apiKeyData = getRandomEnabledApiKey()
  if (!apiKeyData) {
    throw new Error('没有启用的API密钥，请在API密钥管理页面添加并启用至少一个密钥')
  }

  // Requests are routed through the user-configured proxy endpoint.
  const proxyConfig = getProxyConfig()
  const openai = new OpenAI({
    apiKey: apiKeyData.key,
    baseURL: proxyConfig.proxy_url
  })

  try {
    const prompt = `请从以下文案中提取关键词、专业术语、人名、地名、品牌名等重要词汇，每行一个词汇：\n\n${scriptContent}`
    const messages = [
      { role: 'system', content: '你是一个专业的文本分析助手，善于提取文本中的关键信息。' },
      { role: 'user', content: prompt }
    ]

    const response = await openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages
    })

    // The model answers one term per line; drop blanks and whitespace.
    const terms = []
    for (const line of response.choices[0].message.content.split('\n')) {
      const term = line.trim()
      if (term.length > 0) {
        terms.push(term)
      }
    }
    return terms
  } catch (error) {
    console.error('提取关键词失败:', error)
    return []
  }
}

/**
 * Map an audio file's extension to Speech-to-Text encoding settings.
 * @param {string} audioFilePath - Path to the audio file.
 * @returns {{encoding: string, sampleRateHertz: (number|null)}} Encoding name
 *   and sample rate; null means the API detects the rate automatically.
 */
function getAudioConfig(audioFilePath) {
  // Formats whose containers carry the sample rate use null (API detects
  // it); LINEAR16 (WAV) keeps an explicit 24 kHz value.
  const configsByExtension = {
    '.mp3': { encoding: 'MP3', sampleRateHertz: null },
    '.wav': { encoding: 'LINEAR16', sampleRateHertz: 24000 },
    '.flac': { encoding: 'FLAC', sampleRateHertz: null },
    '.ogg': { encoding: 'OGG_OPUS', sampleRateHertz: null },
    '.webm': { encoding: 'WEBM_OPUS', sampleRateHertz: null }
  }

  const ext = path.extname(audioFilePath).toLowerCase()
  const known = configsByExtension[ext]
  if (known) {
    // Return a copy so callers may safely mutate the result.
    return { ...known }
  }

  console.warn(`未知音频格式: ${ext}，使用默认LINEAR16编码`)
  return {
    encoding: 'LINEAR16',
    sampleRateHertz: 24000
  }
}

/**
 * Transcribe an audio file with Google Cloud Speech-to-Text using
 * longRunningRecognize (word-level timestamps enabled).
 * @param {string} audioFilePath - Path to the audio file.
 * @param {string} language - BCP-47 language code (e.g. 'cmn-Hans-CN', 'en-US').
 * @param {string|null} scriptContent - Original script text; when given, key
 *   terms are extracted and passed as speech-adaptation hints.
 * @returns {Promise<Object>} Raw longRunningRecognize response.
 * @throws Re-throws any file/API error after logging diagnostics.
 */
async function transcribeWithGoogle(audioFilePath, language, scriptContent) {
  try {
    // Audio bytes are sent inline in the request.
    const audioBytes = await fsPromises.readFile(audioFilePath)

    // Encoding/sample-rate settings derived from the file extension.
    const audioConfig = getAudioConfig(audioFilePath)

    console.log(`开始使用 Google Cloud Speech-to-Text 转录音频...`)
    console.log(`检测到音频文件: ${path.basename(audioFilePath)}`)

    const config = {
      encoding: audioConfig.encoding,
      languageCode: language,
      enableWordTimeOffsets: true, // word timestamps are required for SRT output
      enableAutomaticPunctuation: true,
      useEnhanced: true,
      model: 'default'
    }

    // FIX: a hard-coded 24000 Hz used to be sent for every format, ignoring
    // the audioConfig computed above — wrong for e.g. a 44.1 kHz MP3. For
    // formats whose headers carry the rate (sampleRateHertz === null) the
    // field is now omitted so the API reads it from the file; only
    // LINEAR16/unknown formats send an explicit rate.
    if (audioConfig.sampleRateHertz != null) {
      config.sampleRateHertz = audioConfig.sampleRateHertz
    }

    // Optional speech adaptation: boost key terms extracted from the script.
    if (scriptContent) {
      console.log('使用原始文案提取关键词以提高转录准确性...')
      const keyTerms = await extractKeyTerms(scriptContent)

      if (keyTerms.length > 0) {
        config.adaptation = {
          phraseSets: [
            {
              phrases: keyTerms.map((term) => ({ value: term, boost: 10 }))
            }
          ]
        }
        console.log(`添加了 ${keyTerms.length} 个关键词提示`)
      }
    }

    const request = {
      audio: { content: audioBytes },
      config: config
    }

    // Long-running recognition handles audio longer than one minute.
    const [operation] = await speechClient.longRunningRecognize(request)
    console.log('等待转录完成...')
    const [response] = await operation.promise()

    return response
  } catch (error) {
    console.error('Google Speech-to-Text 转录失败:', error)
    // error.message can be undefined for non-Error throws; guard the check.
    if (typeof error?.message === 'string' && error.message.includes('encoding')) {
      console.error('音频编码错误建议:')
      console.error('- 确保音频文件格式正确')
      console.error('- 支持的格式: MP3, WAV, FLAC, OGG, WEBM')
      console.error('- 当前检测到的格式:', getAudioConfig(audioFilePath).encoding)
    }
    throw error
  }
}

/**
 * Supported input audio formats, keyed by lowercase file extension.
 * @returns {Object} extension -> { encoding, description }
 */
export function getSupportedAudioFormats() {
  const rows = [
    ['.mp3', 'MP3', 'MPEG Audio Layer III'],
    ['.wav', 'LINEAR16', 'Waveform Audio File Format'],
    ['.flac', 'FLAC', 'Free Lossless Audio Codec'],
    ['.ogg', 'OGG_OPUS', 'OGG Opus'],
    ['.webm', 'WEBM_OPUS', 'WebM Opus']
  ]
  return Object.fromEntries(
    rows.map(([extension, encoding, description]) => [extension, { encoding, description }])
  )
}

/**
 * Check whether an audio file's extension is supported for transcription.
 * @param {string} audioFilePath - Path to the audio file.
 * @returns {boolean} True when the (case-insensitive) extension is supported.
 */
export function validateAudioFormat(audioFilePath) {
  const extension = path.extname(audioFilePath).toLowerCase()
  return getSupportedAudioFormats()[extension] !== undefined
}

// async function convertSpeechToSRT(googleResult, originalText) {
//   if (!googleResult.results || !googleResult.results[0] || !googleResult.results[0].alternatives) {
//     throw new Error('无效的Google Speech-to-Text结果格式')
//   }
//
//   const words = googleResult.results.flatMap(result =>
//     result.alternatives.flatMap(alt => alt.words || [])
//   )
//   // log.log(JSON.stringify(googleResult))
//   const param = {
//     correct_text: originalText,
//     words
//   }
//   return await convertSrt(param)
// }

/**
 * Convert a protobuf Duration/Timestamp-like object to seconds.
 * @param {Object|null|undefined} timeObj - Object with `seconds` and `nanos`
 *   fields; each may be a number, a numeric string, or absent. A missing
 *   timeObj yields 0.
 * @returns {number} Time in seconds (fractional).
 */
function parseTime(timeObj) {
  // FIX: was parseInt() without a radix. Going through String() first keeps
  // the old behavior for numeric strings/numbers and Long-like objects
  // (which stringify to their decimal value), while Number() preserves any
  // fractional part instead of truncating it. Optional chaining also
  // tolerates a missing timeObj (words without startTime/endTime no longer
  // throw).
  const seconds = Number(String(timeObj?.seconds ?? 0));
  const nanos = Number(String(timeObj?.nanos ?? 0));
  return seconds + nanos / 1e9;
}

/**
 * Format a time in seconds as an SRT timestamp (HH:MM:SS,mmm).
 * @param {number} seconds - Non-negative time in seconds.
 * @returns {string} e.g. 3661.5 -> "01:01:01,500"
 */
function formatSrtTime(seconds) {
  const pad = (value, width) => String(value).padStart(width, '0');

  const hours = Math.floor(seconds / 3600);
  const minutes = Math.floor((seconds % 3600) / 60);
  const wholeSeconds = Math.floor(seconds % 60);
  const milliseconds = Math.floor((seconds % 1) * 1000);

  return `${pad(hours, 2)}:${pad(minutes, 2)}:${pad(wholeSeconds, 2)},${pad(milliseconds, 3)}`;
}

/**
 * Convert a Google Speech-to-Text response into SRT subtitle text.
 *
 * Words accumulate into a subtitle until one of three triggers fires — the
 * word-count cap, the duration cap, or a punctuation mark — and the
 * triggering word is included in the subtitle being flushed.
 * @param {Object} googleResult - Raw response from longRunningRecognize.
 * @param {number} [wordsPerSubtitle=8] - Max words per subtitle.
 * @param {number} [maxDuration=3.0] - Max subtitle duration in seconds.
 * @returns {string} SRT-formatted subtitle text.
 * @throws {Error} When the response carries no results/alternatives.
 */
function convertToSrt(googleResult, wordsPerSubtitle = 8, maxDuration = 3.0) {
  if (!googleResult.results || !googleResult.results[0] || !googleResult.results[0].alternatives) {
    throw new Error('无效的Google Speech-to-Text结果格式');
  }

  // FIX: long audio yields multiple `results` segments; the previous code
  // read only results[0] and silently dropped every later segment's words.
  // (The retired convertSpeechToSRT helper already flattened all results.)
  const words = googleResult.results.flatMap(
    (result) => (result.alternatives && result.alternatives[0] && result.alternatives[0].words) || []
  );

  const punctuation = /[。！？，.!?,]/;
  const subtitles = [];
  let currentSubtitle = [];
  let currentStart = null;

  for (const word of words) {
    const wordText = word.word;
    const wordStart = parseTime(word.startTime);
    const wordEnd = parseTime(word.endTime);

    // A fresh subtitle starts at the first word's start time.
    if (currentStart === null) {
      currentStart = wordStart;
    }

    // Decide BEFORE adding the word, so the triggering word still lands in
    // the subtitle being flushed. (The previous if/else here had two
    // byte-identical branches — dead duplication, now collapsed. An empty
    // accumulator is never flushed, matching the old `length > 0` guard.)
    const shouldFlush =
      currentSubtitle.length > 0 &&
      (currentSubtitle.length >= wordsPerSubtitle ||
        wordEnd - currentStart > maxDuration ||
        punctuation.test(wordText));

    currentSubtitle.push(wordText);

    if (shouldFlush) {
      subtitles.push({
        start: currentStart,
        end: wordEnd,
        text: currentSubtitle.join(' ')
      });
      currentSubtitle = [];
      currentStart = null;
    }
  }

  // Flush whatever remains after the loop.
  if (currentSubtitle.length > 0 && words.length > 0) {
    subtitles.push({
      start: currentStart,
      end: parseTime(words[words.length - 1].endTime),
      text: currentSubtitle.join(' ')
    });
  }

  // Render the numbered SRT blocks.
  const srtLines = [];
  for (let i = 0; i < subtitles.length; i++) {
    const subtitle = subtitles[i];
    srtLines.push(`${i + 1}`);
    srtLines.push(`${formatSrtTime(subtitle.start)} --> ${formatSrtTime(subtitle.end)}`);
    srtLines.push(subtitle.text);
    srtLines.push(''); // blank separator line between cues
  }

  return srtLines.join('\n');
}

/**
 * Transcribe an audio file and write caption file(s) in the requested format.
 * @param {string} audioFilePath - Path to the audio file.
 * @param {string} [language='zh'] - Loose language id (zh, en, vi, th, ...);
 *   normalized via LANGUAGE_MAP, defaulting to 'zh-CN'.
 * @param {string} [outputFormat='srt'] - 'srt' | 'vtt' | 'txt' | 'json' | 'all'.
 * @param {string|null} [outputDir=null] - Unused; kept for interface compatibility.
 * @param {string|null} [scriptContent=null] - Original script used as transcription hints.
 * @returns {Promise<string>} Path of the first generated caption file.
 * @throws {Error} When the file is missing, its format is unsupported, or
 *   transcription/conversion fails.
 */
export async function speechToText(
  audioFilePath,
  language = 'zh',
  outputFormat = 'srt',
  outputDir = null,
  scriptContent = null
) {
  try {
    if (!fs.existsSync(audioFilePath)) {
      throw new Error(`音频文件不存在: ${audioFilePath}`)
    }

    if (!validateAudioFormat(audioFilePath)) {
      const supportedFormats = Object.keys(getSupportedAudioFormats()).join(', ')
      throw new Error(`不支持的音频格式。支持的格式: ${supportedFormats}`)
    }

    const ext = path.extname(audioFilePath)
    const baseName = path.basename(audioFilePath, ext)

    // Normalize the language id to a BCP-47 code Google accepts.
    const googleLanguage = LANGUAGE_MAP[language] || 'zh-CN'
    console.log(`使用语言: ${googleLanguage}`)
    console.log(`音频文件: ${audioFilePath}`)
    console.log(`音频格式: ${getAudioConfig(audioFilePath).encoding}`)

    const result = await transcribeWithGoogle(audioFilePath, googleLanguage, scriptContent)

    const outputFiles = []
    const wantsAll = outputFormat === 'all'

    // Convert to SRT lazily and at most once (previously the conversion ran
    // twice for outputFormat === 'all').
    let cachedSrt = null
    const getSrt = () => {
      if (cachedSrt === null) cachedSrt = convertToSrt(result)
      return cachedSrt
    }

    if (outputFormat === 'srt' || wantsAll) {
      const srtPath = path.join(getSynthesizedCaptionDir(), `${baseName}.srt`)
      fs.writeFileSync(srtPath, getSrt(), 'utf8')
      console.log(`SRT文件已保存: ${srtPath}`)
      outputFiles.push(srtPath)
    }

    if (outputFormat === 'vtt' || wantsAll) {
      // FIX: WebVTT only changes the timestamp separator (',' -> '.'); the
      // previous global replace(/,/g, '.') also corrupted commas inside the
      // subtitle text itself.
      const vttBody = getSrt().replace(/(\d{2}:\d{2}:\d{2}),(\d{3})/g, '$1.$2')
      const vttContent = 'WEBVTT\n\n' + vttBody
      const vttPath = path.join(getSynthesizedCaptionDir(), `${baseName}.vtt`)
      fs.writeFileSync(vttPath, vttContent, 'utf8')
      console.log(`VTT文件已保存: ${vttPath}`)
      outputFiles.push(vttPath)
    }

    if (outputFormat === 'txt' || wantsAll) {
      // FIX: join transcripts from every result segment — long audio returns
      // multiple results and only the first was written before.
      const transcript = (result.results || [])
        .map((r) => r.alternatives?.[0]?.transcript || '')
        .filter((t) => t.length > 0)
        .join('\n')
      const txtPath = path.join(getSynthesizedCaptionDir(), `${baseName}.txt`)
      fs.writeFileSync(txtPath, transcript, 'utf8')
      console.log(`TXT文件已保存: ${txtPath}`)
      outputFiles.push(txtPath)
    }

    if (outputFormat === 'json' || wantsAll) {
      // Raw API response, pretty-printed.
      const jsonPath = path.join(getSynthesizedCaptionDir(), `${baseName}.json`)
      fs.writeFileSync(jsonPath, JSON.stringify(result, null, 2), 'utf8')
      console.log(`JSON文件已保存: ${jsonPath}`)
      outputFiles.push(jsonPath)
    }

    // First generated file, or the would-be path for an unrecognized format.
    return outputFiles[0] || path.join(getSynthesizedCaptionDir(), `${baseName}.${outputFormat}`)
  } catch (error) {
    console.error('语音转文本处理失败:', error)
    throw error
  }
}

/**
 * Render a { data: [{ start, end, text }, ...] } payload as SRT text.
 * Timestamps are used verbatim (assumed already in SRT format).
 * @param {Object} json - Object whose `data` array holds the cues.
 * @returns {string} SRT-formatted string.
 */
function jsonToSrt(json) {
  const blocks = json.data.map(
    (cue, index) => `${index + 1}\n${cue.start} --> ${cue.end}\n${cue.text}\n`
  );
  return blocks.join('\n');
}

