package cn.iocoder.yudao.module.ai.service.voice;

import cn.hutool.core.codec.Base64;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.http.HttpRequest;
import cn.hutool.http.HttpResponse;
import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.InputStream;

/**
 * AI speech-to-text Service implementation.
 *
 * Transcribes audio via a locally deployed Whisper backend: a whisper.cpp HTTP
 * server is tried first, then an OpenAI-compatible Whisper API. If neither is
 * configured/reachable, a human-readable setup hint is returned instead.
 *
 * @author 芋道源码
 */
@Service
@Slf4j
public class AiVoiceServiceImpl implements AiVoiceService {

    // NOTE(review): injected but not referenced anywhere in this class yet —
    // presumably reserved for a future Ollama-based backend; confirm before removing.
    @Value("${spring.ai.ollama.base-url:http://127.0.0.1:11434}")
    private String ollamaBaseUrl;

    // NOTE(review): injected but not referenced anywhere in this class yet.
    @Value("${spring.ai.ollama.whisper.model:whisper}")
    private String whisperModel;

    /** Base URL of the whisper.cpp HTTP server (preferred backend when non-blank). */
    @Value("${spring.ai.ollama.whisper.service-url:http://127.0.0.1:8080}")
    private String whisperServiceUrl;

    /** Base URL of an OpenAI-compatible API (fallback backend). */
    @Value("${spring.ai.openai.base-url:}")
    private String openAiBaseUrl;

    /** API key for the OpenAI-compatible backend. */
    @Value("${spring.ai.openai.api-key:}")
    private String openAiApiKey;

    /** whisper.cpp server inference endpoint path. */
    private static final String WHISPER_CPP_API_PATH = "/inference";
    /** OpenAI-compatible transcription endpoint path. */
    private static final String OPENAI_WHISPER_API_PATH = "/v1/audio/transcriptions";
    /** HTTP timeout for all transcription calls, in milliseconds. */
    private static final int HTTP_TIMEOUT_MS = 60000;

    /**
     * Transcribes an audio stream to text.
     *
     * @param audioStream audio content; fully read, but NOT closed by this method
     * @param format      audio container/extension (e.g. "wav", "mp3"); used only as the
     *                    temp-file suffix and sanitized to alphanumerics
     * @param language    optional ISO language hint; backends default to "zh" when blank
     * @return recognized text, or a setup-hint message when no backend is available
     * @throws RuntimeException (with cause) if reading/transcription fails
     */
    @Override
    public String transcribeAudio(InputStream audioStream, String format, String language) {
        try {
            // 将音频流转换为字节数组
            byte[] audioBytes = IoUtil.readBytes(audioStream);

            // Use a unique temp file per call: a fixed name under java.io.tmpdir would be
            // clobbered by concurrent requests (one request overwriting / deleting another's
            // audio). The caller-supplied format is sanitized so it cannot inject path
            // separators into the file name.
            File tempFile = File.createTempFile("temp_audio_", "." + sanitizeFormat(format));
            FileUtil.writeBytes(audioBytes, tempFile);

            try {
                // 使用替代方法进行语音识别
                return transcribeWithAlternativeMethod(tempFile, language);
            } finally {
                // 删除临时文件
                FileUtil.del(tempFile);
            }
        } catch (Exception e) {
            log.error("[transcribeAudio] 语音转文字失败", e);
            // Preserve the original exception as the cause instead of flattening it to a message
            throw new RuntimeException("语音转文字失败: " + e.getMessage(), e);
        }
    }

    /**
     * Restricts the caller-supplied audio format to alphanumeric characters so it is safe
     * to use as a temp-file suffix; falls back to "wav" when blank or fully stripped.
     */
    private static String sanitizeFormat(String format) {
        if (StrUtil.isBlank(format)) {
            return "wav";
        }
        String cleaned = format.replaceAll("[^A-Za-z0-9]", "");
        return cleaned.isEmpty() ? "wav" : cleaned;
    }

    /**
     * Transcribes Base64-encoded audio to text.
     *
     * @param audioBase64 Base64 audio, optionally prefixed with a data-URI header
     *                    such as {@code data:audio/wav;base64,}
     * @param format      audio container/extension, see {@link #transcribeAudio}
     * @param language    optional ISO language hint
     * @return recognized text
     * @throws RuntimeException (with cause) if decoding/transcription fails
     */
    @Override
    public String transcribeAudioBase64(String audioBase64, String format, String language) {
        try {
            // 移除 Base64 前缀（如果存在）
            if (audioBase64.contains(",")) {
                audioBase64 = audioBase64.split(",")[1];
            }

            // 解码 Base64
            byte[] audioBytes = Base64.decode(audioBase64);

            // 转换为输入流
            InputStream audioStream = new ByteArrayInputStream(audioBytes);

            return transcribeAudio(audioStream, format, language);
        } catch (RuntimeException e) {
            // transcribeAudio has already logged and wrapped its failures — rethrowing
            // as-is avoids the double-wrapped "语音转文字失败: 语音转文字失败: ..." message
            throw e;
        } catch (Exception e) {
            log.error("[transcribeAudioBase64] 语音转文字失败", e);
            throw new RuntimeException("语音转文字失败: " + e.getMessage(), e);
        }
    }

    /**
     * 使用替代方法进行语音识别
     *
     * 支持以下几种方式：
     * 1. whisper.cpp HTTP 服务
     * 2. OpenAI Whisper API
     * 3. 其他兼容的 Whisper 服务
     */
    private String transcribeWithAlternativeMethod(File audioFile, String language) {
        // 优先尝试 whisper.cpp HTTP 服务
        if (StrUtil.isNotBlank(whisperServiceUrl)) {
            try {
                return transcribeWithWhisperCpp(audioFile, language);
            } catch (Exception e) {
                log.warn("[transcribeWithAlternativeMethod] whisper.cpp 服务调用失败，尝试其他方式: {}", e.getMessage());
            }
        }

        // 尝试使用 OpenAI Whisper API
        if (StrUtil.isNotBlank(openAiApiKey) && StrUtil.isNotBlank(openAiBaseUrl)) {
            try {
                return transcribeWithOpenAI(audioFile, language);
            } catch (Exception e) {
                log.warn("[transcribeWithAlternativeMethod] OpenAI Whisper API 调用失败: {}", e.getMessage());
            }
        }

        // 如果所有方式都失败，返回提示信息
        log.error("[transcribeWithAlternativeMethod] 所有语音识别方式都不可用");
        return "【提示】语音识别功能需要配置实际的 Whisper 服务。\n\n" +
               "请参考以下部署方式：\n\n" +
               "方式1: whisper.cpp HTTP 服务\n" +
               "  1. 下载 whisper.cpp: git clone https://github.com/ggerganov/whisper.cpp.git\n" +
               "  2. 编译并启动 HTTP 服务: ./server -m models/ggml-base.bin --port 8080\n" +
               "  3. 配置 spring.ai.ollama.whisper.service-url=http://127.0.0.1:8080\n\n" +
               "方式2: OpenAI Whisper API\n" +
               "  1. 获取 OpenAI API Key\n" +
               "  2. 配置 spring.ai.openai.api-key 和 spring.ai.openai.base-url\n\n" +
               "方式3: Ollama + Whisper 模型\n" +
               "  1. 安装 Ollama: https://ollama.ai/\n" +
               "  2. 拉取模型: ollama pull whisper\n" +
               "  3. 配置 spring.ai.ollama.base-url=http://127.0.0.1:11434";
    }

    /**
     * 使用 whisper.cpp HTTP 服务进行语音识别
     *
     * Tries a multipart file upload first (newer servers, no Base64 overhead),
     * then falls back to a JSON body with Base64-encoded audio (older servers).
     */
    private String transcribeWithWhisperCpp(File audioFile, String language) {
        String url = whisperServiceUrl + WHISPER_CPP_API_PATH;

        log.info("[transcribeWithWhisperCpp] 调用 whisper.cpp 服务, url: {}", url);

        try {
            // 方式1：尝试文件上传（更高效）
            try {
                HttpResponse response = HttpRequest.post(url)
                        .form("file", audioFile)
                        .form("language", StrUtil.isNotBlank(language) ? language : "zh")
                        .timeout(HTTP_TIMEOUT_MS)
                        .execute();

                if (response.isOk()) {
                    JSONObject jsonResponse = JSONUtil.parseObj(response.body());
                    String text = jsonResponse.getStr("text");
                    if (StrUtil.isNotBlank(text)) {
                        return text.trim();
                    }
                }
            } catch (Exception e) {
                log.debug("[transcribeWithWhisperCpp] 文件上传方式失败，尝试 JSON 方式: {}", e.getMessage());
            }

            // 方式2：使用 JSON Base64（兼容旧版本）
            byte[] audioBytes = FileUtil.readBytes(audioFile);
            String audioBase64 = Base64.encode(audioBytes);

            JSONObject requestBody = new JSONObject();
            requestBody.put("data", audioBase64);
            requestBody.put("language", StrUtil.isNotBlank(language) ? language : "zh");

            HttpResponse response = HttpRequest.post(url)
                    .header("Content-Type", "application/json")
                    .body(requestBody.toString())
                    .timeout(HTTP_TIMEOUT_MS)
                    .execute();

            if (!response.isOk()) {
                throw new RuntimeException("whisper.cpp 服务返回错误: " + response.getStatus() + " - " + response.body());
            }

            String responseBody = response.body();
            JSONObject jsonResponse = JSONUtil.parseObj(responseBody);
            String text = jsonResponse.getStr("text");

            if (StrUtil.isBlank(text)) {
                throw new RuntimeException("whisper.cpp 服务返回空文本");
            }

            return text.trim();

        } catch (Exception e) {
            log.error("[transcribeWithWhisperCpp] 调用失败", e);
            // Keep the cause so the real failure (connect timeout, bad JSON, ...) isn't lost
            throw new RuntimeException("whisper.cpp 服务调用失败: " + e.getMessage(), e);
        }
    }

    /**
     * 使用 OpenAI Whisper API 进行语音识别
     */
    private String transcribeWithOpenAI(File audioFile, String language) {
        String url = openAiBaseUrl + OPENAI_WHISPER_API_PATH;

        log.info("[transcribeWithOpenAI] 调用 OpenAI Whisper API, url: {}", url);

        try {
            HttpRequest request = HttpRequest.post(url)
                    .header("Authorization", "Bearer " + openAiApiKey)
                    .form("file", audioFile)
                    .form("model", "whisper-1")
                    .timeout(HTTP_TIMEOUT_MS); // 60秒超时

            if (StrUtil.isNotBlank(language)) {
                request.form("language", language);
            }

            HttpResponse response = request.execute();

            if (!response.isOk()) {
                throw new RuntimeException("OpenAI API 返回错误: " + response.getStatus() + " - " + response.body());
            }

            String responseBody = response.body();
            JSONObject jsonResponse = JSONUtil.parseObj(responseBody);

            // OpenAI 返回格式: { "text": "转换后的文本" }
            String text = jsonResponse.getStr("text");

            if (StrUtil.isBlank(text)) {
                throw new RuntimeException("OpenAI API 返回空文本");
            }

            return text.trim();

        } catch (Exception e) {
            log.error("[transcribeWithOpenAI] 调用失败", e);
            // Keep the cause so the real failure isn't lost
            throw new RuntimeException("OpenAI Whisper API 调用失败: " + e.getMessage(), e);
        }
    }

}

