package io.github.zatter.qiniucorejava.utils;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.qiniu.common.QiniuException;
import io.github.zatter.qiniucorejava.mapper.AiMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.*;
import org.springframework.http.client.SimpleClientHttpRequestFactory;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import org.springframework.web.client.HttpClientErrorException;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.multipart.MultipartFile;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.time.Duration;
import java.util.Base64;
import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CopyOnWriteArrayList;

// Voice pipeline: speech-to-text -> AI chat completion -> text-to-speech.
@Slf4j
@Component
public class AudioProcessing {
    // Model configuration.
    // SECURITY(review): API key is hardcoded in source control. Move it to external
    // configuration (@Value / environment variable) and rotate this key.
    private static final String API_KEY = "sk-610453bad9743dfb294ae3234ada68874c4c1640e6c4cd716dfac043a774eb21";
    private static final String BASE_URL = "https://openai.qiniu.com/v1"; // primary endpoint
    private static final String MODEL_ID = "x-ai/grok-4-fast";

    // HTTP timeouts for the chat-completion call.
    private static final Duration CONNECT_TIMEOUT = Duration.ofSeconds(30);
    private static final Duration READ_TIMEOUT = Duration.ofSeconds(60);

    // Maximum number of conversation messages kept as context.
    private static final int MAX_CONTEXT_MESSAGES = 20;

    private final RestTemplate restTemplate;
    private final ObjectMapper objectMapper;

    @Autowired
    private AiMapper aiMapper;

    // URLs of every audio file uploaded today; deleted in bulk at midnight.
    // CopyOnWriteArrayList because this singleton bean is shared across request threads.
    private final CopyOnWriteArrayList<String> audioFiles = new CopyOnWriteArrayList<>();

    // Conversation context, FIFO. Each entry is an ObjectNode with "role" and "content".
    // ConcurrentLinkedQueue (not LinkedList) so concurrent requests can append, iterate
    // and clear without ConcurrentModificationException — this bean is a shared singleton.
    private final Queue<ObjectNode> contextMessages = new ConcurrentLinkedQueue<>();

    // Initialize RestTemplate (with connect/read timeouts) and ObjectMapper.
    public AudioProcessing() {
        SimpleClientHttpRequestFactory factory = new SimpleClientHttpRequestFactory();
        factory.setConnectTimeout(CONNECT_TIMEOUT);
        factory.setReadTimeout(READ_TIMEOUT);
        this.restTemplate = new RestTemplate(factory);
        this.objectMapper = new ObjectMapper();
    }

    /**
     * Full pipeline for one voice message: upload audio -> speech-to-text ->
     * chat completion (with rolling context) -> text-to-speech -> upload reply mp3.
     *
     * @param audioFile the user's recorded audio
     * @param prompt    the persona the AI should role-play
     * @return URL of the synthesized reply mp3, or a human-readable error string
     */
    public String processAudio(MultipartFile audioFile, String prompt) {
        log.info("语音聊天传入的角色：{}", prompt);

        // Persist the persona; a persona change resets the conversation context.
        syncRole(prompt);

        try {
            // Upload the raw audio so the STT service can fetch it by URL.
            QiniuOssUtil qiniuOssUtil = new QiniuOssUtil();
            String audioFileURL = qiniuOssUtil.uploadFile(audioFile);
            audioFiles.add(audioFileURL);

            // Speech-to-text.
            VoiceToTextService voiceToTextService = new VoiceToTextService();
            String chatMessage = voiceToTextService.convertVoiceToTextByUrl(audioFileURL);

            // Record the user's message in the rolling context.
            addToContext("user", chatMessage);

            // Build request headers.
            HttpHeaders headers = new HttpHeaders();
            headers.set("Authorization", "Bearer " + API_KEY);
            headers.setContentType(MediaType.APPLICATION_JSON);

            // Build the full request and call the chat/completions endpoint.
            HttpEntity<String> request = new HttpEntity<>(
                    objectMapper.writeValueAsString(buildRequestBody(prompt)),
                    headers
            );
            String apiUrl = BASE_URL + "/chat/completions";
            ResponseEntity<String> response = restTemplate.postForEntity(apiUrl, request, String.class);

            if (response.getStatusCode() == HttpStatus.OK) {
                String responseBody = response.getBody();
                if (responseBody != null) {
                    JsonNode jsonNode = objectMapper.readTree(responseBody);
                    // path(...) chains return a missing node instead of null, so an
                    // empty/absent "choices" array no longer throws NPE (get(0) did).
                    JsonNode content = jsonNode.path("choices").path(0).path("message").path("content");
                    if (content.isMissingNode()) {
                        return "处理成功，但未返回内容";
                    }
                    String result = content.asText();
                    log.info("大模型处理成功，结果：{}", result);

                    // Record the AI reply in the rolling context.
                    addToContext("assistant", result);

                    // Text-to-speech the reply and upload the mp3.
                    TextToVoiceService textToVoiceService = new TextToVoiceService();
                    byte[] bytes = textToVoiceService.textToSpeech(result);

                    String mp3 = qiniuOssUtil.uploadAudioBytes(bytes, "mp3");
                    audioFiles.add(mp3);
                    return mp3;
                }
                return "处理成功，但未返回内容";
            } else {
                return "错误：模型返回非成功状态码 " + response.getStatusCode() + "，响应内容：" + response.getBody();
            }

        } catch (Exception e) {
            log.error("音频处理失败", e);
            String errorMsg = e.getMessage();
            if (e instanceof HttpClientErrorException) {
                // Surface the HTTP error body to the caller for easier diagnosis.
                errorMsg += "，响应详情：" + ((HttpClientErrorException) e).getResponseBodyAsString();
            }
            return "处理语音时发生错误：" + errorMsg;
        }
    }

    /**
     * Persist the requested persona; when it differs from the stored one, update
     * the database and drop the old conversation context.
     */
    private void syncRole(String prompt) {
        String role = aiMapper.GetSetting("ai_role");
        if (role == null) {
            // No persona stored yet — just save it.
            aiMapper.UpdateSetting("ai_role", prompt);
        } else if (!role.equals(prompt)) {
            // Persona changed — persist it and clear the stale context.
            aiMapper.UpdateSetting("ai_role", prompt);
            contextMessages.clear();
            log.info("角色改变，清空上下文");
        }
    }

    /**
     * Build the chat/completions JSON payload: one system message fixing the
     * persona, followed by the rolling conversation context (user + assistant).
     */
    private ObjectNode buildRequestBody(String prompt) {
        ObjectNode requestBody = objectMapper.createObjectNode();
        requestBody.put("stream", false);
        requestBody.put("model", MODEL_ID);

        ArrayNode messages = objectMapper.createArrayNode();

        ObjectNode systemMessage = objectMapper.createObjectNode();
        systemMessage.put("role", "system");
        systemMessage.put("content", "你现在是" + prompt + "，请使用中文，像打电话一样和我聊天吧!");
        messages.add(systemMessage);

        // Context already contains the latest user message (added before this call).
        contextMessages.forEach(messages::add);

        requestBody.set("messages", messages);
        return requestBody;
    }

    /**
     * Append a message to the conversation context, evicting the oldest entries
     * (FIFO) once MAX_CONTEXT_MESSAGES is exceeded.
     */
    private void addToContext(String role, String content) {
        ObjectNode message = objectMapper.createObjectNode();
        message.put("role", role);
        message.put("content", content);

        contextMessages.add(message);

        // Trim from the head (oldest) until within the limit.
        while (contextMessages.size() > MAX_CONTEXT_MESSAGES) {
            contextMessages.poll();
        }

        log.info("上下文消息数量: {}", contextMessages.size());
    }

    // Scheduled job: every midnight, delete the previous day's audio files.
    @Scheduled(cron = "0 0 0 * * ?")
    public void SchedulingDeleteAudioFile() {
        QiniuOssUtil qiniuOssUtil = new QiniuOssUtil();
        for (String audioFile : audioFiles) {
            String fileName = audioFile.substring(audioFile.lastIndexOf('/') + 1);
            log.info("删除文件：{}", fileName);
            try {
                qiniuOssUtil.deleteFile(fileName);
                // Forget the URL only after a successful delete; previously the list
                // was never cleared, so it grew forever and re-deleted old files.
                audioFiles.remove(audioFile);
            } catch (QiniuException e) {
                // Log and continue: one failed delete must no longer abort the whole
                // cleanup (the original rethrow left all remaining files undeleted).
                log.error("删除文件失败：{}", fileName, e);
            }
        }
    }
}
