package com.share.ai.spring.web;

import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.ai.audio.transcription.AudioTranscriptionPrompt;
import org.springframework.ai.audio.transcription.AudioTranscriptionResponse;
import org.springframework.ai.chat.client.ChatClient;
import org.springframework.ai.chat.messages.Message;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.model.ChatModel;
import org.springframework.ai.chat.model.ChatResponse;
import org.springframework.ai.chat.prompt.ChatOptions;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.image.Image;
import org.springframework.ai.image.ImageModel;
import org.springframework.ai.image.ImagePrompt;
import org.springframework.ai.image.ImageResponse;
import org.springframework.ai.model.Media;
import org.springframework.ai.openai.*;
import org.springframework.ai.openai.api.OpenAiApi;
import org.springframework.ai.openai.api.OpenAiAudioApi;
import org.springframework.ai.openai.api.OpenAiImageApi;
import org.springframework.ai.openai.audio.speech.SpeechPrompt;
import org.springframework.ai.openai.audio.speech.SpeechResponse;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.ByteArrayResource;
import org.springframework.core.io.Resource;
import org.springframework.http.MediaType;
import org.springframework.util.MimeTypeUtils;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import reactor.core.publisher.Flux;

import java.nio.charset.StandardCharsets;
import java.util.Base64;

@Slf4j
@RestController
public class ChatGptController {

    // ChatClient is the portable, model-agnostic entry point; OpenAiChatModel is the OpenAI-specific class.
    @Autowired
    private ChatClient chatClient;

    // The starter auto-configures a ChatModel; ChatClient is a fluent wrapper around it.
    @Autowired
    private ChatModel chatModel;

    // OpenAiImageModel implements ImageModel, so either type can be injected here.
    @Autowired
    private ImageModel imageModel;

    // Text-to-speech (TTS) model.
    @Autowired
    private OpenAiAudioSpeechModel openAiAudioSpeechModel;

    // Speech-to-text (transcription) model.
    @Autowired
    private OpenAiAudioTranscriptionModel openAiAudioTranscriptionModel;

    /**
     * Simple one-shot chat: one question, one answer, the full content returned at once.
     *
     * @param userId    user id
     * @param sessionId conversation (dialog) id
     * @param prompt    the user's question
     * @return the model's complete answer text
     */
    @RequestMapping(value = "/chat")
    public String chat(@RequestParam(defaultValue = "default") String userId,
                       @RequestParam(defaultValue = "admin") String sessionId,
                       @RequestParam(value = "prompt", defaultValue = "你好我是openai智能机器人") String prompt) {
        // NOTE(review): memoryId is built but never used in any handler — presumably reserved
        // for chat-memory wiring; confirm before removing.
        String memoryId = userId + "_" + sessionId; // composed of user id and session id

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        String content = chatClient.prompt()
                .user(prompt)   // the user's question
                .call()         // blocking call — returns everything at once
                .content();     // extract the response text
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }

    /**
     * Streaming chat: the server pushes tokens as they are generated
     * (Server-Sent Events via WebFlux; {@code produces} sets the event-stream content type).
     *
     * @param userId    user id
     * @param sessionId conversation (dialog) id
     * @param prompt    the user's question
     * @return a Flux emitting the answer incrementally
     */
    @GetMapping(value = "/streamChat", produces = MediaType.TEXT_EVENT_STREAM_VALUE)
    public Flux<String> streamChat(@RequestParam(defaultValue = "default") String userId,
                                   @RequestParam(defaultValue = "admin") String sessionId,
                                   @RequestParam(value = "prompt", defaultValue = "你好，很高兴认识你，能简单介绍一下自己吗？") String prompt) {
        String memoryId = userId + "_" + sessionId; // composed of user id and session id (currently unused)

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        Flux<String> flux = chatClient.prompt()
                .user(prompt) // the user's question
                .stream()     // start a streaming call
                .content();   // Flux of response chunks
        long end = System.currentTimeMillis();
        // BUGFIX: the original format string had two placeholders ("耗时={},响应内容={}") but only
        // one argument, so SLF4J would print a literal "{}". Also note: the Flux is lazy —
        // this measures pipeline construction time, not actual generation time.
        log.info("思考耗时={}", end - start);
        return flux;
    }

    /**
     * Text-to-image generation.
     *
     * @param userId    user id
     * @param sessionId conversation (dialog) id
     * @param prompt    textual description of the desired image
     * @return the generated image as a base64 string (may be null when the provider
     *         returns a URL instead — see the logged url)
     */
    @RequestMapping(value = "/text2image")
    public String text2image(@RequestParam(defaultValue = "default") String userId,
                             @RequestParam(defaultValue = "admin") String sessionId,
                             @RequestParam(value = "prompt", defaultValue = "给我画个猫") String prompt) {
        String memoryId = userId + "_" + sessionId; // composed of user id and session id (currently unused)

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        OpenAiImageOptions imageOptions = OpenAiImageOptions.builder()
                .withModel(OpenAiImageApi.DEFAULT_IMAGE_MODEL) // model
                .withQuality("hd")                             // image quality
                .withHeight(1024)
                .withWidth(1024)
                .withN(1)                                      // generate a single image
                .build();
        ImageResponse imageResponse = imageModel.call(new ImagePrompt(prompt, imageOptions));
        Image output = imageResponse.getResult().getOutput();
        String url = output.getUrl();          // hosted file URL, if the provider returns one
        String content = output.getB64Json();  // base64-encoded image, if requested/returned
        long end = System.currentTimeMillis();
        log.info("耗时={},url={},响应内容={}", end - start, url, content);
        return content;
    }

    /**
     * Text-to-speech: converts text into audio.
     *
     * @param userId    user id
     * @param sessionId conversation (dialog) id
     * @param prompt    the text to speak
     * @return the generated MP3 audio, base64-encoded
     */
    @RequestMapping(value = "/text2audio")
    public String text2audio(@RequestParam(defaultValue = "default") String userId,
                             @RequestParam(defaultValue = "admin") String sessionId,
                             @RequestParam(value = "prompt", defaultValue = "你是我的小呀小苹果,怎么爱你都不嫌多") String prompt) {
        String memoryId = userId + "_" + sessionId; // composed of user id and session id (currently unused)

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        OpenAiAudioSpeechOptions speechOptions = OpenAiAudioSpeechOptions.builder()
                .model(OpenAiAudioApi.TtsModel.TTS_1.value)                             // model
                .speed(1.0F)                                                            // speech rate
                .responseFormat(OpenAiAudioApi.SpeechRequest.AudioResponseFormat.MP3)   // output format
                .voice(OpenAiAudioApi.SpeechRequest.Voice.ECHO)                         // voice
                .build();
        SpeechResponse speechResponse = openAiAudioSpeechModel.call(new SpeechPrompt(prompt, speechOptions));
        byte[] output = speechResponse.getResult().getOutput();
        String content = Base64.getEncoder().encodeToString(output); // return as base64
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }

    /**
     * Speech-to-text: transcribes base64-encoded audio into text.
     *
     * @param userId    user id
     * @param sessionId conversation (dialog) id
     * @param prompt    the audio content as a base64 string
     * @return the transcribed text
     */
    @RequestMapping(value = "/audio2text")
    public String audio2text(@RequestParam(defaultValue = "default") String userId,
                             @RequestParam(defaultValue = "admin") String sessionId,
                             @RequestParam(value = "prompt", defaultValue = "base64语音字符串") String prompt) {
        String memoryId = userId + "_" + sessionId; // composed of user id and session id (currently unused)

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        OpenAiAudioTranscriptionOptions transcriptionOptions = OpenAiAudioTranscriptionOptions.builder()
                .temperature(0F)                                                  // no creativity needed
                .responseFormat(OpenAiAudioApi.TranscriptResponseFormat.TEXT)     // plain-text output
                .build();

        // decode(String) is equivalent to decode(s.getBytes(...)) for base64 input (ASCII alphabet)
        byte[] bytes = Base64.getDecoder().decode(prompt);
        Resource resource = new ByteArrayResource(bytes);
        AudioTranscriptionResponse audioTranscriptionResponse =
                openAiAudioTranscriptionModel.call(new AudioTranscriptionPrompt(resource, transcriptionOptions));
        String content = audioTranscriptionResponse.getResult().getOutput().toString();
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }

    /**
     * Multimodal request: sends text plus an optional attached file in a single call.
     *
     * @param userId     user id
     * @param sessionId  conversation (dialog) id
     * @param prompt     the user's question
     * @param fileBase64 the attachment, base64-encoded (optional)
     * @return the model's answer text
     */
    @RequestMapping(value = "/mult")
    public String mult(@RequestParam(defaultValue = "default") String userId,
                       @RequestParam(defaultValue = "admin") String sessionId,
                       @RequestParam(value = "prompt", defaultValue = "附件的主要内容是啥") String prompt, String fileBase64) {
        String memoryId = userId + "_" + sessionId; // composed of user id and session id (currently unused)

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();

        // user message (text-only by default)
        Message message = new UserMessage(prompt);
        if (StringUtils.isNotBlank(fileBase64)) {
            // BUGFIX: the original decoded `prompt` here instead of `fileBase64`, so the
            // attachment was never actually sent (copy-paste error from audio2text).
            byte[] bytes = Base64.getDecoder().decode(fileBase64);
            Resource resource = new ByteArrayResource(bytes);
            Media media = new Media(MimeTypeUtils.ALL, resource);

            message = new UserMessage(prompt, media); // replace the text-only message
        }

        // call options
        ChatOptions chatOptions = OpenAiChatOptions.builder()
                .model(OpenAiApi.ChatModel.GPT_4_O) // model
                .build();

        ChatResponse chatResponse = chatModel.call(new Prompt(message, chatOptions));
        String content = chatResponse.getResult().getOutput().getText(); // extract the response text
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }

    /**
     * Function-calling demo: lets the model invoke a registered Spring bean as a tool.
     *
     * @param userId    user id
     * @param sessionId conversation (dialog) id
     * @param prompt    the user's question
     * @return the model's answer text
     */
    @RequestMapping(value = "/functionCall")
    public String functionCall(@RequestParam(defaultValue = "default") String userId,
                               @RequestParam(defaultValue = "admin") String sessionId,
                               @RequestParam(value = "prompt", defaultValue = "附件的主要内容是啥") String prompt) {
        String memoryId = userId + "_" + sessionId; // composed of user id and session id (currently unused)

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();

        // call options
        ChatOptions chatOptions = OpenAiChatOptions.builder()
                .model(OpenAiApi.ChatModel.GPT_4_O) // model
                .function("myFunctionCall")         // function-call: the name of the Spring bean
                .build();

        ChatResponse chatResponse = chatModel.call(new Prompt(prompt, chatOptions));
        String content = chatResponse.getResult().getOutput().getText(); // extract the response text
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }
}
