package com.share.ai.alibaba.dashscope.web;

import com.alibaba.cloud.ai.dashscope.api.DashScopeAudioSpeechApi;
import com.alibaba.cloud.ai.dashscope.audio.DashScopeAudioSpeechOptions;
import com.alibaba.cloud.ai.dashscope.audio.synthesis.SpeechSynthesisModel;
import com.alibaba.cloud.ai.dashscope.audio.synthesis.SpeechSynthesisPrompt;
import com.alibaba.cloud.ai.dashscope.audio.synthesis.SpeechSynthesisResponse;
import com.alibaba.cloud.ai.dashscope.audio.transcription.AudioTranscriptionModel;
import com.share.ai.common.records.Person;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.audio.transcription.AudioTranscriptionPrompt;
import org.springframework.ai.audio.transcription.AudioTranscriptionResponse;
import org.springframework.ai.chat.client.ChatClient;
import org.springframework.ai.chat.memory.ChatMemory;
import org.springframework.ai.chat.messages.Message;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.model.ChatModel;
import org.springframework.ai.chat.model.ChatResponse;
import org.springframework.ai.chat.prompt.ChatOptions;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.image.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.ByteArrayResource;
import org.springframework.core.io.Resource;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import reactor.core.publisher.Flux;

import java.nio.charset.StandardCharsets;
import java.util.Base64;


@Slf4j
@RestController
public class AliController {

    @Autowired
    private ChatClient chatClient;
    @Autowired
    private ChatModel chatModel;
    @Autowired
    private ImageModel imageModel;
    @Autowired
    private AudioTranscriptionModel audioTranscriptionModel;
    @Autowired
    private SpeechSynthesisModel speechSynthesisModel;

    /**
     * Simple blocking chat: one question, one answer, the whole response is
     * returned in a single call.
     *
     * @param userId    user id; combined with {@code sessionId} to key chat memory
     * @param sessionId conversation (dialog box) id
     * @param prompt    the user's question
     * @param voice     persona name substituted into the system-prompt template
     * @return the model's complete answer
     */
    @GetMapping("/chat")
    public String chat(@RequestParam(defaultValue = "default") String userId,
                       @RequestParam(defaultValue = "admin") String sessionId,
                       @RequestParam(value = "prompt", defaultValue = "你好，很高兴认识你，能简单介绍一下自己吗？") String prompt,
                       @RequestParam(value = "voice", defaultValue = "沈腾") String voice) {
        // Memory key = userId + sessionId so each user/conversation pair keeps its own history.
        String memoryId = userId + "_" + sessionId;

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        String content = chatClient.prompt()
                .advisors(a -> a.param(ChatMemory.CONVERSATION_ID, memoryId)) // chat-memory advisor (streaming supports it too)
                .system(s -> s.param("voice", voice)) // fill the "voice" template variable in the system prompt
                .user(prompt)
                .call()     // blocking call — the full answer is returned at once
                .content();
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }

    /**
     * Chat that converts the model's reply into a {@link Person}.
     * Uses Spring AI structured-output conversion: record component names are
     * matched against the generated JSON (name-based auto-mapping).
     *
     * @param userId    user id (unused here — no memory advisor is attached)
     * @param sessionId conversation id (unused here)
     * @param prompt    the user's question
     * @param voice     persona name substituted into the system-prompt template
     * @return the reply mapped onto a {@link Person}
     */
    @GetMapping("/chatEntity")
    public Person chatEntity(@RequestParam(defaultValue = "default") String userId,
                             @RequestParam(defaultValue = "admin") String sessionId,
                             @RequestParam(value = "prompt", defaultValue = "你好，很高兴认识你，能简单介绍一下自己吗？") String prompt,
                             @RequestParam(value = "voice", defaultValue = "沈腾") String voice) {
        return chatClient.prompt()
                .system(s -> s.param("voice", voice)) // fill the "voice" template variable
                .user(prompt)
                .call()
                .entity(Person.class); // structured output: JSON → record by field name
    }

    /**
     * Streaming chat: the backend emits the answer token by token over the
     * response (e.g. 5 tokens/s reads as 5 characters per second).
     * Requires WebFlux/SSE on the client side; {@code produces} sets the
     * response media type so UTF-8 text is not garbled.
     *
     * @param userId    user id; combined with {@code sessionId} to key chat memory
     * @param sessionId conversation id
     * @param prompt    the user's question
     * @return a {@link Flux} of answer fragments
     */
    @GetMapping(value = "/streamChat", produces = "text/html;charset=UTF-8")
    public Flux<String> streamChat(@RequestParam(defaultValue = "default") String userId,
                                   @RequestParam(defaultValue = "admin") String sessionId,
                                   @RequestParam(value = "prompt", defaultValue = "你好，很高兴认识你，能简单介绍一下自己吗？") String prompt) {
        String memoryId = userId + "_" + sessionId;

        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        Flux<String> flux = chatClient.prompt()
                .advisors(a -> a.param(ChatMemory.CONVERSATION_ID, memoryId)) // chat-memory advisor
                .user(prompt)
                .stream()    // streaming call
                .content();
        long end = System.currentTimeMillis();
        // NOTE: the Flux is lazy — nothing has been sent to the model yet, so this
        // only measures pipeline construction, not the model's response time.
        log.info("构建流耗时={}ms", end - start);
        return flux;
    }


    /**
     * Text-to-image generation.
     *
     * @param userId    user id (unused — kept for a uniform endpoint signature)
     * @param sessionId conversation id (unused)
     * @param prompt    natural-language description of the desired image
     * @return the generated image as a Base64 string (the remote URL is logged)
     */
    @GetMapping(value = "/text2image")
    public String text2image(@RequestParam(defaultValue = "default") String userId,
                             @RequestParam(defaultValue = "admin") String sessionId,
                             @RequestParam(value = "prompt", defaultValue = "给我画个猫") String prompt) {
        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        ImageOptions imageOptions = ImageOptionsBuilder.builder()
                .model("wanx2.1-t2i-turbo") // DashScope text-to-image model
                .height(1024)
                .width(1024)
                .N(1) // generate a single image
                .build();

        ImageResponse imageResponse = imageModel.call(new ImagePrompt(prompt, imageOptions));
        Image output = imageResponse.getResult().getOutput();
        String url = output.getUrl();       // hosted image URL
        String content = output.getB64Json(); // Base64-encoded image payload
        long end = System.currentTimeMillis();
        log.info("耗时={},url={},响应内容={}", end - start, url, content);
        return content;
    }


    /**
     * Text-to-speech synthesis; the audio is returned as an MP3 download.
     *
     * @param userId    user id (unused — kept for a uniform endpoint signature)
     * @param sessionId conversation id (unused)
     * @param prompt    the text to speak
     * @return the synthesized MP3 bytes as an attachment
     */
    @GetMapping(value = "/text2audio")
    public ResponseEntity<byte[]> text2audio(@RequestParam(defaultValue = "default") String userId,
                                             @RequestParam(defaultValue = "admin") String sessionId,
                                             @RequestParam(value = "prompt", defaultValue = "你是我的小呀小苹果,怎么爱你都不嫌多") String prompt) {
        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();
        DashScopeAudioSpeechOptions options = DashScopeAudioSpeechOptions.builder()
                .speed(1.0F) // speaking speed (1.0 = normal)
                .responseFormat(DashScopeAudioSpeechApi.ResponseFormat.MP3)
                .build();
        SpeechSynthesisResponse response = speechSynthesisModel.call(new SpeechSynthesisPrompt(prompt, options));
        byte[] audio = response.getResult().getOutput().getAudio().array();
        long end = System.currentTimeMillis();
        log.info("耗时={}", end - start);

        // Served as a file download; to play inline in the browser instead,
        // use contentType(MediaType.valueOf("audio/mpeg")) without the
        // Content-Disposition attachment header.
        return ResponseEntity.ok()
                .contentType(MediaType.APPLICATION_OCTET_STREAM)
                .header("Content-Disposition", "attachment; filename=output.mp3")
                .body(audio);
    }

    /**
     * Speech-to-text transcription.
     *
     * @param userId    user id (unused — kept for a uniform endpoint signature)
     * @param sessionId conversation id (unused)
     * @param prompt    the audio content encoded as a Base64 string
     * @return the transcribed text
     */
    @GetMapping(value = "/audio2text")
    public String audio2text(@RequestParam(defaultValue = "default") String userId,
                             @RequestParam(defaultValue = "admin") String sessionId,
                             @RequestParam(value = "prompt", defaultValue = "base64语音字符串") String prompt) {
        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();

        // Base64 is pure ASCII, so decoding the String directly is equivalent
        // to decoding its UTF-8 bytes.
        byte[] bytes = Base64.getDecoder().decode(prompt);
        Resource resource = new ByteArrayResource(bytes);
        AudioTranscriptionResponse response = audioTranscriptionModel.call(new AudioTranscriptionPrompt(resource));
        String content = response.getResult().getOutput();
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }

    /**
     * Multimodal request: intended to send text plus an attached file in one call.
     *
     * @param userId     user id (unused — kept for a uniform endpoint signature)
     * @param sessionId  conversation id (unused)
     * @param prompt     the user's question about the attachment
     * @param fileBase64 the attachment, Base64-encoded
     * @return the model's answer
     */
    @GetMapping(value = "/mult")
    public String mult(@RequestParam(defaultValue = "default") String userId,
                       @RequestParam(defaultValue = "admin") String sessionId,
                       @RequestParam(value = "prompt", defaultValue = "附件的主要内容是啥") String prompt, String fileBase64) {
        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();

        // TODO: fileBase64 is accepted but never attached to the message — the
        // media part of the multimodal request still needs to be wired in
        // (e.g. via UserMessage media support).
        Message message = new UserMessage(prompt);

        // NOTE(review): "GPT_4_O" looks like an OpenAI enum name, not a
        // DashScope model id (e.g. qwen-*) — confirm against the configured
        // ChatModel provider.
        ChatOptions chatOptions = ChatOptions.builder()
                .model("GPT_4_O")
                .build();

        ChatResponse chatResponse = chatModel.call(new Prompt(message, chatOptions));
        String content = chatResponse.getResult().getOutput().getText();
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }


    /**
     * Function-calling demo endpoint.
     *
     * @param userId    user id (unused — kept for a uniform endpoint signature)
     * @param sessionId conversation id (unused)
     * @param prompt    the user's question
     * @return the model's answer
     */
    @GetMapping(value = "/functionCall")
    public String functionCall(@RequestParam(defaultValue = "default") String userId,
                               @RequestParam(defaultValue = "admin") String sessionId,
                               @RequestParam(value = "prompt", defaultValue = "附件的主要内容是啥") String prompt) {
        log.info("请求参数={}", prompt);
        long start = System.currentTimeMillis();

        // NOTE(review): no tools/functions are registered on this request, and
        // "GPT_4_O" looks like an OpenAI enum name rather than a DashScope
        // model id — both need confirming for function calling to work.
        ChatOptions chatOptions = ChatOptions.builder()
                .model("GPT_4_O")
                .build();

        ChatResponse chatResponse = chatModel.call(new Prompt(prompt, chatOptions));
        String content = chatResponse.getResult().getOutput().getText();
        long end = System.currentTimeMillis();
        log.info("耗时={},响应内容={}", end - start, content);
        return content;
    }
}
