package cn.whale.chat;

import cn.whale.assistant.OllamaAssistant;
import cn.whale.assistant.RAGAssistant;
import cn.whale.chain.TaskType;
import cn.whale.chain.TaskTypeAssistant;
import com.alibaba.dashscope.aigc.videosynthesis.VideoSynthesis;
import com.alibaba.dashscope.aigc.videosynthesis.VideoSynthesisParam;
import com.alibaba.dashscope.aigc.videosynthesis.VideoSynthesisResult;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesisParam;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesizer;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.utils.JsonUtils;
import dev.langchain4j.community.model.dashscope.WanxImageModel;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.splitter.DocumentByLineSplitter;
import dev.langchain4j.data.image.Image;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.ollama.OllamaChatModel;
import dev.langchain4j.model.ollama.OllamaEmbeddingModel;
import dev.langchain4j.model.ollama.OllamaStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.service.TokenStream;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import jakarta.annotation.Resource;
import jakarta.servlet.http.HttpServletResponse;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;
import reactor.core.publisher.Flux;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

import static org.springframework.http.MediaType.TEXT_EVENT_STREAM_VALUE;

@RestController
@Slf4j
public class ChatController {

    @Resource
    private OpenAiChatModel openAiChatModel;
    @Resource
    private OllamaChatModel ollamaChatModel;
    @Resource
    private OllamaStreamingChatModel ollamaStreamingChatModel;
    @Resource
    private OllamaAssistant ollamaAssistant;


    /**
     * OpenAI API integration.
     *
     * @param message the user's message
     * @return the model's reply
     */
    @RequestMapping("/chat/openai")
    public String chatOpenAi(@RequestParam("message") String message){
        return openAiChatModel.chat(message);
    }


    /**
     * Local LLM (Ollama) integration.
     *
     * @param message the user's message
     * @return the model's reply
     */
    @RequestMapping("/chat/ollama")
    public String chatOllama(@RequestParam("message") String message){
        return ollamaChatModel.chat(message);
    }

    /**
     * Streaming output from the local Ollama model via Server-Sent Events.
     *
     * @param message  the user's message
     * @param response injected by Spring; currently unused but kept for
     *                 signature compatibility
     * @return an {@link SseEmitter} that the handler implementation completes
     */
    @RequestMapping(value="/chat/ollama/stream")
    public SseEmitter chatOllamaStream(@RequestParam("message") String message, HttpServletResponse response) {

        // Default (container-level) timeout; the handler is responsible for
        // calling complete()/completeWithError() on this emitter.
        SseEmitter emitter = new SseEmitter();

        ollamaStreamingChatModel.chat(message, new cn.whale.handler.StreamingChatResponseHandlerImpl(emitter));

        return emitter;
    }

    /**
     * Conversation-memory chat, streamed as a reactive {@link Flux}.
     *
     * @param message  the user's message
     * @param memoryId key identifying the conversation whose memory to use
     * @return streamed reply chunks
     */
    @RequestMapping(value="/chat/ollama/memory", produces = TEXT_EVENT_STREAM_VALUE)
    public Flux<String> chatOllamaMemory(@RequestParam("message") String message, @RequestParam("memoryId")String memoryId) {
        setEncoder();
        return ollamaAssistant.chatStream(message, memoryId);
    }


    /**
     * Conversation-memory chat, streamed through a {@link TokenStream} onto an
     * {@link SseEmitter}.
     *
     * @param message  the user's message
     * @param memoryId key identifying the conversation whose memory to use
     * @return an emitter that receives each partial model response
     */
    @RequestMapping(value="/chat/ollama/memory/stream")
    public SseEmitter chatOllamaTokenStreamMemory(@RequestParam("message") String message, @RequestParam("memoryId")String memoryId) {
        SseEmitter emitter = new SseEmitter();

        TokenStream tokenStream = ollamaAssistant.chatTokenStream(message, memoryId);

        setEncoder();

        tokenStream.onPartialResponse((String partialResponse) -> {
            try {
                log.info("partialResponse -> {}",partialResponse);
                emitter.send(partialResponse);
            } catch (IOException e) {
                // Wrapping preserves the cause; the token stream surfaces it.
                throw new RuntimeException(e);
            }
        })
        /*.onRetrieved((List<Content> contents) -> System.out.println(contents))
        .onToolExecuted((ToolExecution toolExecution) -> System.out.println(toolExecution))
        .onError((Throwable error) -> error.printStackTrace())*/
        .start();
        return emitter;
    }


    /**
     * Forces the current HTTP response to use the SSE content type with UTF-8
     * so non-ASCII characters in streamed chunks are not garbled.
     * Silently does nothing when called outside a request context.
     */
    private void setEncoder(){
        ServletRequestAttributes requestAttributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        if (requestAttributes == null) {
            // Not bound to a request thread; nothing to configure.
            return;
        }
        HttpServletResponse response = requestAttributes.getResponse();
        if (response != null) {
            response.setContentType("text/event-stream;charset=utf-8");
            response.setCharacterEncoding("UTF-8");
        }
    }


    /**
     * Conversation-memory chat with tool invocation (weather lookup).
     *
     * @param message  the user's message
     * @param memoryId key identifying the conversation whose memory to use
     * @return streamed reply chunks
     */
    @RequestMapping(value="/chat/ollama/tool", produces = TEXT_EVENT_STREAM_VALUE)
    public Flux<String> chatOllamaSearch(@RequestParam("message") String message, @RequestParam("memoryId")String memoryId) {
        setEncoder();
        return ollamaAssistant.weather(message, memoryId);
    }



    /*@GetMapping("/high/chat")
    public String highChat(@RequestParam(value = "message") String message) throws IOException {

        File file = new File("D:\\lecture\\lecture-langchain\\documents\\cat.png");
        byte[] bytes = Files.readAllBytes(file.toPath());

        UserMessage userMessage = UserMessage.from(TextContent.from(message),
                ImageContent.from(Base64.getEncoder().encodeToString(bytes), "image/png"));
        return chatLanguageModel.chat(List.of(userMessage)).aiMessage().text();
    }*/



    /**
     * Conversation-memory chat with tool invocation (web search).
     * NOTE(review): method name is missing an 'a' ("chatOllamSearch"); kept
     * as-is for compatibility — the HTTP path is what callers depend on.
     *
     * @param message  the user's message
     * @param memoryId key identifying the conversation whose memory to use
     * @return streamed reply chunks
     */
    @RequestMapping(value="/chat/ollama/search", produces = TEXT_EVENT_STREAM_VALUE)
    public Flux<String> chatOllamSearch(@RequestParam("message") String message, @RequestParam("memoryId")String memoryId) {
        setEncoder();
        return ollamaAssistant.search(message, memoryId);
    }



    // Embedding (vector) store backing the RAG knowledge base.
    @Resource
    private EmbeddingStore<TextSegment> embeddingStore;

    // Embedding model used to vectorize document segments.
    @Resource
    private OllamaEmbeddingModel embeddingModel;

    /**
     * Loads documents from a fixed local directory and ingests them into the
     * RAG knowledge base.
     * NOTE(review): the directory is hardcoded to a Windows desktop path;
     * consider externalizing to configuration.
     */
    @RequestMapping("/rag/load")
    public void ragLoad() {
        List<Document> documents = FileSystemDocumentLoader.loadDocuments("C:\\Users\\Administrator\\Desktop\\rag");
        EmbeddingStoreIngestor.builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(embeddingModel)
                // Splitting rule: segments of up to 200 chars with 10-char overlap,
                // split on line boundaries.
                .documentSplitter(new DocumentByLineSplitter(200,10))
                .build().ingest(documents);
    }

    @Resource
    private RAGAssistant ragAssistant;

    /**
     * Knowledge-base retrieval chat (RAG).
     *
     * @param message  the user's message
     * @param memoryId key identifying the conversation whose memory to use
     * @return streamed reply chunks grounded in the knowledge base
     */
    @RequestMapping("/rag/chat")
    public Flux<String> ragChat(@RequestParam("message") String message, @RequestParam("memoryId")String memoryId) {
        setEncoder();
        return ragAssistant.ragChat(message, memoryId);
    }



    /**
     * Text-to-image generation.
     * SECURITY(review): the API key below is committed in source — rotate it
     * and load it from configuration/environment instead.
     *
     * @param message the prompt
     * @return URL of the generated image
     */
    @RequestMapping(value="/chat/image", produces = TEXT_EVENT_STREAM_VALUE)
    public String chatImage(@RequestParam("message") String message) {
        setEncoder();
        WanxImageModel wanxImageModel = WanxImageModel.builder().modelName("wanx2.1-t2i-plus").apiKey("sk-bf4252a94e494b30bd2c025ae015ef89").build();
        Response<Image> response = wanxImageModel.generate(message);
        return response.content().url().toString();
    }

    /**
     * Text-to-speech generation. Synthesizes the message synchronously and
     * writes the audio to a local file "output.mp3".
     * SECURITY(review): replace the placeholder API key with one loaded from
     * configuration/environment.
     *
     * @param message the text to synthesize
     * @return "ok" on success
     */
    @RequestMapping(value="/chat/audio", produces = TEXT_EVENT_STREAM_VALUE)
    public String chatAudio(@RequestParam("message") String message) {

        String model = "cosyvoice-v1";
        String voice = "longxiaochun";

        SpeechSynthesisParam param = SpeechSynthesisParam.builder()
                        // If the API key is not set in the environment, put it here.
                        .apiKey("sk-xx")
                        .model(model)
                        .voice(voice)
                        .build();

        // Synchronous mode: callbacks disabled (second argument is null).
        SpeechSynthesizer synthesizer = new SpeechSynthesizer(param, null);
        // Blocks until the audio is returned.
        ByteBuffer audio = synthesizer.call(message);
        File file = new File("output.mp3");
        log.info("[Metric] requestId: {}, first package delay ms: {}",
                synthesizer.getLastRequestId(),
                synthesizer.getFirstPackageDelay());
        // Copy exactly the buffer's content: audio.array() would expose the
        // whole backing array (possibly larger than the payload, or absent for
        // direct buffers) and ignore the buffer's position/offset.
        byte[] data = new byte[audio.remaining()];
        audio.get(data);
        try (FileOutputStream fos = new FileOutputStream(file)) {
            fos.write(data);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return "ok";
    }

    /**
     * Text-to-video generation.
     * SECURITY(review): replace the placeholder API key with one loaded from
     * configuration/environment.
     *
     * @param message the prompt
     * @return the synthesis result serialized as JSON
     */
    @RequestMapping(value="/chat/video", produces = TEXT_EVENT_STREAM_VALUE)
    public String chatVideo(@RequestParam("message") String message) {

        VideoSynthesis vs = new VideoSynthesis();
        VideoSynthesisParam param = VideoSynthesisParam.builder()
                        .apiKey("sk-xx")
                        .model("wanx2.1-t2v-turbo")
                        .prompt(message)
                        .size("1280*720")
                        .build();
        try {
            return JsonUtils.toJson(vs.call(param));
        } catch (NoApiKeyException | InputRequiredException e) {
            throw new RuntimeException(e);
        }
    }



    @Resource
    private TaskTypeAssistant taskTypeAssistant;

    /**
     * AI chain: a classifier model first decides the task type, then the
     * request is routed to the matching assistant.
     *
     * @param message  the user's message
     * @param memoryId key identifying the conversation whose memory to use
     * @return streamed reply chunks from the selected assistant
     */
    @RequestMapping(value="/chat/chain", produces = TEXT_EVENT_STREAM_VALUE)
    public Flux<String> aiChain(@RequestParam("message") String message, @RequestParam("memoryId")String memoryId) {
        setEncoder();
        TaskType taskType = taskTypeAssistant.taskType(message);
        log.info("taskType = {}",taskType);
        // Exhaustive over TaskType; the compiler flags any newly added constant.
        return switch (taskType){
            case SEARCH_WEATHER -> ollamaAssistant.weather(message,memoryId);
            case SEARCH_GOODS -> ollamaAssistant.search(message,memoryId);
            case SEARCH_ARTICLE -> ragAssistant.ragChat(message,memoryId);
        };
    }

    /*@Resource
    private McpAssistant mcpAssistant;


    @RequestMapping(value="/chat/mcp", produces = TEXT_EVENT_STREAM_VALUE)
    public Flux<String> mcp(@RequestParam("message") String message,@RequestParam("memoryId")String memoryId) {
        setEncoder();
        return mcpAssistant.chat(message, memoryId);
    }*/

}
