package com.yc.controller;

import com.yc.config.AiConfig;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.service.TokenStream;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;

@RestController
@RequestMapping("/ai")
public class ChatController {

    private final ChatModel chatModel;
    private final StreamingChatModel streamingChatModel;
    private final AiConfig.Assistant assistant;

    // Constructor injection (preferred over field @Autowired): dependencies are
    // final and explicit, and the bean can never exist in a half-wired state.
    @Autowired
    public ChatController(ChatModel chatModel,
                          StreamingChatModel streamingChatModel,
                          AiConfig.Assistant assistant) {
        this.chatModel = chatModel;
        this.streamingChatModel = streamingChatModel;
        this.assistant = assistant;
    }

    /**
     * Blocking chat: returns the model's complete answer in one response.
     * Example: http://localhost:8080/ai/chat?message=请介绍一下自己
     *
     * @param message the user's prompt
     * @return the model's full answer as plain text
     */
    @GetMapping("/chat")
    public String model(@RequestParam("message") String message) {
        return chatModel.chat(message);
    }

    /**
     * Streaming chat whose output goes to the SERVER console only — the HTTP
     * response body is empty (return type is void). Partial responses are
     * printed as they arrive; the complete response and any error are printed
     * when the stream finishes.
     * Example: http://localhost:8080/ai/streamingchat?question=请介绍一下自己
     *
     * @param question the user's prompt
     */
    @RequestMapping("/streamingchat")
    public void streamingchat(@RequestParam("question") String question) {
        streamingChatModel.chat(question, new StreamingChatResponseHandler() {
            @Override
            public void onPartialResponse(String partialResponse) {
                // Each token/fragment as the model produces it.
                System.out.print(partialResponse);
            }

            @Override
            public void onCompleteResponse(ChatResponse chatResponse) {
                System.out.println(chatResponse.toString());
            }

            @Override
            public void onError(Throwable e) {
                // NOTE(review): consider an SLF4J logger instead of stdout
                // so failures are captured by the application's log config.
                System.out.println(e.toString());
            }
        });
    }

    /**
     * Memory-aware blocking chat via the high-level Assistant service; the
     * memoryId selects the per-conversation chat memory.
     * Example: http://localhost:8080/ai/assistantchat?memoryId=1&question=什么是RAG
     *
     * @param memoryId conversation/memory identifier
     * @param question the user's prompt
     * @return the model's full answer as plain text
     */
    @RequestMapping("/assistantchat")
    public String assistantchat(@RequestParam("memoryId") String memoryId,
                                @RequestParam("question") String question) {
        return assistant.chat(memoryId, question);
    }

    /**
     * Memory-aware streaming chat: bridges the LangChain4j TokenStream into a
     * Reactor Flux so partial responses stream to the client as Server-Sent
     * Events, token by token.
     * Example: http://localhost:9090/ai/assistantstreamingchat?memoryId=1&question=什么是RAG
     *
     * @param memoryId conversation/memory identifier
     * @param question the user's prompt
     * @return a Flux emitting each partial response until the model completes
     */
    // Fixed media type: "text/stream" is not a registered MIME type; SSE
    // clients (e.g. EventSource) require "text/event-stream" to stream.
    @RequestMapping(method = {RequestMethod.POST, RequestMethod.GET},
            value = "/assistantstreamingchat",
            produces = "text/event-stream;charset=utf-8")
    public Flux<String> assistantstreamingchat(@RequestParam("memoryId") String memoryId,
                                               @RequestParam("question") String question) {
        TokenStream tokenStream = assistant.chatStream(memoryId, question);

        // BUFFER strategy queues emissions when the downstream consumer is
        // slower than the model, so no partial response is dropped.
        return Flux.create(fluxSink -> {
            tokenStream.onPartialResponse(fluxSink::next)
                    .onCompleteResponse(response -> fluxSink.complete())
                    .onError(fluxSink::error)
                    .start();
        }, FluxSink.OverflowStrategy.BUFFER);
    }
}
