package com.star.langchain.learn.controller;

import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;

import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;

import com.star.langchain.learn.aiservice.RouterAgent;
import com.star.langchain.learn.aiservice.SimpleAiService;
import com.star.langchain.learn.aiservice.StreamingAiService;
import com.star.langchain.learn.aiservice.TokenStreamingService;
import com.star.langchain.learn.service.ToolConfirmationService;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.PartialThinking;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.output.FinishReason;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.output.TokenUsage;
import dev.langchain4j.rag.content.Content;
import dev.langchain4j.service.TokenStream;
import dev.langchain4j.service.tool.BeforeToolExecution;
import dev.langchain4j.service.tool.ToolExecution;
import reactor.core.publisher.Flux;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import static dev.langchain4j.data.message.ToolExecutionResultMessage.from;
import static dev.langchain4j.data.message.UserMessage.userMessage;

/**
 * AI chat controller: demo endpoints for blocking chat, streaming chat
 * (plain SSE and {@link TokenStream}-based with tool-confirmation events),
 * low-level tool calling, embeddings, and multi-agent routing.
 *
 * @author star
 */
@RequestMapping("/ai")
@RestController
@Slf4j
public class AIController {

    /**
     * Blocking chat model.
     */
    @Autowired
    private ChatModel chatModel;

    /**
     * Streaming chat model.
     */
    @Autowired
    private StreamingChatModel streamingChatModel;

    /**
     * Vector embedding model.
     */
    @Autowired
    private EmbeddingModel embeddingModel;

    /**
     * Plain (non-streaming) AI service.
     */
    @Autowired
    private SimpleAiService simpleAiService;

    /**
     * Calculator tool specification used by the low-level tool-calling demo.
     */
    @Autowired
    private ToolSpecification calculator;

    /**
     * Flux-based streaming AI service.
     */
    @Autowired
    private StreamingAiService streamingAiService;

    /**
     * TokenStream-based streaming AI service (exposes tool-execution callbacks).
     */
    @Autowired
    private TokenStreamingService tokenStreamingService;

    /**
     * JSON serializer for tool-confirmation payloads sent over the event stream.
     */
    @Autowired
    private ObjectMapper objectMapper;

    /**
     * Issues confirmation ids for tool executions that require user approval.
     */
    @Autowired
    private ToolConfirmationService toolConfirmationService;

    /**
     * Router agent that delegates the question to an expert agent.
     */
    @Autowired
    private RouterAgent routerAgent;

    /**
     * Blocking chat through the simple AI service.
     *
     * @param msg      user message
     * @param memoryId conversation memory id
     * @return AI response text
     */
    @GetMapping("/service/simple")
    public String chat(String msg, Long memoryId) {
        return simpleAiService.chat(msg, memoryId);
    }

    /**
     * Streaming chat as server-sent events. Each partial response is split
     * into single characters and re-emitted with a 50 ms delay to simulate
     * a typewriter effect.
     *
     * @param msg      user message
     * @param memoryId conversation memory id
     * @return character-by-character SSE stream of the AI response
     */
    @GetMapping(value = "/service/stream", produces = MediaType.TEXT_EVENT_STREAM_VALUE)
    public Flux<String> fluxChat(String msg, Long memoryId) {
        return streamingAiService.chat(msg, memoryId)
                // Split each chunk into single characters and pace them
                // (50 ms per character) for a typewriter effect.
                .flatMap(text -> Flux.fromArray(text.split(""))
                        .delayElements(Duration.ofMillis(50)));
    }

    /**
     * Streaming chat via {@link TokenStream}, forwarding partial responses,
     * partial "thinking" tokens, and tool-confirmation requests to the client
     * as typed {@link StreamRecord} SSE events.
     *
     * @param msg      user message
     * @param memoryId conversation memory id
     * @return SSE stream of typed records ("response", "thinking", "tool_confirmation")
     */
    @GetMapping(value = "/service/tokenStream", produces = MediaType.TEXT_EVENT_STREAM_VALUE)
    public Flux<StreamRecord> tokenStream(String msg, Long memoryId) {
        TokenStream chat = tokenStreamingService.chat(msg, memoryId);

        return Flux.create(sink -> {
            chat.onPartialResponse((String partialResponse) -> sink.next(new StreamRecord(partialResponse, "response")))
                    .onPartialThinking((PartialThinking partialThinking) -> sink.next(
                            new StreamRecord(partialThinking.text(), "thinking")))
                    .onRetrieved((List<Content> contents) -> log.debug("Retrieved contents: {}", contents))
                    .onIntermediateResponse(
                            (ChatResponse intermediateResponse) -> log.debug("Intermediate response: {}",
                                    intermediateResponse))
                    // Invoked right before a tool is executed. BeforeToolExecution carries the
                    // ToolExecutionRequest (tool name, arguments, etc.).
                    .beforeToolExecution((BeforeToolExecution beforeToolExecution) -> {
                        try {
                            // Confirmation id is carried via ThreadLocal by the service.
                            String confirmationId = toolConfirmationService.getConfirmationId();
                            ToolExecutionRequest request = beforeToolExecution.request();

                            // Build the confirmation payload and push it through the stream
                            // so the user can approve the tool call.
                            ToolConfirmationRequest confirmationRequest = new ToolConfirmationRequest();
                            confirmationRequest.setConfirmationId(confirmationId);
                            confirmationRequest.setToolName(request.name());
                            confirmationRequest.setToolArguments(request.arguments());
                            confirmationRequest.setMessage("需要您的确认才能执行工具: " + request.name());

                            // Serialize to JSON and emit as a "tool_confirmation" record.
                            String confirmationJson = objectMapper.writeValueAsString(confirmationRequest);
                            sink.next(new StreamRecord(confirmationJson, "tool_confirmation"));

                            log.info("已发送工具确认请求给用户: confirmationId={}, toolName={}",
                                    confirmationId, request.name());
                        } catch (Exception e) {
                            // Log only: a failed confirmation message must not kill the stream.
                            log.error("发送工具确认请求时发生错误", e);
                        }
                    })
                    // Invoked right after a tool is executed. ToolExecution carries the
                    // request plus the execution result.
                    .onToolExecuted((ToolExecution toolExecution) -> log.info("工具执行完成: {}", toolExecution))
                    .onCompleteResponse((ChatResponse response) -> sink.complete())
                    .onError(sink::error)
                    .start();
        });
    }

    /**
     * Payload asking the user to confirm a pending tool execution.
     */
    @lombok.Data
    public static class ToolConfirmationRequest {
        // Correlates the user's answer with the pending tool call.
        private String confirmationId;
        private String toolName;
        private String toolArguments;
        private String message;
    }

    /**
     * One typed SSE event: the text payload and its type
     * ("response", "thinking" or "tool_confirmation").
     */
    public record StreamRecord(String text, String type) {
    }

    /**
     * Streaming chat directly against the low-level streaming model.
     *
     * @param msg user message
     * @return SSE stream of partial response chunks
     */
    @GetMapping(value = "/model/stream", produces = MediaType.TEXT_EVENT_STREAM_VALUE)
    public Flux<String> serviceChat(String msg) {

        var request = ChatRequest.builder()
                .messages(List.of(UserMessage.from(msg)))
                .build();

        return Flux.create(sink -> {
            streamingChatModel.chat(request, new StreamingChatResponseHandler() {
                @Override
                public void onPartialResponse(String partialResponse) {
                    log.debug("Partial response: {}", partialResponse);
                    // Skip null chunks; only forward real content.
                    if (partialResponse != null) {
                        sink.next(partialResponse);
                    }
                }

                @Override
                public void onCompleteResponse(ChatResponse completeResponse) {
                    log.info("Complete response received: {}", completeResponse);
                    sink.complete();
                }

                @Override
                public void onError(Throwable error) {
                    log.error("Error during streaming: ", error);
                    sink.error(error);
                }
            });
        });

    }

    /**
     * Low-level tool-calling demo: asks the model "2+2=?", lets it request the
     * calculator tool, feeds back a fixed tool result, and requests the final
     * answer. Intermediate values are logged for inspection.
     *
     * @return "ok" when the round trip completes
     */
    @GetMapping("/tools")
    public String tools() {
        // given
        UserMessage userMessage = userMessage("2+2=?");
        List<ToolSpecification> toolSpecifications = singletonList(calculator);

        // when: first call — the model should answer with a tool request, not text
        ChatResponse response = chatModel.chat(
                ChatRequest.builder().messages(singletonList(userMessage)).toolSpecifications(toolSpecifications)
                        .build());

        // then
        AiMessage aiMessage = response.aiMessage();
        // text is expected to be null when the model requests a tool instead
        log.info("first response text: {}", aiMessage.text());

        List<ToolExecutionRequest> toolExecutionRequests = aiMessage.toolExecutionRequests();
        ToolExecutionRequest toolExecutionRequest = toolExecutionRequests.getFirst();

        log.info("tool name:{}", toolExecutionRequest.name());
        log.info("tool arguments: {}", toolExecutionRequest.arguments());

        TokenUsage tokenUsage = response.tokenUsage();
        FinishReason finishReason = response.finishReason();
        log.info("first call tokens: {}, finishReason: {}", tokenUsage.totalTokenCount(), finishReason);

        // Build chat history including the (hard-coded) tool result "4"
        ToolExecutionResultMessage toolExecutionResultMessage = from(toolExecutionRequest, "4");
        List<ChatMessage> messages = asList(userMessage, aiMessage, toolExecutionResultMessage);

        ChatResponse secondResponse = chatModel.chat(messages);

        // then: second response should contain text and no further tool requests
        AiMessage secondAiMessage = secondResponse.aiMessage();
        log.info("second response text: {}, tool requests: {}",
                secondAiMessage.text(), secondAiMessage.toolExecutionRequests());
        log.info("second call tokens: {}", secondResponse.tokenUsage());

        return "ok";
    }

    /**
     * Embedding demo: embeds batchSize + 1 segments in one call (exercising
     * the model's internal batching) and logs the resulting dimension.
     *
     * @return "ok" when the embeddings are computed
     */
    @GetMapping("/embedding")
    public String embedding() {
        int batchSize = 10;
        // One more than the batch size so the model must split into two batches.
        int numberOfSegments = batchSize + 1;

        List<TextSegment> segments = new ArrayList<>(numberOfSegments);
        for (int i = 0; i < numberOfSegments; i++) {
            segments.add(TextSegment.from("text " + i));
        }

        Response<List<Embedding>> response = embeddingModel.embedAll(segments);

        List<Embedding> embeddingList = response.content();
        log.info("embedded {} segments, dimension: {}, finishReason: {}",
                embeddingList.size(), embeddingList.getFirst().dimension(), response.finishReason());

        return "ok";
    }

    /**
     * Multi-agent collaboration: route the question to the matching expert agent.
     *
     * @param msg      user message
     * @param memoryId conversation memory id
     * @return the expert agent's answer
     */
    @GetMapping("/multi")
    public String multi(String msg, Long memoryId) {
        return routerAgent.askToExpert(msg, memoryId);
    }
}
