package cn.bugstack.xfg.dev.tech.trigger.http;

import cn.bugstack.xfg.dev.tech.api.IAiService;

import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;

import org.springframework.ai.chat.ChatResponse;
import org.springframework.ai.chat.messages.Message;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.chat.prompt.SystemPromptTemplate;
import org.springframework.ai.document.Document;
import org.springframework.ai.ollama.OllamaChatClient;
import org.springframework.ai.ollama.api.OllamaOptions;
import org.springframework.ai.vectorstore.PgVectorStore;
import org.springframework.ai.vectorstore.SearchRequest;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;

import reactor.core.Disposable;
import reactor.core.publisher.Flux;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;


@RestController()
@CrossOrigin("*")
@RequestMapping("/api/v1/ollama/")
@Slf4j
public class OllamaController implements IAiService {

    /**
     * System prompt template for RAG answers; the {@code {documents}} placeholder
     * is filled with the concatenated text of the retrieved knowledge-base documents.
     */
    private static final String RAG_SYSTEM_PROMPT = """
            Use the information from the DOCUMENTS section to provide accurate answers but act as if you knew this information innately.
            If unsure, simply state that you don't know.
            Another thing you need to note is that your reply must be in Chinese!
            DOCUMENTS:
                {documents}
            """;

    @Resource
    private OllamaChatClient chatClient;

    @Resource
    private PgVectorStore pgVectorStore;

    /**
     * Synchronous chat completion.
     * <p>
     * http://localhost:8090/api/v1/ollama/generate?model=deepseek-r1:1.5b&message=1+1
     *
     * @param model   Ollama model name, e.g. {@code deepseek-r1:1.5b}
     * @param message user prompt text
     * @return the complete model response
     */
    @RequestMapping(value = "generate", method = RequestMethod.GET)
    @Override
    public ChatResponse generate(@RequestParam("model") String model, @RequestParam("message") String message) {
        return chatClient.call(new Prompt(message, OllamaOptions.create().withModel(model)));
    }

    /**
     * Streaming chat completion as a reactive {@link Flux}.
     * <p>
     * http://localhost:8090/api/v1/ollama/generate_stream?model=deepseek-r1:1.5b&message=hi
     *
     * @param model   Ollama model name
     * @param message user prompt text
     * @return a stream of partial chat responses
     */
    @RequestMapping(value = "generate_stream", method = RequestMethod.GET)
    @Override
    public Flux<ChatResponse> generateStream(@RequestParam("model") String model, @RequestParam("message") String message) {
        return chatClient.stream(new Prompt(message, OllamaOptions.create().withModel(model)));
    }

    /**
     * Streaming chat completion delivered over Server-Sent Events.
     * <p>
     * http://localhost:8090/api/v1/ollama/generate_streamsse?model=deepseek-r1:1.5b&message=hi
     * <p>
     * Fix vs. previous version: the per-request single-thread {@link ExecutorService}
     * was never shut down, leaking one thread per call. {@code Flux.subscribe} is
     * non-blocking, so no extra thread is needed at all; the subscription is now also
     * disposed when the client disconnects or the emitter times out, cancelling the
     * upstream model call instead of streaming into the void.
     *
     * @param model   Ollama model name
     * @param message user prompt text
     * @return an {@link SseEmitter} that emits one JSON event per response chunk and
     *         a final {@code complete} event when the stream ends
     */
    @RequestMapping(value = "generate_streamsse", method = RequestMethod.GET, produces = MediaType.TEXT_EVENT_STREAM_VALUE)
    public SseEmitter generateStreamSSE(@RequestParam("model") String model, @RequestParam("message") String message) {
        SseEmitter emitter = new SseEmitter();

        Disposable subscription = chatClient.stream(new Prompt(message, OllamaOptions.create().withModel(model)))
                .subscribe(
                        response -> {
                            try {
                                emitter.send(response, MediaType.APPLICATION_JSON);
                            } catch (IOException e) {
                                emitter.completeWithError(e);
                            }
                        },
                        emitter::completeWithError,
                        () -> {
                            try {
                                // 在流结束时发送最终消息
                                emitter.send(SseEmitter.event()
                                        .name("complete")
                                        .data("流处理已完成", MediaType.TEXT_PLAIN));
                                emitter.complete();
                            } catch (IOException e) {
                                emitter.completeWithError(e);
                            }
                        }
                );

        // Cancel the upstream model stream when the SSE channel is no longer usable.
        emitter.onCompletion(subscription::dispose);
        emitter.onTimeout(subscription::dispose);
        emitter.onError(t -> subscription.dispose());

        return emitter;
    }

    /**
     * Streaming RAG completion: retrieves documents tagged {@code ragTag} from the
     * pgvector store, injects them into a system prompt, and streams the model answer.
     *
     * @param model   Ollama model name
     * @param ragTag  knowledge-base tag used to filter the vector search
     * @param message user question (also used as the similarity-search query)
     * @return a stream of partial chat responses
     */
    @RequestMapping(value = "generate_stream_rag", method = RequestMethod.GET)
    @Override
    public Flux<ChatResponse> generateStreamRag(@RequestParam("model") String model, @RequestParam("ragTag") String ragTag, @RequestParam("message") String message) {
        log.info("开始处理 RAG 请求: model={}, ragTag={}, message={}", model, ragTag, message);

        // The tag is spliced into the filter-expression string, so escape single quotes:
        // a quote inside user-supplied ragTag would otherwise break out of (or inject
        // into) the filter grammar.
        // NOTE(review): prefer FilterExpressionBuilder if this Spring AI version has it.
        String safeTag = ragTag.replace("'", "\\'");
        SearchRequest request = SearchRequest.query(message)
                .withTopK(5)
                .withFilterExpression("knowledge == '" + safeTag + "'");

        log.info("开始向量数据库查询...");
        // Defensive copy: the store may return an unmodifiable list, and we may append
        // a fallback document below.
        List<Document> documents = new ArrayList<>(pgVectorStore.similaritySearch(request));
        log.info("向量数据库查询完成，找到 {} 条文档", documents.size());

        if (documents.isEmpty()) {
            log.warn("没有找到匹配的文档，将返回空回答");
            // 如果没有找到文档，添加一个提示信息
            documents.add(new Document("没有找到相关知识，请尝试其他问题或知识库。"));
        }

        String documentContext = documents.stream().map(Document::getContent).collect(Collectors.joining("\n\n"));
        log.info("文档内容长度: {} 字符", documentContext.length());

        Message ragMessage = new SystemPromptTemplate(RAG_SYSTEM_PROMPT).createMessage(Map.of("documents", documentContext));

        // NOTE(review): the system (RAG) message is appended after the user message,
        // preserving the original behavior — confirm the model honors this order.
        List<Message> messages = new ArrayList<>();
        messages.add(new UserMessage(message));
        messages.add(ragMessage);

        log.info("开始调用 Ollama 模型...");
        return chatClient.stream(new Prompt(
                messages,
                OllamaOptions.create()
                        .withModel(model)
        ))
        // debug, not info: this fires once per streamed token and floods the log at info.
        .doOnNext(response -> log.debug("收到模型响应: {}", response.getResult().getOutput().getContent()))
        .doOnError(error -> log.error("模型调用出错: ", error))
        .doOnComplete(() -> log.info("模型调用完成"));
    }
}
