package cn.bugstack.xfg.dev.texh.test;

import cn.bugstack.xfg.dev.tech.Application;
import com.alibaba.fastjson.JSON;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.ai.chat.ChatResponse;
import org.springframework.ai.chat.messages.Message;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.chat.prompt.SystemPromptTemplate;
import org.springframework.ai.document.Document;
import org.springframework.ai.ollama.OllamaChatClient;
import org.springframework.ai.ollama.api.OllamaOptions;
import org.springframework.ai.reader.tika.TikaDocumentReader;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;
import org.springframework.ai.vectorstore.PgVectorStore;
import org.springframework.ai.vectorstore.SearchRequest;
import org.springframework.ai.vectorstore.SimpleVectorStore;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * RAG (retrieval-augmented generation) tests: upload documents into the vector
 * store and answer questions grounded on the retrieved chunks.
 *
 * @author lxd
 * @since 2025/6/27
 */
@Slf4j
@RunWith(SpringRunner.class)
@SpringBootTest(classes = Application.class)
public class RAGTest {

    /**
     * Knowledge-base tag stored in document metadata and used in the similarity-search
     * filter. Single source of truth so upload and query can never drift apart.
     * NOTE(review): package is declared as "cn.bugstack.xfg.dev.texh.test" but the
     * Application import uses "tech" — looks like a typo; confirm the directory name.
     */
    private static final String KNOWLEDGE = "知识库名称";

    @Resource
    private OllamaChatClient ollamaChatClient;
    @Resource
    private TokenTextSplitter tokenTextSplitter;
    @Resource
    private PgVectorStore pgVectorStore;

    /**
     * Reads a local file with Tika, tags it with the knowledge-base name, splits it
     * into token-bounded chunks and persists the chunk embeddings into pgvector.
     */
    @Test
    public void upload() {
        // 1. Read the raw document(s).
        TikaDocumentReader reader = new TikaDocumentReader("./data/file.text");
        List<Document> documents = reader.get();
        // 2. Tag the source documents BEFORE splitting so every chunk inherits the
        //    "knowledge" metadata. (Previously the parents were tagged after the split,
        //    which left that tag on objects that are never stored.)
        documents.forEach(doc -> doc.getMetadata().put("knowledge", KNOWLEDGE));
        // 3. Split into token-sized chunks.
        List<Document> documentSplitterList = tokenTextSplitter.apply(documents);
        // 4. Tag the chunks as well — defensive, guarantees the filter key is present
        //    even if the splitter does not copy parent metadata.
        documentSplitterList.forEach(doc -> doc.getMetadata().put("knowledge", KNOWLEDGE));
        // 5. Embed the chunks and write them into the vector store.
        pgVectorStore.accept(documentSplitterList);
        log.info("上传完成");
    }

    /**
     * RAG chat: retrieves the most similar chunks for the question, renders them into
     * a system prompt, and asks the model to answer (the prompt requires Chinese).
     */
    @Test
    public void chat() {
        // 1. The user question.
        String message = "上海同磊土木工程有限公司什么时候离职?";
        // System prompt template; {documents} is replaced with the retrieved context.
        String systemPrompt = """  
                Use the information from the DOCUMENTS section to provide accurate answers but act as if you knew this information innately.
                If unsure, simply state that you don't know.
                Another thing you need to note is that your reply must be in Chinese!
                DOCUMENTS:
                    {documents}
                """;
        // 2. Query the top-5 most similar chunks, restricted to this knowledge base.
        SearchRequest request = SearchRequest.query(message)
                .withTopK(5)
                .withFilterExpression("knowledge == '" + KNOWLEDGE + "'");
        List<Document> documents = pgVectorStore.similaritySearch(request);
        // 3. Concatenate chunk contents into the template parameter.
        String documentsCollectors = documents.stream().map(Document::getContent).collect(Collectors.joining());
        // 4. Render the system message with the retrieved context.
        Message ragMessage = new SystemPromptTemplate(systemPrompt).createMessage(Map.of("documents", documentsCollectors));
        // 5. Combine the user question with the grounding system message.
        List<Message> messages = new ArrayList<>();
        messages.add(new UserMessage(message));
        messages.add(ragMessage);
        // 6. Call the model and log the full response.
        ChatResponse chatResponse = ollamaChatClient.call(new Prompt(messages, OllamaOptions.create().withModel("deepseek-r1:1.5b")));

        log.info("测试结果:{}", JSON.toJSONString(chatResponse));
    }
}
