package com.example.springai.controller;

import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.document.Document;
import org.springframework.ai.reader.TextReader;
import org.springframework.ai.transformer.splitter.TextSplitter;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;
import org.springframework.ai.vectorstore.SearchRequest;
import org.springframework.ai.vectorstore.VectorStore;
import org.springframework.ai.vectorstore.filter.FilterExpressionBuilder;
import org.springframework.core.io.FileSystemResourceLoader;
import org.springframework.core.io.Resource;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.io.File;
import java.util.List;
import java.util.Map;

@Slf4j
@RestController
@RequestMapping("/vector")
public class VectorStoreController {

    /**
     * Default document loaded by {@link #initFromFile(String)} when no {@code filePath}
     * request parameter is supplied (preserves the original hard-coded behavior).
     */
    private static final String DEFAULT_FILE_PATH =
            "C:\\学习\\AI\\project\\teacher-data\\agi-ta\\chatall\\ChatALL.md";

    private final VectorStore vectorStore;

    public VectorStoreController(VectorStore vectorStore) {
        this.vectorStore = vectorStore;
    }

    /**
     * Adds a piece of text to the knowledge base.
     *
     * @param text    text content to store
     * @param storeId id of the knowledge base the text belongs to; recorded in document metadata
     * @return a fixed success message
     * @throws IllegalArgumentException if {@code text} or {@code storeId} is missing
     */
    @RequestMapping("/add")
    public String add(String text, String storeId) {
        // Map.of is null-hostile; fail with a clear message instead of a bare NPE.
        if (text == null || storeId == null) {
            throw new IllegalArgumentException("text and storeId are required");
        }
        // Build the document and tag it with its owning knowledge base via metadata,
        // so queries can later be scoped to a single store.
        List<Document> docs = List.of(new Document(text, Map.of("storeId", storeId)));
        vectorStore.add(docs);
        return "Add Success";
    }

    /**
     * Runs a similarity search against the knowledge base, scoped to one store.
     *
     * @param query   the query text
     * @param storeId id of the knowledge base to search within
     * @return up to 3 most similar documents from that store
     * @throws IllegalArgumentException if {@code query} or {@code storeId} is missing
     */
    @RequestMapping("/query")
    public List<Document> query(String query, String storeId) {
        if (query == null || storeId == null) {
            throw new IllegalArgumentException("query and storeId are required");
        }
        FilterExpressionBuilder b = new FilterExpressionBuilder();

        return vectorStore.similaritySearch(
                SearchRequest.defaults()
                        .withQuery(query)   // query text
                        .withTopK(3)        // number of results; similarity threshold left at default
                        // Metadata filter: only documents written with this storeId.
                        .withFilterExpression(b.eq("storeId", storeId).build())
        );
    }

    /**
     * Loads a text file, splits it into token-sized chunks and stores the chunks
     * in the vector store under store id {@code "s1"}.
     *
     * @param filePath optional path of the file to ingest; when absent or blank,
     *                 falls back to {@link #DEFAULT_FILE_PATH}
     * @return a fixed completion message
     */
    @RequestMapping("/initFromFile")
    public String initFromFile(String filePath) {
        // Keep the original behavior when the new optional parameter is not supplied.
        String path = (filePath == null || filePath.isBlank()) ? DEFAULT_FILE_PATH : filePath;

        Resource resource = new FileSystemResourceLoader().getResource(path);
        // Read the file into Document(s).
        TextReader textReader = new TextReader(resource);
        // Custom metadata must be populated before read(): it is copied onto each Document.
        Map<String, Object> metaData = textReader.getCustomMetadata();
        // Derive the file name from the path; handles both '/' and '\\' separators.
        metaData.put("filename", fileNameOf(path));
        metaData.put("storeId", "s1");
        List<Document> documents = textReader.read();
        log.info("文档内容：{}", documents);

        // Split the document into token-bounded chunks suitable for embedding.
        TextSplitter textSplitter = new TokenTextSplitter();
        documents = textSplitter.transform(documents);
        log.info("分隔后的文档内容：{}", documents);

        // Persist the chunks into the vector store.
        vectorStore.add(documents);

        return "初始化文档至向量库完成";
    }

    /** Returns the last path segment, accepting either '/' or '\\' as separator. */
    private static String fileNameOf(String path) {
        int cut = Math.max(path.lastIndexOf('/'), path.lastIndexOf('\\'));
        return path.substring(cut + 1);
    }
}
