package com.forever.controller;


import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.apache.tika.ApacheTikaDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentByParagraphSplitter;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.Tokenizer;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.milvus.MilvusEmbeddingStore;
import lombok.SneakyThrows;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Date;
import java.util.List;

@RestController
@RequestMapping("/ai/rag")
public class RAGController {

    private static final Logger logger = LoggerFactory.getLogger(RAGController.class);

    // DateTimeFormatter is immutable and thread-safe, so it can be shared as a
    // constant; the SimpleDateFormat the original created per-request is not.
    private static final DateTimeFormatter DAY_DIR_FORMAT = DateTimeFormatter.ofPattern("yyyyMMdd");

    // Injected OpenAI-compatible embedding model. (The original also built a second,
    // unused local model pointing at ollama; that dead code has been removed.)
    @Autowired
    OpenAiEmbeddingModel openAiEmbeddingModel;

    /**
     * Ingests an uploaded document into the RAG pipeline: saves it under
     * {user.dir}/files/{yyyyMMdd}/, splits it into paragraph segments of at most
     * 1024 tokens, embeds each segment with the injected embedding model, and
     * stores the vectors in the Milvus collection "test_collection".
     *
     * @param file the multipart upload; must carry a non-blank original filename
     */
    @SneakyThrows
    @PostMapping("/upLoadFile")
    public void upLoadTiKaDocument(MultipartFile file) {
        Path savedFile = saveUpload(file);
        List<TextSegment> segments = splitIntoSegments(savedFile);
        storeEmbeddings(segments);
    }

    /**
     * Persists the upload under {user.dir}/files/{yyyyMMdd}/ and returns the saved path.
     *
     * Fixes vs. the original: directories are created for the parent folder only
     * (the old code called mkdirs() on the full file path, creating a directory
     * named like the file and making transferTo collide); the IOException is no
     * longer swallowed (the old code logged it and then parsed a file that was
     * never written); the filename is null-checked and sanitized against path
     * traversal; platform-portable separators replace the hard-coded "\\".
     */
    private Path saveUpload(MultipartFile file) throws IOException {
        String originName = file.getOriginalFilename();
        if (originName == null || originName.isBlank()) {
            throw new IllegalArgumentException("uploaded file has no original filename");
        }
        // Keep only the last path element so names like "..\..\evil.txt" cannot escape the dir.
        String safeName = Paths.get(originName).getFileName().toString();
        logger.info("Received upload: {}", safeName);

        Path saveDir = Paths.get(System.getProperty("user.dir"), "files",
                LocalDate.now().format(DAY_DIR_FORMAT));
        Files.createDirectories(saveDir);

        Path target = saveDir.resolve(safeName);
        file.transferTo(target.toFile());
        return target;
    }

    /** Parses the saved file with Apache Tika and splits it into paragraphs of at most 1024 tokens. */
    private List<TextSegment> splitIntoSegments(Path filePath) {
        Tokenizer tokenizer = new OpenAiTokenizer();
        DocumentByParagraphSplitter splitter = new DocumentByParagraphSplitter(1024, 0, tokenizer);
        Document document = FileSystemDocumentLoader.loadDocument(filePath.toString(),
                new ApacheTikaDocumentParser());
        return splitter.split(document);
    }

    /** Embeds every segment with the injected model and writes segment + vector to Milvus. */
    private void storeEmbeddings(List<TextSegment> segments) {
        // Typed store instead of the original raw EmbeddingStore; debug logging
        // replaces the System.out.println calls.
        EmbeddingStore<TextSegment> embeddingStore = MilvusEmbeddingStore.builder()
                .uri("http://localhost:19530")
                .collectionName("test_collection")
                .dimension(768)
                .build();
        for (TextSegment segment : segments) {
            Response<Embedding> embedded = openAiEmbeddingModel.embed(segment);
            String id = embeddingStore.add(embedded.content(), segment);
            logger.debug("Stored segment (dim={}) with id {}", embedded.content().dimension(), id);
        }
    }
}
