package com.yc;

import dev.langchain4j.community.model.dashscope.QwenEmbeddingModel;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentParser;
import dev.langchain4j.data.document.Metadata;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.apache.tika.ApacheTikaDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentByLineSplitter;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.store.embedding.EmbeddingStore;
import org.apache.tika.Tika;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.context.annotation.Bean;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@EnableScheduling
@SpringBootApplication
@EnableDiscoveryClient
@EnableScheduling
@SpringBootApplication
@EnableDiscoveryClient
public class OpenAiApp_RAG {
    public static void main(String[] args) {
        SpringApplication.run(OpenAiApp_RAG.class, args);
    }

    /** Lower-case file extension -> logical document type used to route files to a vector store. */
    private static final Map<String, String> FILE_TYPE_MAPPINGS = Map.of(
            ".pdf", "pdf",
            ".doc", "word",
            ".docx", "word",
            ".xls", "excel",
            ".xlsx", "excel",
            ".txt", "text"
    );

    // Embedding model and the per-type vector stores, injected by Spring via the constructor.
    private final QwenEmbeddingModel qwenEmbeddingModel;
    private final EmbeddingStore<TextSegment> pdfStore;
    private final EmbeddingStore<TextSegment> wordStore;
    private final EmbeddingStore<TextSegment> excelStore;
    private final EmbeddingStore<TextSegment> defaultStore;

    public OpenAiApp_RAG(
            QwenEmbeddingModel qwenEmbeddingModel,
            @Qualifier("pdfEmbeddingStore") EmbeddingStore<TextSegment> pdfStore,
            @Qualifier("wordEmbeddingStore") EmbeddingStore<TextSegment> wordStore,
            @Qualifier("excelEmbeddingStore") EmbeddingStore<TextSegment> excelStore,
            @Qualifier("defaultEmbeddingStore") EmbeddingStore<TextSegment> defaultStore) {
        this.qwenEmbeddingModel = qwenEmbeddingModel;
        this.pdfStore = pdfStore;
        this.wordStore = wordStore;
        this.excelStore = excelStore;
        this.defaultStore = defaultStore;
    }

    /** Runs one ingestion pass right after the application context starts. */
    @Bean
    public CommandLineRunner initDataAtStartup() {
        return args -> initVectorStore();
    }

    /** Scheduled ingestion pass, every day at 02:00. */
    @Scheduled(cron = "0 0 2 * * ?")
    public void scheduledInitVectorStore() {
        System.out.println("开始执行定时任务：" + LocalDateTime.now().format(DateTimeFormatter.ISO_LOCAL_DATE_TIME));
        initVectorStore();
    }

    /**
     * Scans the classpath {@code rag} directory, embeds each file into the vector store matching
     * its detected type, and deletes the file once its vectors are stored.
     *
     * <p>{@code synchronized} so the startup pass and the scheduled pass can never overlap and
     * double-ingest (or double-delete) the same files.
     */
    private synchronized void initVectorStore() {
        try {
            URL resourceUrl = getClass().getClassLoader().getResource("rag");
            if (resourceUrl == null) {
                throw new IllegalStateException("资源目录 rag 未找到");
            }
            // NOTE(review): Paths.get(uri) assumes an exploded (file:) classpath. When packaged
            // as a jar this throws, and jar entries cannot be deleted anyway — confirm "rag" is
            // always an exploded/external directory in deployment.
            Path directoryPath = Paths.get(resourceUrl.toURI());

            Tika tika = new Tika();
            DocumentParser parser = new ApacheTikaDocumentParser();
            DocumentByLineSplitter splitter = new DocumentByLineSplitter(100, 20);

            List<Path> filePaths = scanDirectory(directoryPath);
            int totalVectors = 0;      // plain counters: the loop below is single-threaded
            int processedFiles = 0;

            // Group by detected type so each group is routed to its dedicated store once.
            Map<String, List<Path>> filesByType = filePaths.stream()
                    .collect(Collectors.groupingBy(path -> detectFileType(path, tika)));

            for (Map.Entry<String, List<Path>> entry : filesByType.entrySet()) {
                String fileType = entry.getKey();
                List<Path> paths = entry.getValue();

                System.out.printf("处理 %s 文件: %d 个%n", fileType, paths.size());

                EmbeddingStore<TextSegment> targetStore = storeFor(fileType);

                for (Path path : paths) {
                    try {
                        int vectors = ingestFile(path, fileType, parser, splitter, targetStore);
                        totalVectors += vectors;
                        processedFiles++;
                        System.out.printf(" 已处理文件: %s (%d 个向量)%n",
                                path.getFileName(), vectors);

                        // Delete only after the vectors are stored. A failed delete must not
                        // discard the already-successful ingestion, so it gets its own try.
                        try {
                            if (Files.deleteIfExists(path)) {
                                System.out.printf(" 已删除文件: %s%n", path.getFileName());
                            }
                        } catch (IOException deleteFailure) {
                            System.err.printf("删除文件 %s 失败: %s%n", path, deleteFailure.getMessage());
                        }
                    } catch (Exception e) {
                        System.err.printf("处理文件 %s 失败: %s%n", path, e.getMessage());
                        e.printStackTrace();
                    }
                }
            }
            System.out.printf("已完成初始化，共处理 %d 个文件，存入 %d 个向量%n",
                    processedFiles, totalVectors);
        } catch (Exception e) {
            System.err.println("向量库初始化失败: " + e.getMessage());
            e.printStackTrace();
        }
    }

    /** Picks the vector store dedicated to the given logical file type. */
    private EmbeddingStore<TextSegment> storeFor(String fileType) {
        return switch (fileType) {
            case "pdf" -> pdfStore;
            case "word" -> wordStore;
            case "excel" -> excelStore;
            default -> defaultStore;
        };
    }

    /**
     * Loads, tags, splits and embeds one file into {@code targetStore}.
     *
     * @return the number of vectors stored for this file
     * @throws IOException if the file size cannot be read
     */
    private int ingestFile(Path path, String fileType, DocumentParser parser,
                           DocumentByLineSplitter splitter,
                           EmbeddingStore<TextSegment> targetStore) throws IOException {
        Document document = FileSystemDocumentLoader.loadDocument(path, parser);

        // Attach provenance metadata so retrieved segments can be traced back to their source.
        Metadata metadata = document.metadata()
                .put("file_type", fileType)
                .put("file_name", path.getFileName().toString())
                .put("file_size", String.valueOf(Files.size(path)))
                .put("processing_time", LocalDateTime.now().toString());
        document = Document.from(document.text(), metadata);

        List<TextSegment> segments = splitter.split(document);
        List<Embedding> embeddings = qwenEmbeddingModel.embedAll(segments).content();
        targetStore.addAll(embeddings, segments);
        return embeddings.size();
    }

    /** Recursively collects every regular file under {@code directory}. */
    private List<Path> scanDirectory(Path directory) throws IOException {
        // Files.walk must be closed; try-with-resources releases the directory handles.
        try (Stream<Path> walk = Files.walk(directory)) {
            return walk
                    .filter(Files::isRegularFile)
                    .collect(Collectors.toList());
        }
    }

    /**
     * Determines the logical document type of a file: first by extension lookup, then by Tika
     * MIME sniffing, defaulting to {@code "text"} when neither identifies it.
     */
    private String detectFileType(Path path, Tika tika) {
        try {
            // Locale.ROOT avoids locale-sensitive case folding (e.g. Turkish dotless i).
            String fileName = path.getFileName().toString().toLowerCase(Locale.ROOT);

            // Extension lookup is cheap and deterministic; do it before opening the file.
            int dot = fileName.lastIndexOf('.');
            if (dot >= 0) {
                String mapped = FILE_TYPE_MAPPINGS.get(fileName.substring(dot));
                if (mapped != null) {
                    return mapped;
                }
            }

            // Fall back to content sniffing for unknown or missing extensions.
            String mimeType = tika.detect(path.toFile());
            if (mimeType.contains("pdf")) return "pdf";
            if (mimeType.contains("word")) return "word";
            if (mimeType.contains("excel") || mimeType.contains("spreadsheet")) return "excel";
            return "text"; // best-effort default: treat unrecognized content as plain text
        } catch (Exception e) {
            System.err.printf("检测文件 %s 类型失败: %s%n", path, e.getMessage());
            return "text"; // best-effort default on detection failure
        }
    }
}