//package org.example.springaitest.config;
//
//import com.fasterxml.jackson.core.type.TypeReference;
//import com.fasterxml.jackson.databind.ObjectMapper;
//import io.weaviate.client.WeaviateClient;
//import org.springframework.ai.document.Document;
//import org.springframework.ai.vectorstore.VectorStore;
//import org.springframework.beans.factory.annotation.Autowired;
//import org.springframework.boot.CommandLineRunner;
//import org.springframework.core.io.Resource;
//import org.springframework.core.io.ResourceLoader;
//import org.springframework.stereotype.Component;
//
//import java.io.InputStream;
//import java.util.List;
//import java.util.Map;
//import java.util.stream.Collectors;
//
//
// NOTE(review): This entire class is intentionally commented out. If re-enabled, the
// @Component annotation will register it as a CommandLineRunner that runs the import on
// every application startup — confirm that is desired before uncommenting (a one-shot
// import is usually guarded by a flag or profile). Also note:
//   - the `io.weaviate.client.WeaviateClient` import above appears unused;
//   - the cast `(Map<String, Object>) item.get("metadata")` is unchecked — presumably
//     each JSON item has a "content" string and a "metadata" object; verify against
//     spring-ai-documents.json before re-enabling;
//   - System.out printing should likely become SLF4J logging if this code returns to use.
//
// Purpose (as written): reads `classpath:spring-ai-documents.json`, parses it as a list
// of {content, metadata} entries, converts each to a Spring AI Document, and adds them
// to the injected VectorStore in batches of BATCH_SIZE to bound per-call payload size.
//
//@Component
//public class DocumentImportRunner implements CommandLineRunner {
//    private final VectorStore vectorStore;
//    private final ResourceLoader resourceLoader;
//    private final ObjectMapper objectMapper;
//    private static final int BATCH_SIZE = 64;
//
//    @Autowired
//    public DocumentImportRunner(VectorStore vectorStore, ResourceLoader resourceLoader, ObjectMapper objectMapper) {
//        this.vectorStore = vectorStore;
//        this.resourceLoader = resourceLoader;
//        this.objectMapper = objectMapper;
//    }
//
//    @Override
//    public void run(String... args) throws Exception {
//        // Load the file via a classpath resource path
//        Resource resource = resourceLoader.getResource("classpath:spring-ai-documents.json");
//
//        // Read the classpath resource through an input stream (try-with-resources closes it)
//        try (InputStream inputStream = resource.getInputStream()) {
//            // Parse the JSON data as a list of maps
//            List<Map<String, Object>> jsonData = objectMapper.readValue(
//                    inputStream, new TypeReference<List<Map<String, Object>>>() {});
//
//            // Convert each entry to a Document (content string + metadata map)
//            List<Document> documents = jsonData.stream()
//                    .map(item -> new Document(
//                            (String) item.get("content"),
//                            (Map<String, Object>) item.get("metadata")
//                    )).collect(Collectors.toList());
//
//            // Import the documents in batches of BATCH_SIZE
//            int totalDocuments = documents.size();
//            int batches = (int) Math.ceil((double) totalDocuments / BATCH_SIZE);
//
//            System.out.printf("开始导入 %d 个文档，共 %d 批%n", totalDocuments, batches);
//
//            for (int i = 0; i < batches; i++) {
//                int start = i * BATCH_SIZE;
//                int end = Math.min(start + BATCH_SIZE, totalDocuments);
//                List<Document> batch = documents.subList(start, end);
//
//                vectorStore.add(batch);
//                System.out.printf("已导入第 %d/%d 批 (%d 个文档)%n", i + 1, batches, batch.size());
//            }
//
//            System.out.println("所有文档导入完成");
//        }
//    }
//}