package com.mings.ai.controller;

import com.mings.ai.dto.*;
import com.mings.ai.service.DocumentProcessor;
import com.mings.ai.service.RagService;
import org.springframework.ai.document.Document;
import org.springframework.ai.reader.TextReader;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;
import org.springframework.ai.vectorstore.SearchRequest;
import org.springframework.ai.vectorstore.VectorStore;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.ClassPathResource;
import org.springframework.core.io.Resource;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;

@RestController
@RequestMapping("/api/knowledge")
class KnowledgeBaseController {

    @Autowired
    private VectorStore vectorStore;

    @Autowired
    private RagService ragService;

    @Autowired
    private DocumentProcessor documentProcessor;

    // 添加文档到知识库（支持文件上传）
    @PostMapping("/addDocument")
    public String addDocument(@RequestParam("file") MultipartFile file) {
        try {
            if (file.isEmpty()) {
                return "文件不能为空";
            }

            // 获取文件名
            String fileName = file.getOriginalFilename();
            
            // 检查文件格式是否支持
            if (!documentProcessor.isSupportedFormat(fileName)) {
                return "不支持的文件格式，请上传PDF、Word、Excel或TXT文件";
            }
            
            // 提取文件文本内容
            String content = documentProcessor.extractTextFromFile(file);
            
            if (content == null || content.trim().isEmpty()) {
                return "文件内容为空或无法提取文本内容";
            }

            // 创建文档对象
            Document document = new Document(content);
            document.getMetadata().put("source", fileName);
            
            // 对文档进行文本分割
            TokenTextSplitter textSplitter = new TokenTextSplitter(2000, 10, 3, 5000, true);
            List<Document> splitDocuments = textSplitter.apply(List.of(document));
            
            // 添加到向量数据库
            vectorStore.add(splitDocuments);

            return "文档添加成功，共处理 " + splitDocuments.size() + " 个文本块";
        } catch (Exception e) {
            e.printStackTrace();
            return "文档添加失败: " + e.getMessage();
        }
    }

    // 保留原有接口（兼容性）
    @PostMapping("/documents/legacy")
    public String addDocumentLegacy(@RequestBody DocumentRequest request) {
        try {
            // 创建文档读取器
            Resource resource = new ClassPathResource("documents/" + request.getFileName());
            TextReader textReader = new TextReader(resource);
            textReader.getCustomMetadata().put("source", request.getFileName());

            // 读取文档
            List<Document> documents = textReader.get();

            // 先对每个文档进行文本分割
            TokenTextSplitter textSplitter = new TokenTextSplitter(2000, 10, 3, 5000, true);
            List<Document> splitDocuments = textSplitter.apply(documents);
            
            // 为每个分割后的文档设置metadata
            for (Document splitDoc : splitDocuments) {
                splitDoc.getMetadata().put("source", request.getFileName());
            }

            // 添加到向量数据库
            vectorStore.add(splitDocuments);

            return "文档添加成功，共处理 " + splitDocuments.size() + " 个文本块";
        } catch (Exception e) {
            return "文档添加失败: " + e.getMessage();
        }
    }

    // 查询知识库（增强版，集成大模型）
    @GetMapping("/search")
    public EnhancedSearchResponse search(@RequestParam String query,
                                         @RequestParam(defaultValue = "5") int topK) {
        try {
            SearchRequest searchRequest = SearchRequest.builder()
                    .query(query)
                    .topK(topK)
                    .build();

            // 1. 从向量库检索相关文档
            List<Document> results = vectorStore.similaritySearch(searchRequest);

            // 2. 构建相关文档列表
            List<SearchResult> searchResults = results.stream()
                    .map(doc -> new SearchResult(
                            doc.getText(),
                            doc.getMetadata().get("source") != null ? 
                                doc.getMetadata().get("source").toString() : "未知来源",
                            doc.getMetadata()
                    ))
                    .collect(Collectors.toList());

            // 3. 通过RagService调用大模型生成智能回答
            String answer = ragService.query(query, topK);

            return new EnhancedSearchResponse(answer, searchResults, results.size());
        } catch (Exception e) {
            e.printStackTrace();
            return new EnhancedSearchResponse("查询失败：" + e.getMessage(), List.of(), 0);
        }
    }

    // 原始查询接口（保留兼容性）
    @GetMapping("/search/basic")
    public SearchResponse basicSearch(@RequestParam String query,
                                      @RequestParam(defaultValue = "5") int topK) {
        try {
            SearchRequest searchRequest = SearchRequest.builder()
                    .query(query)
                    .topK(topK)
                    .build();

            List<Document> results = vectorStore.similaritySearch(searchRequest);

            List<SearchResult> searchResults = results.stream()
                    .map(doc -> new SearchResult(
                            doc.getText(),
                            doc.getMetadata().get("source") != null ? 
                                doc.getMetadata().get("source").toString() : "未知来源",
                            doc.getMetadata()
                    ))
                    .collect(Collectors.toList());

            return new SearchResponse(searchResults, results.size());
        } catch (Exception e) {
            e.printStackTrace();
            return new SearchResponse(List.of(), 0);
        }
    }

    // 获取已嵌入的文档列表
    @GetMapping("/getDocuments")
    public DocumentListResponse getDocuments() {
        try {
            // 从向量库中获取所有文档的元数据
            List<Document> allDocuments = vectorStore.similaritySearch(SearchRequest.builder()
                    .query("")
                    .topK(1000) // 获取足够多的文档
                    .build());
            
            // 提取唯一的文档名称
            List<String> documentNames = allDocuments.stream()
                    .map(doc -> doc.getMetadata().get("source") != null ? 
                            doc.getMetadata().get("source").toString() : "未知来源")
                    .distinct()
                    .collect(Collectors.toList());
            
            return new DocumentListResponse(documentNames, documentNames.size());
        } catch (Exception e) {
            e.printStackTrace();
            return new DocumentListResponse(List.of(), 0);
        }
    }

    // 获取知识库统计信息
    @GetMapping("/stats")
    public Map<String, Object> getStats() {
        try {
            List<Document> allDocuments = vectorStore.similaritySearch(SearchRequest.builder()
                    .query("")
                    .topK(1000)
                    .build());
            
            List<String> documentNames = allDocuments.stream()
                    .map(doc -> doc.getMetadata().get("source") != null ? 
                            doc.getMetadata().get("source").toString() : "未知来源")
                    .distinct()
                    .collect(Collectors.toList());
            
            return Map.of(
                    "status", "知识库系统运行正常",
                    "vectorStore", "ElasticSearch Vector Store",
                    "documentCount", documentNames.size(),
                    "documents", documentNames
            );
        } catch (Exception e) {
            return Map.of(
                    "status", "知识库系统运行正常",
                    "vectorStore", "ElasticSearch Vector Store",
                    "documentCount", 0,
                    "documents", List.of()
            );
        }
    }
}