package com.office.trigger.http;

import com.office.api.IRAGService;
import com.office.api.response.Response;
import jakarta.annotation.Resource;
import jakarta.servlet.http.HttpServletRequest;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider;
import org.redisson.api.RList;
import org.redisson.api.RedissonClient;
import org.springframework.ai.document.Document;
import org.springframework.ai.ollama.OllamaChatClient;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;
import org.springframework.ai.vectorstore.PgVectorStore;
import org.springframework.ai.vectorstore.SearchRequest;
import org.springframework.ai.vectorstore.SimpleVectorStore;
import org.springframework.core.io.PathResource;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;


/**
 * RAG检索
 * @author 数字牧马人
 */
@Slf4j
@RestController()
@CrossOrigin("*")
@RequestMapping("/api/v1/rag/")
public class RAGController implements IRAGService {

    @Resource
    private OllamaChatClient ollamaChatClient;
    @Resource
    private TokenTextSplitter tokenTextSplitter;
    @Resource
    private SimpleVectorStore simpleVectorStore;
    @Resource
    private PgVectorStore pgVectorStore;
    @Resource
    private RedissonClient redissonClient;

    /**
     * 检查当前用户是否为管理员（临时测试版本）
     * @return 是否为管理员
     */
    /**
     * Checks whether the current user is an administrator (temporary test version).
     * <p>
     * NOTE(review): this check trusts the client-supplied {@code X-User-Role}
     * header, which is trivially spoofable. Replace with a real
     * authentication/authorization mechanism (e.g. Spring Security) before
     * production use.
     *
     * @param request the incoming HTTP request
     * @return {@code true} if the request carries the admin role header
     */
    private boolean isAdmin(HttpServletRequest request) {
        try {
            // "admin".equals(...) is null-safe, so no explicit null check is needed.
            return "admin".equals(request.getHeader("X-User-Role"));
        } catch (Exception e) {
            // Fail closed: any error during the check denies admin access.
            return false;
        }
    }

    /**
     * Returns the list of knowledge-base tags.
     *
     * @return all registered RAG tags
     */
    @RequestMapping(value = "query_rag_tag_list", method = RequestMethod.GET)
    @Override
    public Response<List<String>> queryRagTagList() {
        RList<String> elements = redissonClient.getList("ragTag");
        // Snapshot into a plain ArrayList: returning the live Redis-backed RList
        // would issue further Redis calls during response serialization.
        return Response.<List<String>>builder()
                .code("0000")
                .info("调用成功")
                .data(new ArrayList<>(elements))
                .build();
    }
    // http://localhost:8080/api/v1/rag/file/upload 知识库上传
    /**
     * Knowledge-base upload endpoint.
     * http://localhost:8080/api/v1/rag/file/upload
     * <p>
     * Each uploaded file is parsed, split into token-sized chunks, tagged with
     * the knowledge-base name, de-duplicated and stored in the vector store.
     * Unparseable or empty files are skipped; the first hard failure aborts the
     * request with a 500 response.
     *
     * @param ragTag the knowledge-base tag to file the documents under
     * @param files  the uploaded files
     * @return success, or a 500 response describing the first failure
     */
    @RequestMapping(value = "file/upload", method = RequestMethod.POST, headers = "content-type=multipart/form-data")
    @Override
    public Response<String> uploadFile(@RequestParam("ragTag") String ragTag, @RequestParam("file") List<MultipartFile> files) {
        for (MultipartFile file : files) {
            try {
                String fileName = file.getOriginalFilename();
                if (fileName == null) {
                    log.warn("文件名为空，跳过处理");
                    continue;
                }

                // Guard against filenames without an extension: lastIndexOf == -1
                // would otherwise yield the whole name as the "extension".
                int dot = fileName.lastIndexOf('.');
                String fileExtension = dot >= 0 ? fileName.substring(dot + 1).toLowerCase() : "";
                log.info("处理文件: {}, 类型: {}", fileName, fileExtension);

                // Parse the upload via the safe reader (POI/plain-text fallback).
                List<Document> documents = safeReadDocument(file.getResource(), fileName);
                if (documents.isEmpty()) {
                    log.warn("文件 {} 解析后为空，跳过处理", fileName);
                    continue;
                }

                // Split into token-sized chunks; fall back to the unsplit
                // documents if the splitter produces nothing.
                List<Document> documentSplitterList = tokenTextSplitter.apply(documents);
                if (documentSplitterList.isEmpty()) {
                    documentSplitterList = documents;
                }

                // Tag every chunk so later filter expressions can find it.
                documentSplitterList.forEach(doc -> doc.getMetadata().put("knowledge", ragTag));

                // Drop chunks whose normalized content is identical.
                List<Document> uniqueDocuments = removeDuplicateDocuments(documentSplitterList);

                if (!uniqueDocuments.isEmpty()) {
                    // Any store failure propagates to the outer catch, which
                    // logs it and returns a 500 (the previous inner try/rethrow
                    // was redundant).
                    pgVectorStore.accept(uniqueDocuments);
                    log.info("成功处理文件: {}, 文档数量: {}", fileName, uniqueDocuments.size());
                }

                // Register the tag in the knowledge-base index (idempotent).
                RList<String> elements = redissonClient.getList("ragTag");
                if (!elements.contains(ragTag)) {
                    elements.add(ragTag);
                }
            } catch (Exception e) {
                log.error("处理文件时发生错误: {}", e.getMessage(), e);
                return Response.<String>builder()
                    .code("500")
                    .info("文件处理失败: " + e.getMessage())
                    .build();
            }
        }

        return Response.<String>builder()
            .code("0000")
            .info("上传成功")
            .data("文件上传成功")
            .build();
    }

    /**
     * 移除重复的文档
     * @param documents 文档列表
     * @return 去重后的文档列表
     */
    /**
     * Removes documents whose normalized content duplicates an earlier one.
     * Content is normalized by trimming, lower-casing and collapsing runs of
     * whitespace into single spaces; blank documents are dropped entirely.
     *
     * @param documents the candidate documents (may be null or empty)
     * @return a new list keeping the first occurrence of each distinct content
     */
    private List<Document> removeDuplicateDocuments(List<Document> documents) {
        List<Document> result = new ArrayList<>();
        if (documents == null) {
            return result;
        }

        Set<String> seen = new HashSet<>();
        for (Document doc : documents) {
            String raw = doc.getContent();
            if (raw == null) {
                continue;
            }

            // Normalize: trim, lower-case, collapse internal whitespace.
            String normalized = raw.trim()
                    .toLowerCase()
                    .replaceAll("\\s+", " ")
                    .trim();
            if (normalized.isEmpty()) {
                continue;
            }

            // Set.add returns false for a duplicate, so each distinct content
            // is kept exactly once, in first-seen order.
            if (seen.add(normalized)) {
                result.add(doc);
            }
        }

        return result;
    }

    /**
     * Clones a Git repository, indexes every supported file into the vector
     * store under a tag derived from the repository name, then removes the
     * local clone.
     *
     * @param repoUrl  clone URL of the repository to analyze
     * @param userName username used for authentication
     * @param token    access token / password used for authentication
     * @return success response once indexing completes
     * @throws Exception if cloning or directory cleanup fails
     */
    @RequestMapping(value = "analyze_git_repository", method = RequestMethod.POST)
    @Override
    public Response<String> analyzeGitRepository(@RequestParam("repoUrl") String repoUrl, @RequestParam("userName") String userName, @RequestParam("token") String token) throws Exception {
        String localPath = "./git-cloned-repo";
        String repoProjectName = extractProjectName(repoUrl);

        // Start from a clean checkout directory.
        FileUtils.deleteDirectory(new File(localPath));

        // try-with-resources guarantees the Git handle is closed even if the
        // file walk throws (the original leaked it on failure).
        try (Git git = Git.cloneRepository()
                .setURI(repoUrl)
                .setDirectory(new File(localPath))
                .setCredentialsProvider(new UsernamePasswordCredentialsProvider(userName, token))
                .call()) {

            Files.walkFileTree(Paths.get(localPath), new SimpleFileVisitor<>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    try {
                        String fileName = file.getFileName().toString();
                        // Guard against filenames without an extension.
                        int dot = fileName.lastIndexOf('.');
                        String fileExtension = dot >= 0 ? fileName.substring(dot + 1).toLowerCase() : "";

                        // Skip anything the parsers cannot handle.
                        if (!isSupportedFileType(fileExtension)) {
                            return FileVisitResult.CONTINUE;
                        }

                        List<Document> documents = safeReadDocument(new PathResource(file), fileName);
                        if (documents.isEmpty()) {
                            return FileVisitResult.CONTINUE;
                        }

                        // Chunk, tag, de-duplicate and store.
                        List<Document> documentSplitterList = tokenTextSplitter.apply(documents);
                        if (documentSplitterList.isEmpty()) {
                            documentSplitterList = documents;
                        }
                        documentSplitterList.forEach(doc -> doc.getMetadata().put("knowledge", repoProjectName));
                        List<Document> uniqueDocuments = removeDuplicateDocuments(documentSplitterList);
                        if (!uniqueDocuments.isEmpty()) {
                            pgVectorStore.accept(uniqueDocuments);
                        }
                    } catch (IncompatibleClassChangeError e) {
                        // Known Tika classpath conflict — skip this file.
                        log.error("Tika 版本冲突错误，跳过文件: {}", file.getFileName());
                    } catch (Exception e) {
                        // Any other per-file failure is skipped, not fatal.
                        log.warn("处理文件失败，跳过: {}, 错误: {}", file.getFileName(), e.getMessage());
                    }
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
                    // Unreadable files are skipped rather than aborting the walk.
                    return FileVisitResult.CONTINUE;
                }
            });
        } finally {
            // Always remove the local clone, even when indexing fails.
            FileUtils.deleteDirectory(new File(localPath));
        }

        // Register the repository tag (idempotent).
        RList<String> elements = redissonClient.getList("ragTag");
        if (!elements.contains(repoProjectName)) {
            elements.add(repoProjectName);
        }

        return Response.<String>builder().code("0000").info("调用成功").build();
    }

    /**
     * Derives the project name from a repository URL, e.g.
     * {@code https://host/org/my-repo.git -> my-repo}.
     *
     * @param repoUrl the clone URL
     * @return the last path segment with a trailing ".git" removed
     */
    private String extractProjectName(String repoUrl) {
        String[] parts = repoUrl.split("/");
        String projectNameWithGit = parts[parts.length - 1];
        // Strip only a trailing ".git": replace(".git", "") would also mangle
        // names that merely contain ".git" in the middle (e.g. "my.git-tools").
        return projectNameWithGit.endsWith(".git")
                ? projectNameWithGit.substring(0, projectNameWithGit.length() - 4)
                : projectNameWithGit;
    }

    /**
     * 检查文件类型是否支持
     * @param fileExtension 文件扩展名
     * @return 是否支持
     */
    /**
     * Reports whether the given (lower-case) file extension is one of the
     * formats the document readers can parse.
     *
     * @param fileExtension file extension without the leading dot
     * @return {@code true} when the extension is supported
     */
    private boolean isSupportedFileType(String fileExtension) {
        // Same whitelist as before, expressed as a set-membership test.
        // The null guard preserves the original behavior (null -> false),
        // since the immutable set is null-hostile.
        return fileExtension != null
                && Set.of("txt", "md", "csv", "xlsx", "xls", "doc", "docx", "pdf")
                       .contains(fileExtension);
    }

    /**
     * 安全地读取文档，暂时使用备用方法避免版本冲突
     * @param resource 文件资源
     * @param fileName 文件名
     * @return 文档列表
     */
    /**
     * Reads a document through the fallback parser.
     * <p>
     * Tika-based parsing is intentionally bypassed for now because of a
     * classpath version conflict; all parsing is delegated to
     * {@link #fallbackReadDocument(org.springframework.core.io.Resource, String)}.
     *
     * @param resource the file resource to read
     * @param fileName the original file name (used to pick a parser)
     * @return the parsed documents, possibly empty
     */
    private List<Document> safeReadDocument(org.springframework.core.io.Resource resource, String fileName) {
        log.info("使用备用方法处理文件: {}", fileName);
        return fallbackReadDocument(resource, fileName);
    }

    /**
     * 备用文档读取方法
     * @param resource 文件资源
     * @param fileName 文件名
     * @return 文档列表
     */
    /**
     * Fallback document reader used instead of Tika.
     * Plain-text formats are read as UTF-8; Excel and Word documents are
     * parsed with POI via {@code DocumentParserUtils}. Unsupported types and
     * parse failures yield an empty list rather than throwing.
     *
     * @param resource the file resource
     * @param fileName the original file name (drives format detection)
     * @return parsed documents, or an empty list
     */
    private List<Document> fallbackReadDocument(org.springframework.core.io.Resource resource, String fileName) {
        try {
            // Guard against filenames without an extension.
            int dot = fileName.lastIndexOf('.');
            String fileExtension = dot >= 0 ? fileName.substring(dot + 1).toLowerCase() : "";

            // Plain-text formats: read the raw bytes as UTF-8.
            // try-with-resources closes the stream (previously leaked), and
            // StandardCharsets.UTF_8 avoids the checked-exception string form.
            if ("txt".equals(fileExtension) || "md".equals(fileExtension) || "csv".equals(fileExtension)) {
                String content;
                try (InputStream in = resource.getInputStream()) {
                    content = new String(in.readAllBytes(), StandardCharsets.UTF_8);
                }
                if (content.trim().isEmpty()) {
                    return new ArrayList<>();
                }
                return List.of(new Document(content, new HashMap<>(Map.of("source", fileName))));
            }

            // Excel: parse with POI.
            if ("xlsx".equals(fileExtension) || "xls".equals(fileExtension)) {
                String content;
                try (InputStream in = resource.getInputStream()) {
                    content = com.office.common.utils.file.DocumentParserUtils.parseExcelDocument(in, fileName);
                }
                if (content.isEmpty()) {
                    return new ArrayList<>();
                }
                return List.of(new Document(content, new HashMap<>(Map.of("source", fileName, "type", "excel"))));
            }

            // Word: .docx parsed with POI; legacy .doc is not supported, so a
            // placeholder document carrying advice is returned instead.
            if ("doc".equals(fileExtension) || "docx".equals(fileExtension)) {
                if ("doc".equals(fileExtension)) {
                    String content = "Word 文档内容（旧版.doc格式暂不支持解析，建议转换为.docx格式）：" + fileName;
                    return List.of(new Document(content, new HashMap<>(Map.of("source", fileName, "type", "word", "format", "doc"))));
                }
                String content;
                try (InputStream in = resource.getInputStream()) {
                    content = com.office.common.utils.file.DocumentParserUtils.parseWordDocument(in, fileName);
                }
                if (content.isEmpty()) {
                    return new ArrayList<>();
                }
                return List.of(new Document(content, new HashMap<>(Map.of("source", fileName, "type", "word", "format", "docx"))));
            }

            // Anything else is unsupported by the fallback path.
            log.warn("备用方法不支持文件类型: {}", fileExtension);
            return new ArrayList<>();

        } catch (Exception e) {
            // Best-effort: parsing failures produce an empty result, not a crash.
            log.error("备用方法处理文件失败: {}, 错误: {}", fileName, e.getMessage());
            return new ArrayList<>();
        }
    }

    /**
     * Empties the knowledge-base tag list stored in Redis.
     * Note: this clears only the tag index, not the stored documents.
     *
     * @return success response once the list has been cleared
     */
    @RequestMapping(value = "clear_rag_tag_list", method = RequestMethod.POST)
    @Override
    public Response<String> clearRagTagList() {
        // Remove every tag from the Redis-backed list in one call.
        redissonClient.getList("ragTag").clear();
        return Response.<String>builder()
                .code("0000")
                .info("清理成功")
                .data("标签列表已清空")
                .build();
    }

    /**
     * Deletes a knowledge-base tag and its documents (admin only).
     *
     * @param ragTag  the tag to remove
     * @param request current HTTP request, used for the admin check
     * @return success, 403 for non-admins, or 500 on failure
     */
    @RequestMapping(value = "delete_rag_tag", method = RequestMethod.DELETE)
    @Override
    public Response<String> deleteRagTag(@RequestParam("ragTag") String ragTag, HttpServletRequest request) {
        // Admin-only operation.
        if (!isAdmin(request)) {
            return Response.<String>builder()
                    .code("403")
                    .info("权限不足：只有管理员才能删除知识库")
                    .build();
        }

        try {
            // 1. Remove the tag from the Redis tag list.
            RList<String> elements = redissonClient.getList("ragTag");
            elements.remove(ragTag);

            // 2. Delete the tag's documents from the vector store.
            //    withTopK raises the result cap: the default topK (4) would
            //    leave most of the tag's documents behind. Single quotes are
            //    escaped so a tag containing ' cannot break the filter.
            String safeTag = ragTag.replace("'", "\\'");
            SearchRequest searchRequest = SearchRequest.query(ragTag)
                .withTopK(10000)
                .withFilterExpression("knowledge == '" + safeTag + "'");
            List<Document> documents = pgVectorStore.similaritySearch(searchRequest);

            if (!documents.isEmpty()) {
                List<String> documentIds = documents.stream()
                    .map(Document::getId)
                    .collect(Collectors.toList());
                pgVectorStore.delete(documentIds);
            }

            return Response.<String>builder()
                    .code("0000")
                    .info("删除成功")
                    .data("标签及相关文档已删除")
                    .build();
        } catch (Exception e) {
            return Response.<String>builder()
                    .code("500")
                    .info("删除失败：" + e.getMessage())
                    .build();
        }
    }

    /**
     * Returns the documents stored under a knowledge-base tag.
     *
     * @param ragTag the knowledge-base tag to query
     * @return documents tagged with {@code ragTag}
     */
    @RequestMapping(value = "query_rag_content", method = RequestMethod.GET)
    @Override
    public Response<List<Document>> queryRagContent(@RequestParam("ragTag") String ragTag) {
        try {
            // withTopK raises the result cap (default topK is 4, which would
            // hide most of the knowledge base); quotes are escaped to keep the
            // filter expression well-formed.
            SearchRequest searchRequest = SearchRequest.query(ragTag)
                .withTopK(10000)
                .withFilterExpression("knowledge == '" + ragTag.replace("'", "\\'") + "'");
            List<Document> documents = pgVectorStore.similaritySearch(searchRequest);

            return Response.<List<Document>>builder()
                    .code("0000")
                    .info("查询成功")
                    .data(documents)
                    .build();
        } catch (Exception e) {
            return Response.<List<Document>>builder()
                    .code("500")
                    .info("查询失败：" + e.getMessage())
                    .build();
        }
    }

    /**
     * Deletes a single document from the vector store (admin only).
     *
     * @param documentId id of the document to remove
     * @param request    current HTTP request, used for the admin check
     * @return success, 403 for non-admins, or 500 on failure
     */
    @RequestMapping(value = "delete_document", method = RequestMethod.DELETE)
    @Override
    public Response<String> deleteDocument(@RequestParam("documentId") String documentId, HttpServletRequest request) {
        // Non-admins are rejected before touching the store.
        if (!isAdmin(request)) {
            return Response.<String>builder()
                    .code("403")
                    .info("权限不足：只有管理员才能删除文档")
                    .build();
        }

        try {
            // Single-element delete against the vector store.
            pgVectorStore.delete(List.of(documentId));
            return Response.<String>builder()
                    .code("0000")
                    .info("删除成功")
                    .data("文档已删除")
                    .build();
        } catch (Exception e) {
            return Response.<String>builder()
                    .code("500")
                    .info("删除失败：" + e.getMessage())
                    .build();
        }
    }

    /**
     * Free-text search across one or more knowledge bases.
     * Request body keys: {@code query} (required), {@code maxResults}
     * (default 10), {@code similarityThreshold} (default 0.7),
     * {@code knowledgeTags} (optional list restricting the search).
     *
     * @param request JSON body with the search parameters
     * @return matching documents
     */
    @RequestMapping(value = "search", method = RequestMethod.POST)
    @Override
    public Response<List<Document>> searchKnowledgeBase(@RequestBody Map<String, Object> request) {
        try {
            String query = (String) request.get("query");
            // Numeric JSON values may deserialize as Integer, Long or Double;
            // go through Number to avoid ClassCastException on e.g. "0.7" vs 1.
            int maxResults = ((Number) request.getOrDefault("maxResults", 10)).intValue();
            double similarityThreshold = ((Number) request.getOrDefault("similarityThreshold", 0.7)).doubleValue();
            @SuppressWarnings("unchecked")
            List<String> knowledgeTags = (List<String>) request.getOrDefault("knowledgeTags", new ArrayList<>());

            if (query == null || query.trim().isEmpty()) {
                return Response.<List<Document>>builder()
                        .code("400")
                        .info("查询关键词不能为空")
                        .build();
            }

            // Apply the requested threshold — previously it was parsed but
            // never used, and results were "filtered" by an always-true predicate.
            SearchRequest searchRequest = SearchRequest.query(query)
                    .withTopK(maxResults)
                    .withSimilarityThreshold(similarityThreshold);

            // Restrict to the requested knowledge bases when tags are given;
            // quotes are escaped to keep the filter expression well-formed.
            if (!knowledgeTags.isEmpty()) {
                String filterExpression = knowledgeTags.stream()
                        .map(tag -> "knowledge == '" + tag.replace("'", "\\'") + "'")
                        .collect(Collectors.joining(" OR "));
                searchRequest = searchRequest.withFilterExpression(filterExpression);
            }

            List<Document> documents = pgVectorStore.similaritySearch(searchRequest);

            return Response.<List<Document>>builder()
                    .code("0000")
                    .info("搜索成功")
                    .data(documents)
                    .build();
        } catch (Exception e) {
            return Response.<List<Document>>builder()
                    .code("500")
                    .info("搜索失败：" + e.getMessage())
                    .build();
        }
    }

    /**
     * Returns knowledge-base summary statistics: the tag list, a per-tag
     * document count, and totals.
     *
     * @return a map with keys {@code totalTags}, {@code tags},
     *         {@code totalDocuments} and {@code tagDocumentCounts}
     */
    @RequestMapping(value = "stats", method = RequestMethod.GET)
    @Override
    public Response<Map<String, Object>> getKnowledgeBaseStats() {
        try {
            Map<String, Object> stats = new HashMap<>();

            // Snapshot the Redis-backed tag list.
            RList<String> elements = redissonClient.getList("ragTag");
            List<String> tags = new ArrayList<>(elements);
            stats.put("totalTags", tags.size());
            stats.put("tags", tags);

            Map<String, Integer> tagDocumentCounts = new HashMap<>();
            int totalDocuments = 0;

            for (String tag : tags) {
                try {
                    // withTopK raises the cap: with the default topK (4) every
                    // tag would report at most 4 documents, making the stats
                    // wrong. Quotes are escaped to keep the filter well-formed.
                    SearchRequest searchRequest = SearchRequest.query(tag)
                            .withTopK(10000)
                            .withFilterExpression("knowledge == '" + tag.replace("'", "\\'") + "'");
                    List<Document> documents = pgVectorStore.similaritySearch(searchRequest);
                    int count = documents.size();
                    tagDocumentCounts.put(tag, count);
                    totalDocuments += count;
                } catch (Exception e) {
                    // A failing tag counts as zero rather than failing the request.
                    tagDocumentCounts.put(tag, 0);
                }
            }

            stats.put("totalDocuments", totalDocuments);
            stats.put("tagDocumentCounts", tagDocumentCounts);

            return Response.<Map<String, Object>>builder()
                    .code("0000")
                    .info("获取统计信息成功")
                    .data(stats)
                    .build();
        } catch (Exception e) {
            return Response.<Map<String, Object>>builder()
                    .code("500")
                    .info("获取统计信息失败：" + e.getMessage())
                    .build();
        }
    }

    /**
     * Parses an uploaded Word/Excel document and returns its text content
     * without storing anything in the vector store.
     *
     * @param file the uploaded document
     * @return the extracted text, or a 400/500 error response
     */
    @RequestMapping(value = "parse_document", method = RequestMethod.POST, headers = "content-type=multipart/form-data")
    public Response<String> parseDocument(@RequestParam("file") MultipartFile file) {
        try {
            String fileName = file.getOriginalFilename();
            if (fileName == null) {
                return Response.<String>builder()
                        .code("400")
                        .info("文件名不能为空")
                        .build();
            }

            // Guard against filenames without an extension.
            int dot = fileName.lastIndexOf('.');
            String fileExtension = dot >= 0 ? fileName.substring(dot + 1).toLowerCase() : "";
            String content;

            if (com.office.common.utils.file.DocumentParserUtils.isWordDocument(fileName)) {
                if (fileExtension.equals("doc")) {
                    // Legacy .doc is not supported by the POI-based parser.
                    content = "此文档为旧版Word格式(.doc)，暂不支持解析。建议将文档另存为.docx格式后重新上传。";
                } else {
                    // try-with-resources closes the upload stream (previously leaked).
                    try (InputStream in = file.getInputStream()) {
                        content = com.office.common.utils.file.DocumentParserUtils.parseWordDocument(in, fileName);
                    }
                }
            } else if (com.office.common.utils.file.DocumentParserUtils.isExcelDocument(fileName)) {
                try (InputStream in = file.getInputStream()) {
                    content = com.office.common.utils.file.DocumentParserUtils.parseExcelDocument(in, fileName);
                }
            } else {
                return Response.<String>builder()
                        .code("400")
                        .info("不支持的文件类型: " + fileExtension)
                        .build();
            }

            if (content.isEmpty()) {
                return Response.<String>builder()
                        .code("400")
                        .info("文档内容为空或解析失败")
                        .build();
            }

            return Response.<String>builder()
                    .code("0000")
                    .info("解析成功")
                    .data(content)
                    .build();

        } catch (Exception e) {
            log.error("解析文档失败: {}", e.getMessage());
            return Response.<String>builder()
                    .code("500")
                    .info("解析失败: " + e.getMessage())
                    .build();
        }
    }

    /**
     * Queries several knowledge bases in one call.
     * Request body key: {@code ragTags} — the list of tags to query.
     *
     * @param request JSON body carrying the tag list
     * @return a map from tag to that tag's documents (empty list on per-tag failure)
     */
    @RequestMapping(value = "batch_query", method = RequestMethod.POST)
    @Override
    public Response<Map<String, List<Document>>> batchQueryKnowledgeBases(@RequestBody Map<String, Object> request) {
        try {
            @SuppressWarnings("unchecked")
            List<String> ragTags = (List<String>) request.get("ragTags");

            if (ragTags == null || ragTags.isEmpty()) {
                return Response.<Map<String, List<Document>>>builder()
                        .code("400")
                        .info("知识库标签列表不能为空")
                        .build();
            }

            Map<String, List<Document>> results = new HashMap<>();

            for (String tag : ragTags) {
                try {
                    // withTopK raises the cap: the default topK (4) would
                    // return at most 4 documents per tag. Quotes are escaped
                    // to keep the filter expression well-formed.
                    SearchRequest searchRequest = SearchRequest.query(tag)
                            .withTopK(10000)
                            .withFilterExpression("knowledge == '" + tag.replace("'", "\\'") + "'");
                    List<Document> documents = pgVectorStore.similaritySearch(searchRequest);
                    results.put(tag, documents);
                } catch (Exception e) {
                    // A failing tag yields an empty list rather than failing the batch.
                    results.put(tag, new ArrayList<>());
                }
            }

            return Response.<Map<String, List<Document>>>builder()
                    .code("0000")
                    .info("批量查询成功")
                    .data(results)
                    .build();
        } catch (Exception e) {
            return Response.<Map<String, List<Document>>>builder()
                    .code("500")
                    .info("批量查询失败：" + e.getMessage())
                    .build();
        }
    }

}
