package com.liru.neo4j_demo.service;

import com.liru.neo4j_demo.model.Chunk;
import com.liru.neo4j_demo.model.MyDocument;
import com.liru.neo4j_demo.model.Entity;
import com.liru.neo4j_demo.model.Relation;
import com.liru.neo4j_demo.util.FileSplitter;
import com.liru.neo4j_demo.util.Neo4jUtils;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.Metadata;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.internal.Json;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.UserMessage;
import org.apache.poi.xwpf.usermodel.XWPFDocument;
import org.apache.poi.xwpf.extractor.XWPFWordExtractor;
import org.neo4j.driver.*;
import org.neo4j.driver.Record;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Knowledge-graph service implementing the core business logic:
 * document ingestion, chunking, embedding, entity/relation extraction,
 * persistence to Neo4j, and retrieval-augmented question answering.
 */
@Service
public class KnowledgeGraphService {

    // Neo4j database driver used for all graph reads and writes.
    private final Driver neo4jDriver;
    // Model that turns text into embedding vectors.
    private final EmbeddingModel embeddingModel;
    // Chat model used to generate the final answer.
    private final OpenAiChatModel openAiChatModel;
    // NLP service used for entity and relation extraction.
    private final NLPProcessingService nlpProcessingService;

    /**
     * Creates the service with all collaborators injected by Spring.
     *
     * @param neo4jDriver          Neo4j database driver
     * @param embeddingModel       embedding model
     * @param openAiChatModel      OpenAI chat model
     * @param nlpProcessingService NLP extraction service
     */
    @Autowired
    public KnowledgeGraphService(Driver neo4jDriver, EmbeddingModel embeddingModel, OpenAiChatModel openAiChatModel, NLPProcessingService nlpProcessingService) {
        this.neo4jDriver = neo4jDriver;
        this.embeddingModel = embeddingModel;
        this.openAiChatModel = openAiChatModel;
        this.nlpProcessingService = nlpProcessingService;
    }

    /**
     * Answers a question against the knowledge graph (RAG-style):
     * retrieves related fragments, builds a prompt, and asks the chat model.
     *
     * @param query       the user question
     * @param knowledgeId knowledge-base id used for data isolation
     * @param limit       maximum number of related fragments to include
     * @return the model-generated answer text
     */
    public String queryKnowledgeGraph(String query, String knowledgeId, int limit) {
        // 1. Graph-extended search for context relevant to the question.
        List<Map<String, Object>> searchResults = graphExtendedSearch(query, knowledgeId, limit);

        // 2. Build the prompt context from the retrieved fragments.
        //    (Prompt text is user-facing Chinese and intentionally unchanged.)
        StringBuilder contextBuilder = new StringBuilder();
        contextBuilder.append("基于以下信息回答问题：\n");
        for (Map<String, Object> result : searchResults) {
            contextBuilder.append("- ").append(result.get("relatedContent")).append("\n");
        }
        contextBuilder.append("\n问题：").append(query);

        // 3. Ask the chat model to generate the final answer.
        ChatResponse response = openAiChatModel.chat(new UserMessage(contextBuilder.toString()));
        return response.aiMessage().text();
    }

    /**
     * Processes an uploaded document and builds its knowledge graph:
     * chunking, embedding, entity/relation extraction and persistence.
     *
     * @param file        the uploaded file
     * @param knowledgeId knowledge-base id used for data isolation
     * @return a summary map with docId, knowledgeId and element counts
     * @throws IOException if the upload cannot be read or copied
     */
    public Map<String, Object> processDocument(MultipartFile file, String knowledgeId) throws IOException {
        // 1. Spill the upload to a temporary file. Use the platform default
        //    temp directory instead of the previous hard-coded "E:\" drive,
        //    which only worked on one particular Windows host.
        File tempFile = File.createTempFile("upload_", ".tmp");
        try {
            Files.copy(file.getInputStream(), tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);

            // getOriginalFilename() may be null depending on the client;
            // fall back to a neutral name so extension sniffing does not NPE.
            String fileName = Objects.requireNonNullElse(file.getOriginalFilename(), "upload.txt");

            // 2. Load the document content according to its file type.
            Document document = loadDocument(tempFile, fileName);

            // 3. Create and persist the document node.
            MyDocument docNode = createDocumentNode(file, knowledgeId);

            // 4. Split the document into chunks.
            List<Chunk> chunks = splitDocumentIntoChunks(document, docNode.getDocId(), knowledgeId);

            // 5. Generate an embedding vector for every chunk.
            generateChunkEmbeddings(chunks);

            // 6. Persist the chunks to Neo4j.
            saveChunksToNeo4j(chunks, docNode.getDocId());

            // 7. Extract entities from the chunks.
            List<Entity> entities = extractEntitiesFromChunks(chunks, knowledgeId);

            // 8. Persist the entities to Neo4j.
            saveEntitiesToNeo4j(entities);

            // 9. Link chunks to the entities they mention.
            linkChunksToEntities(chunks, entities);

            // 10. Extract relations between entities. The NLP service sets
            //     source/target on each relation, so they are self-contained.
            List<Relation> relations = nlpProcessingService.extractRelations(document.text(), entities, knowledgeId);

            // 11. Persist the relations to Neo4j.
            saveRelationsToNeo4j(relations);

            // Summarize the ingestion result for the caller.
            Map<String, Object> result = new HashMap<>();
            result.put("docId", docNode.getDocId());
            result.put("knowledgeId", knowledgeId);
            result.put("chunkCount", chunks.size());
            result.put("entityCount", entities.size());
            result.put("relationCount", relations.size());
            return result;
        } finally {
            // Best-effort cleanup — runs even when an earlier step throws,
            // so a failed ingestion no longer leaks the temp file.
            boolean deleted = tempFile.delete();
            if (!deleted)
                System.out.println("Failed to delete temporary file: " + tempFile.getAbsolutePath());
        }
    }

    /**
     * Loads a document from disk, choosing a loader by file extension.
     * PDF and HTML go through langchain4j's file-system loader; DOCX is
     * extracted with Apache POI; everything else is read as UTF-8 text.
     *
     * @param file     the file on disk
     * @param fileName the original file name (used only for its extension)
     * @return the loaded document
     */
    private Document loadDocument(File file, String fileName) {
        // Locale.ROOT avoids surprises with locale-sensitive lowercasing
        // (e.g. the Turkish dotless 'i') when matching extensions.
        String lower = fileName.toLowerCase(Locale.ROOT);
        if (lower.endsWith(".pdf") || lower.endsWith(".html") || lower.endsWith(".htm")) {
            // PDF and HTML both use the generic file-system loader
            // (the two branches were previously duplicated verbatim).
            return FileSystemDocumentLoader.loadDocument(file.getPath());
        } else if (lower.endsWith(".docx")) {
            // Extract plain text from DOCX via Apache POI.
            try (XWPFDocument doc = new XWPFDocument(Files.newInputStream(file.toPath()));
                 XWPFWordExtractor extractor = new XWPFWordExtractor(doc)) {
                String text = extractor.getText();
                return Document.from(text);
            } catch (IOException e) {
                throw new RuntimeException("Failed to load DOCX document: " + e.getMessage(), e);
            }
        } else {
            // Default: read as a UTF-8 text file.
            try {
                String text = Files.readString(file.toPath(), StandardCharsets.UTF_8);
                return Document.from(text);
            } catch (IOException e) {
                throw new RuntimeException("Failed to load text document: " + e.getMessage(), e);
            }
        }
    }

    /**
     * Creates a document node and persists it to Neo4j.
     *
     * @param file        the uploaded file (metadata source)
     * @param knowledgeId knowledge-base id
     * @return the populated document model
     */
    private MyDocument createDocumentNode(MultipartFile file, String knowledgeId) {
        MyDocument myDocument = new MyDocument();
        myDocument.setDocId(UUID.randomUUID().toString()); // unique document id
        myDocument.setName(file.getOriginalFilename());
        myDocument.setKnowledgeId(knowledgeId);
        myDocument.setCreatedAt(LocalDateTime.now());
        myDocument.setFileType(file.getContentType());
        myDocument.setFileSize(file.getSize());

        // Persist the node; all values are passed as Cypher parameters.
        try (Session session = neo4jDriver.session()) {
            session.writeTransaction(tx -> {
                tx.run(
                        "CREATE (d:MyDocument {docId: $docId, name: $name, knowledgeId: $knowledgeId, " +
                        "createdAt: $createdAt, fileType: $fileType, fileSize: $fileSize})",
                        Values.parameters(
                                "docId", myDocument.getDocId(),
                                "name", myDocument.getName(),
                                "knowledgeId", knowledgeId,
                                "createdAt", myDocument.getCreatedAt().toString(),
                                "fileType", myDocument.getFileType(),
                                "fileSize", myDocument.getFileSize()
                        )
                );
                return null;
            });
        }

        return myDocument;
    }

    /**
     * Splits a document into chunks using the advanced splitting strategy.
     * Blank segments are dropped; indices are assigned in segment order.
     *
     * @param document    the loaded document
     * @param docId       owning document id
     * @param knowledgeId knowledge-base id
     * @return the list of chunks (possibly empty)
     */
    private List<Chunk> splitDocumentIntoChunks(Document document, String docId, String knowledgeId) {
        List<Chunk> chunks = new ArrayList<>();
        String content = document.text();

        // Delegate the actual segmentation to the advanced splitter.
        List<String> segmentTexts = FileSplitter.splitByAdvancedStrategy(content, embeddingModel);

        AtomicInteger index = new AtomicInteger(0);
        for (String segmentText : segmentTexts) {
            if (!segmentText.trim().isEmpty()) {
                Chunk chunk = new Chunk();
                chunk.setChunkId(UUID.randomUUID().toString()); // unique chunk id
                chunk.setContent(segmentText);
                chunk.setKnowledgeId(knowledgeId);
                chunk.setDocId(docId);
                chunk.setIndex(index.getAndIncrement()); // position within the doc
                chunks.add(chunk);
            }
        }

        return chunks;
    }

    /**
     * Generates an embedding vector for every chunk (one model call each).
     *
     * @param chunks the chunks to embed; mutated in place
     */
    private void generateChunkEmbeddings(List<Chunk> chunks) {
        for (Chunk chunk : chunks) {
            Embedding embedding = embeddingModel.embed(chunk.getContent()).content();
            chunk.setEmbedding(embedding.vectorAsList());
        }
    }

    /**
     * Persists chunks to Neo4j and links each to its owning document
     * with a CONTAINS relationship, all in a single write transaction.
     *
     * @param chunks the chunks to save
     * @param docId  the owning document id
     */
    private void saveChunksToNeo4j(List<Chunk> chunks, String docId) {
        try (Session session = neo4jDriver.session()) {
            session.writeTransaction(tx -> {
                for (Chunk chunk : chunks) {
                    // Create the Chunk node (embedding stored as a list property).
                    tx.run(
                            "CREATE (c:Chunk {chunkId: $chunkId, content: $content, knowledgeId: $knowledgeId, " +
                            "docId: $docId, index: $index, embedding: $embedding})",
                            Values.parameters(
                                    "chunkId", chunk.getChunkId(),
                                    "content", chunk.getContent(),
                                    "knowledgeId", chunk.getKnowledgeId(),
                                    "docId", docId,
                                    "index", chunk.getIndex(),
                                    "embedding", chunk.getEmbedding()
                            )
                    );

                    // Link document -> chunk.
                    tx.run(
                            "MATCH (d:MyDocument {docId: $docId}) " +
                            "MATCH (c:Chunk {chunkId: $chunkId}) " +
                            "MERGE (d)-[:CONTAINS {knowledgeId: $knowledgeId}]->(c)",
                            Values.parameters(
                                    "docId", docId,
                                    "chunkId", chunk.getChunkId(),
                                    "knowledgeId", chunk.getKnowledgeId()
                            )
                    );
                }
                return null;
            });
        }
    }

    /**
     * Extracts entities from all chunks via the NLP service,
     * de-duplicating by entity name (first occurrence wins).
     *
     * @param chunks      the chunks to scan
     * @param knowledgeId knowledge-base id
     * @return the de-duplicated entity list
     */
    private List<Entity> extractEntitiesFromChunks(List<Chunk> chunks, String knowledgeId) {
        List<Entity> entities = new ArrayList<>();
        Map<String, Entity> entityMap = new HashMap<>();

        for (Chunk chunk : chunks) {
            String content = chunk.getContent();

            // Delegate extraction to the NLP service.
            List<Entity> chunkEntities = nlpProcessingService.extractEntities(content, knowledgeId);

            // Merge by name; keep the first Entity seen for each name.
            for (Entity entity : chunkEntities) {
                if (!entityMap.containsKey(entity.getName())) {
                    entityMap.put(entity.getName(), entity);
                }
            }
        }

        entities.addAll(entityMap.values());
        return entities;
    }

    /**
     * Persists entities to Neo4j. The (sanitized) entity type is applied as
     * an extra node label, which requires string concatenation — labels
     * cannot be parameterized in Cypher; Neo4jUtils.cleanLabel guards
     * against label injection.
     *
     * @param entities the entities to save
     */
    private void saveEntitiesToNeo4j(List<Entity> entities) {
        try (Session session = neo4jDriver.session()) {
            session.writeTransaction(tx -> {
                for (Entity entity : entities) {
                    // Sanitize the dynamic label and the property map.
                    String cleanedType = Neo4jUtils.cleanLabel(entity.getType());
                    Map<String, Object> cleanedProperties = Neo4jUtils.cleanProperties(entity.getProperties());
                    String cypher = "MERGE (e:Entity:" + cleanedType + " {name: $name, knowledgeId: $knowledgeId}) " +
                                    "ON CREATE SET e.properties = $properties";
                    tx.run(
                            cypher,
                            Values.parameters(
                                    "name", entity.getName(),
                                    "knowledgeId", entity.getKnowledgeId(),
                                    // Properties are serialized to a JSON string property.
                                    "properties", Json.toJson(cleanedProperties)
                            )
                    );
                }
                return null;
            });
        }
    }

    /**
     * Creates MENTIONS relationships between chunks and the entities whose
     * names appear verbatim in the chunk text (simple substring match).
     *
     * @param chunks   the chunks to scan
     * @param entities the candidate entities
     */
    private void linkChunksToEntities(List<Chunk> chunks, List<Entity> entities) {
        try (Session session = neo4jDriver.session()) {
            session.writeTransaction(tx -> {
                for (Chunk chunk : chunks) {
                    for (Entity entity : entities) {
                        // Substring containment is a deliberate heuristic here.
                        if (chunk.getContent().contains(entity.getName())) {
                            tx.run(
                                    "MATCH (c:Chunk {chunkId: $chunkId}) " +
                                    "MATCH (e:Entity {name: $entityName, knowledgeId: $knowledgeId}) " +
                                    "MERGE (c)-[:MENTIONS {knowledgeId: $knowledgeId}]->(e)",
                                    Values.parameters(
                                            "chunkId", chunk.getChunkId(),
                                            "entityName", entity.getName(),
                                            "knowledgeId", chunk.getKnowledgeId()
                                    )
                            );
                        }
                    }
                }
                return null;
            });
        }
    }

    /**
     * Builds a Relation between two entities. Shared helper for the
     * pattern-based extraction below (was previously copy-pasted 3x).
     *
     * @param type        relation type (e.g. "WORKS_FOR")
     * @param properties  relation properties (may be empty, not null)
     * @param knowledgeId knowledge-base id
     * @param source      source entity
     * @param target      target entity
     * @return the populated relation
     */
    private static Relation buildRelation(String type, Map<String, Object> properties,
                                          String knowledgeId, Entity source, Entity target) {
        Relation relation = new Relation();
        relation.setType(type);
        relation.setKnowledgeId(knowledgeId);
        relation.setProperties(properties);
        relation.setSource(source);
        relation.setTarget(target);
        return relation;
    }

    /**
     * Extracts entity relations from chunks using hand-written regex patterns
     * ("X 是 Y 的CEO", "X 任职于 Y", "X 位于 Y").
     * NOTE(review): currently unused — processDocument delegates relation
     * extraction to NLPProcessingService instead. Kept as a lightweight
     * fallback; confirm before deleting. A production system should use a
     * dedicated relation-extraction tool.
     *
     * @param chunks      the chunks to scan
     * @param entities    the candidate entities
     * @param knowledgeId knowledge-base id
     * @return the extracted relations
     */
    private List<Relation> extractRelationsFromChunks(List<Chunk> chunks, List<Entity> entities, String knowledgeId) {
        List<Relation> relations = new ArrayList<>();

        for (Chunk chunk : chunks) {
            String content = chunk.getContent();

            // Check every ordered entity pair against each pattern.
            for (int i = 0; i < entities.size(); i++) {
                Entity e1 = entities.get(i);
                for (int j = 0; j < entities.size(); j++) {
                    if (i == j) continue;

                    Entity e2 = entities.get(j);

                    // "e1 是 e2 的CEO" -> WORKS_FOR(role=CEO)
                    Pattern ceoPattern = Pattern.compile(".*" + Pattern.quote(e1.getName()) + ".*是.*" + Pattern.quote(e2.getName()) + ".*的CEO.*");
                    if (ceoPattern.matcher(content).find()) {
                        Map<String, Object> properties = new HashMap<>();
                        properties.put("role", "CEO");
                        relations.add(buildRelation("WORKS_FOR", properties, knowledgeId, e1, e2));
                    }

                    // "e1 任职于 e2" -> WORKS_FOR(role=员工)
                    Pattern workPattern = Pattern.compile(".*" + Pattern.quote(e1.getName()) + ".*任职于.*" + Pattern.quote(e2.getName()) + ".*");
                    if (workPattern.matcher(content).find()) {
                        Map<String, Object> properties = new HashMap<>();
                        properties.put("role", "员工");
                        relations.add(buildRelation("WORKS_FOR", properties, knowledgeId, e1, e2));
                    }

                    // "e1 位于 e2" -> LOCATED_IN
                    Pattern locationPattern = Pattern.compile(".*" + Pattern.quote(e1.getName()) + ".*位于.*" + Pattern.quote(e2.getName()) + ".*");
                    if (locationPattern.matcher(content).find()) {
                        relations.add(buildRelation("LOCATED_IN", new HashMap<>(), knowledgeId, e1, e2));
                    }
                }
            }
        }

        return relations;
    }

    /**
     * Persists relations between entities to Neo4j.
     * NOTE(review): properties participate in the MERGE pattern, so the same
     * entity pair with differing properties creates parallel RELATION edges —
     * confirm whether that duplication is intended.
     *
     * @param relations the relations to save
     */
    private void saveRelationsToNeo4j(List<Relation> relations) {
        try (Session session = neo4jDriver.session()) {
            session.writeTransaction(tx -> {
                for (Relation relation : relations) {
                    Entity source = relation.getSource();
                    Entity target = relation.getTarget();

                    // Sanitize the relation type and properties.
                    String cleanedType = Neo4jUtils.cleanRelationshipType(relation.getType());
                    Map<String, Object> cleanedProperties = Neo4jUtils.cleanProperties(relation.getProperties());

                    tx.run(
                            "MATCH (e1:Entity {name: $sourceName, knowledgeId: $knowledgeId}) " +
                            "MATCH (e2:Entity {name: $targetName, knowledgeId: $knowledgeId}) " +
                            "MERGE (e1)-[:RELATION {type: $type, knowledgeId: $knowledgeId, properties: $properties}]->(e2)",
                            Values.parameters(
                                    "sourceName", source.getName(),
                                    "targetName", target.getName(),
                                    "type", cleanedType,
                                    "knowledgeId", relation.getKnowledgeId(),
                                    "properties", Json.toJson(cleanedProperties)
                            )
                    );
                }
                return null;
            });
        }
    }

    /**
     * Vector similarity search over chunks of a knowledge base.
     * NOTE(review): relies on the Cypher function vector.similarity.cosine —
     * presumably requires Neo4j 5.x; confirm against the deployed version.
     *
     * @param query       query text
     * @param knowledgeId knowledge-base id
     * @param limit       maximum number of results
     * @return result maps with chunkId, content, docId and similarity score
     */
    public List<Map<String, Object>> vectorSearch(String query, String knowledgeId, int limit) {
        // Embed the query once.
        Embedding queryEmbedding = embeddingModel.embed(query).content();
        List<Float> queryVector = queryEmbedding.vectorAsList();

        List<Map<String, Object>> results = new ArrayList<>();

        try (Session session = neo4jDriver.session()) {
            // Cosine similarity against every chunk of this knowledge base.
            Result result = session.run(
                    "WITH $queryVector AS queryVector " +
                    "MATCH (c:Chunk) " +
                    "WHERE c.knowledgeId = $knowledgeId " +
                    "RETURN c.chunkId AS chunkId, c.content AS content, c.docId AS docId, " +
                    "vector.similarity.cosine(c.embedding, queryVector) AS score " +
                    "ORDER BY score DESC " +
                    "LIMIT $limit",
                    Values.parameters(
                            "queryVector", queryVector,
                            "knowledgeId", knowledgeId,
                            "limit", limit
                    )
            );

            // Materialize the driver records into plain maps.
            while (result.hasNext()) {
                Record record = result.next();
                Map<String, Object> item = new HashMap<>();
                item.put("chunkId", record.get("chunkId").asString());
                item.put("content", record.get("content").asString());
                item.put("docId", record.get("docId").asString());
                item.put("score", record.get("score").asDouble());
                results.add(item);
            }
        }

        return results;
    }

    /**
     * Graph-extended search: vector search first, then a 2-hop graph walk
     * (chunk -> entity -> related entity -> related chunk) from each hit.
     * Results inherit the seed chunk's similarity score and are re-sorted.
     *
     * @param query       query text
     * @param knowledgeId knowledge-base id
     * @param limit       maximum number of results (applied per hop and overall)
     * @return result maps with related chunk, entity pair, relation info and score
     */
    public List<Map<String, Object>> graphExtendedSearch(String query, String knowledgeId, int limit) {
        // 1. Seed with vector-similar chunks.
        List<Map<String, Object>> vectorResults = vectorSearch(query, knowledgeId, limit);

        List<Map<String, Object>> results = new ArrayList<>();

        // 2. Expand each seed chunk along the graph.
        try (Session session = neo4jDriver.session()) {
            for (Map<String, Object> vectorResult : vectorResults) {
                String chunkId = (String) vectorResult.get("chunkId");
                Double score = (Double) vectorResult.get("score");

                Result graphResult = session.run(
                        "MATCH (c:Chunk {chunkId: $chunkId}) " +
                        "MATCH (c)-[:MENTIONS]->(e1:Entity) " +
                        "MATCH (e1)-[r:RELATION {knowledgeId: $knowledgeId}]->(e2:Entity) " +
                        "MATCH (e2)<-[:MENTIONS]-(relatedChunk:Chunk) " +
                        "WHERE relatedChunk.knowledgeId = $knowledgeId " +
                        "RETURN relatedChunk.chunkId AS relatedChunkId, relatedChunk.content AS relatedContent, " +
                        "e1.name AS entity1, e2.name AS entity2, r.type AS relationType, r.properties AS relationProperties " +
                        "LIMIT $limit",
                        Values.parameters(
                                "chunkId", chunkId,
                                "knowledgeId", knowledgeId,
                                "limit", limit
                        )
                );

                // Flatten each expansion record, carrying the seed score along.
                while (graphResult.hasNext()) {
                    Record record = graphResult.next();
                    Map<String, Object> item = new HashMap<>();
                    item.put("relatedChunkId", record.get("relatedChunkId").asString());
                    item.put("relatedContent", record.get("relatedContent").asString());
                    item.put("entity1", record.get("entity1").asString());
                    item.put("entity2", record.get("entity2").asString());
                    item.put("relationType", record.get("relationType").asString());
                    item.put("relationProperties", Json.toJson(record.get("relationProperties")));
                    item.put("score", score);
                    results.add(item);
                }
            }
        }

        // Sort by relevance score, descending.
        results.sort((a, b) -> Double.compare((Double) b.get("score"), (Double) a.get("score")));

        // Cap the overall result count.
        if (results.size() > limit) {
            results = results.subList(0, limit);
        }

        return results;
    }
}