package com.fliad.ai.service;

import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.http.HttpUtil;
import com.fliad.ai.dto.DocumentUploadRequest;
import com.fliad.ai.dto.DocumentUploadResponse;
import com.fliad.ai.entity.AiDocument;
import com.fliad.ai.entity.AiDocumentChunk;
import com.fliad.ai.entity.AiKnowledge;
import com.fliad.ai.entity.AiLlm;
import org.noear.snack.ONode;
import org.noear.solon.ai.embedding.EmbeddingModel;
import org.noear.solon.ai.rag.Document;
import org.noear.solon.ai.rag.RepositoryStorable;
import org.noear.solon.ai.rag.repository.InMemoryRepository;
import org.noear.solon.ai.rag.splitter.RegexTextSplitter;
import org.noear.solon.ai.rag.splitter.SplitterPipeline;
import org.noear.solon.ai.rag.util.QueryCondition;
import org.noear.solon.annotation.Component;
import org.noear.solon.cloud.CloudClient;
import org.noear.solon.cloud.model.Media;
import org.noear.wood.DbContext;
import org.noear.wood.annotation.Db;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

@Component
public class AiDocumentService {

    /**
     * Sentinel {@code documentId} value the front-end sends to mean
     * "create a new document" (see {@link #upload} and {@code createOrUpdateDocument}).
     */
    private static final long NEW_DOCUMENT_ID = 1L;

    @Db
    DbContext db;

    /** In-memory vector repository per knowledge base, keyed by knowledge id. */
    public static final Map<Long, RepositoryStorable> KNOWLEDGE_REPO = new ConcurrentHashMap<>();

    /**
     * Lists the document types supported by the front-end.
     *
     * @return a mutable list of type identifiers
     */
    public List<String> getDocumentTypes() {
        List<String> list = new ArrayList<>();
        list.add("pdf");
        list.add("word");
        list.add("aieditor");
        return list;
    }

    /**
     * Persists a new document record, generating its id and timestamps.
     *
     * @param document the document to insert (id/created/modified are overwritten)
     * @return true if exactly one row was inserted
     * @throws SQLException on database failure
     */
    public boolean create(AiDocument document) throws SQLException {
        document.setId(IdUtil.getSnowflakeNextId());
        Date now = new Date();
        document.setCreated(now);
        document.setModified(now);
        return db.table("ai_document")
                .setEntity(document)
                .insert() > 0;
    }

    /**
     * Downloads the uploaded file and returns its split fragments without
     * persisting anything — a dry-run preview of {@link #upload}.
     *
     * @param request carries the file url (or a JSON object containing "url")
     *                plus splitter configuration
     * @return the split document fragments
     * @throws IllegalArgumentException if no file was supplied
     */
    public List<Document> preview(DocumentUploadRequest request) throws SQLException, IOException {
        String fileUrl = extractFileUrl(request.getFile());
        // The url carries bucket/key as query parameters.
        Map<String, String> paramMap = HttpUtil.decodeParamMap(fileUrl, StandardCharsets.UTF_8);

        SplitterPipeline splitterPipeline = createSplitterPipeline(request.getSplitter(), request.getRegex());
        return splitFileContent(splitterPipeline, paramMap.get("bucket"), paramMap.get("key"));
    }

    /**
     * Uploads a document into a knowledge base: stores/updates the document row,
     * splits the file, embeds the fragments into the in-memory repository and
     * mirrors each fragment into {@code ai_document_chunk}.
     *
     * @param request upload request; {@code knowledgeId} is mandatory
     * @return the split (and embedded) fragments
     * @throws IllegalArgumentException if the file or the knowledge id is missing
     */
    public List<Document> upload(DocumentUploadRequest request) throws SQLException, IOException {
        // Fix: validate the file like preview() does — the old code NPEd on null input.
        String fileUrl = extractFileUrl(request.getFile());
        Map<String, String> paramMap = HttpUtil.decodeParamMap(fileUrl, StandardCharsets.UTF_8);

        Long knowledgeId = request.getKnowledgeId();
        if (knowledgeId == null) {
            throw new IllegalArgumentException("knowledgeId is null");
        }

        SplitterPipeline splitterPipeline = createSplitterPipeline(request.getSplitter(), request.getRegex());

        // Create or update the document row.
        AiDocument document = createOrUpdateDocument(request, paramMap, knowledgeId);

        // Download the file and split it into fragments.
        List<Document> docList = splitFileContent(splitterPipeline, paramMap.get("bucket"), paramMap.get("key"));

        // Attach tracing metadata plus any caller-supplied metadata.
        addMetadata(docList, paramMap.get("key"), knowledgeId, document.getId());
        addMetadata(docList, request.getMeta());

        RepositoryStorable repositoryStorable = getOrCreateRepository(knowledgeId);

        // Editing an existing document: drop its previous chunks first.
        if (request.getDocumentId() != null && request.getDocumentId() != NEW_DOCUMENT_ID) {
            deleteExistingChunks(document.getId());
        }
        repositoryStorable.insert(docList);

        // Record each fragment in ai_document_chunk so it can be cleaned up later.
        for (Document doc : docList) {
            AiDocumentChunk chunk = new AiDocumentChunk();
            chunk.setId(doc.getId());
            chunk.setDocumentId(document.getId());
            chunk.setKnowledgeId(knowledgeId);
            db.table("ai_document_chunk").setEntity(chunk).insert();
        }

        return docList;
    }

    /**
     * Loads a document and re-splits its stored file so the editor can display it.
     *
     * @param id the document id
     * @return the populated response, or {@code null} when the document does not exist
     */
    public DocumentUploadResponse get(Long id) throws SQLException, IOException {
        AiDocument aiDocument = db.table("ai_document")
                .whereEq("id", id)
                .selectItem("*", AiDocument.class);
        // Fix: the old code dereferenced aiDocument before checking it, NPE on a miss.
        if (aiDocument == null || aiDocument.getId() == null) {
            return null;
        }
        // Splitter configuration was persisted as JSON in the options column.
        ONode options = ONode.loadStr(aiDocument.getOptions());

        DocumentUploadResponse response = new DocumentUploadResponse();
        response.setDocumentId(aiDocument.getId());
        response.setKnowledgeId(aiDocument.getKnowledgeId());
        response.setSplitter(options.get("splitter").getString());
        response.setRegex(options.get("regex").getString());
        response.setFile(ONode.newObject().set("url", "/file/down?bucket=ai&key=" + aiDocument.getFileKey())
                .set("filename", aiDocument.getTitle()));

        SplitterPipeline splitterPipeline = createSplitterPipeline(response.getSplitter(), response.getRegex());

        List<Document> docList = splitFileContent(splitterPipeline, "ai", aiDocument.getFileKey());
        addMetadata(docList, aiDocument.getFileKey(), aiDocument.getKnowledgeId(), aiDocument.getId());
        response.setResult(docList);

        response.setFilename(aiDocument.getFileKey());
        response.setMeta(StrUtil.isBlankIfStr(aiDocument.getMeta()) ? null : ONode.loadStr(aiDocument.getMeta()));
        return response;
    }

    /**
     * Lists documents, optionally filtered by title (fuzzy), type and knowledge base.
     * All filters are parameterized — no SQL concatenation of user input.
     *
     * @param title        optional title substring
     * @param documentType optional exact document type
     * @param knowledgeId  optional knowledge base id
     * @return matching documents, newest first
     */
    public List<AiDocument> list(String title, String documentType, Long knowledgeId) throws SQLException {
        return db.table("ai_document")
                .where("1=1")  // base condition so every filter can be ANDed on
                .andIf(title != null && !title.isEmpty(),
                        "title LIKE ?", "%" + title + "%")
                .andIf(documentType != null && !documentType.isEmpty(),
                        "document_type = ?", documentType)
                .andIf(knowledgeId != null,
                        "knowledge_id = ?", knowledgeId)
                .orderBy("id DESC")
                .selectList("*", AiDocument.class);
    }

    /**
     * Updates a document row and bumps its modified timestamp.
     *
     * @return true if exactly one row was updated
     */
    public boolean update(Long id, AiDocument document) throws SQLException {
        document.setModified(new Date());
        return db.table("ai_document")
                .whereEq("id", id)
                .setEntity(document)
                .update() > 0;
    }

    /**
     * Deletes every document of a knowledge base and evicts its in-memory repository.
     */
    public void deleteByKnowledgeId(Long knowledgeId) throws SQLException, IOException {
        List<AiDocument> documents = db.table("ai_document")
                .whereEq("knowledge_id", knowledgeId)
                .selectList("*", AiDocument.class);

        for (AiDocument document : documents) {
            delete(document.getId());
        }
        // Fix: evict once, after the loop — and also when there were no documents.
        KNOWLEDGE_REPO.remove(knowledgeId);
    }

    /**
     * Deletes a document together with its chunk rows and in-memory fragments.
     *
     * @return true if the document row was deleted
     */
    public boolean delete(Long id) throws SQLException, IOException {
        // Remove chunk rows and the embedded fragments first.
        deleteExistingChunks(id);

        return db.table("ai_document")
                .whereEq("id", id)
                .delete() > 0;
    }

    /**
     * Runs a similarity search restricted to one document's fragments.
     *
     * @param id      the document id to search within
     * @param keyword the query text
     * @throws IllegalArgumentException if the document does not exist
     */
    public List<Document> search(Long id, String keyword) throws SQLException, IOException {
        AiDocument aiDocument = db.table("ai_document").whereEq("id", id).selectItem("*", AiDocument.class);
        if (aiDocument == null) {
            throw new IllegalArgumentException("document not found: " + id);
        }
        // Fix: reuse the shared cache helper instead of duplicating the embedding-model
        // wiring here; this also skips the ai_knowledge/ai_llm queries on a cache hit.
        RepositoryStorable repositoryStorable = getOrCreateRepository(aiDocument.getKnowledgeId());

        QueryCondition condition = new QueryCondition(keyword);
        // The documentId metadata value is a Long, hence the "L" suffix in the expression.
        condition.filterExpression("documentId == " + aiDocument.getId() + "L");
        return repositoryStorable.search(condition);
    }

    /**
     * Creates a new document row or upserts an existing one, depending on the
     * request's documentId (null or the NEW_DOCUMENT_ID sentinel means "new").
     */
    private AiDocument createOrUpdateDocument(DocumentUploadRequest request, Map<String, String> paramMap, Long knowledgeId) throws SQLException {
        AiDocument document = new AiDocument();
        if (request.getDocumentId() == null || request.getDocumentId() == NEW_DOCUMENT_ID) {
            document.setId(IdUtil.getSnowflakeNextId());
        } else {
            document.setId(request.getDocumentId());
        }
        // Persist splitter settings as JSON so get() can restore them later.
        document.setOptions(ONode.newObject().set("splitter", request.getSplitter()).set("regex", request.getRegex()).toJson());
        document.setMeta(request.getMeta() == null ? null : request.getMeta().toJson());
        document.setTitle(paramMap.get("key"));
        document.setKnowledgeId(knowledgeId);
        document.setFileKey(paramMap.get("key"));
        Date now = new Date();
        document.setCreated(now);
        document.setModified(now);

        db.table("ai_document")
                .setEntity(document)
                .upsertBy("id");

        return document;
    }

    /**
     * Resolves the raw file field: either a JSON object with a "url" member
     * (detected by the presence of '{') or the url string itself.
     *
     * @throws IllegalArgumentException if the field is null or empty
     */
    private String extractFileUrl(String fileField) {
        if (StrUtil.isEmptyIfStr(fileField)) {
            throw new IllegalArgumentException("文件不能为空");
        }
        if (fileField.contains("{")) {
            return ONode.loadStr(fileField).get("url").getString();
        }
        return fileField;
    }

    /**
     * Builds the splitter pipeline; currently only the regex splitter is configurable.
     */
    private SplitterPipeline createSplitterPipeline(String splitter, String regex) {
        SplitterPipeline splitterPipeline = new SplitterPipeline();
        // Compare against the class name instead of a hand-maintained string literal.
        if (RegexTextSplitter.class.getName().equals(splitter)) {
            splitterPipeline.next(new RegexTextSplitter(regex));
        }
        return splitterPipeline;
    }

    /**
     * Downloads the file from cloud storage and splits its text content.
     */
    private List<Document> splitFileContent(SplitterPipeline splitterPipeline, String bucket, String key) throws IOException {
        return splitterPipeline.split(CloudClient.file().get(bucket, key).bodyAsString());
    }

    /**
     * Tags every fragment with its origin (file name, knowledge id, document id)
     * so searches can be filtered per document.
     */
    private void addMetadata(List<Document> docList, String fileName, Long knowledgeId, Long documentId) {
        for (Document doc : docList) {
            doc.metadata("name", fileName);
            doc.metadata("knowledgeId", knowledgeId);
            doc.metadata("documentId", documentId);
        }
    }

    /**
     * Copies caller-supplied metadata (a JSON object) onto every fragment.
     * Non-object or null meta is ignored.
     */
    private void addMetadata(List<Document> docList, ONode meta) {
        if (meta != null && meta.isObject()) {
            for (Document doc : docList) {
                meta.forEach((key, val) -> doc.metadata(key, val.getString()));
            }
        }
    }

    /**
     * Returns the cached in-memory repository for a knowledge base, creating it
     * (including its embedding model) on first use.
     */
    private RepositoryStorable getOrCreateRepository(Long knowledgeId) throws SQLException {
        return KNOWLEDGE_REPO.computeIfAbsent(knowledgeId, key -> {
            try {
                AiKnowledge aiKnowledge = db.table("ai_knowledge")
                        .whereEq("id", knowledgeId)
                        .selectItem("*", AiKnowledge.class);
                AiLlm aiLlm = db.table("ai_llm")
                        .whereEq("id", aiKnowledge.getVectorEmbedLlmId())
                        .selectItem("*", AiLlm.class);
                return buildRepository(aiLlm);
            } catch (SQLException e) {
                // computeIfAbsent lambdas cannot throw checked exceptions.
                throw new RuntimeException(e);
            }
        });
    }

    /** Builds an in-memory vector repository backed by the given embedding LLM config. */
    private RepositoryStorable buildRepository(AiLlm aiLlm) {
        return new InMemoryRepository(EmbeddingModel.of(aiLlm.getLlmEndpoint())
                .apiKey(aiLlm.getLlmApiKey())
                .provider(aiLlm.getDialect())
                .model(aiLlm.getLlmModel())
                .build());
    }

    /**
     * Removes a document's chunk rows and, when its knowledge base repository is
     * loaded, the corresponding embedded fragments.
     */
    private void deleteExistingChunks(Long documentId) throws SQLException, IOException {
        List<AiDocumentChunk> aiDocumentChunkList = db.table("ai_document_chunk")
                .whereEq("document_id", documentId)
                .selectList("*", AiDocumentChunk.class);
        AiDocument document = db.table("ai_document")
                .whereEq("id", documentId)
                .selectItem("*", AiDocument.class);
        if (document != null) {
            // Only loaded repositories need in-memory cleanup.
            RepositoryStorable repositoryStorable = KNOWLEDGE_REPO.get(document.getKnowledgeId());
            if (repositoryStorable != null) {
                repositoryStorable.delete(aiDocumentChunkList.stream().map(AiDocumentChunk::getId).toArray(String[]::new));
            }
        }
        db.table("ai_document_chunk")
                .whereEq("document_id", documentId)
                .delete();
    }

}
