package com.yc.documentLoanding;

import dev.langchain4j.community.model.dashscope.QwenEmbeddingModel;
import dev.langchain4j.data.document.Metadata;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore;
import org.apache.tika.Tika;
import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.TikaCoreProperties;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.sax.BodyContentHandler;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.TrustAllStrategy;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.ssl.SSLContextBuilder;
import org.elasticsearch.client.RestClient;
import org.xml.sax.SAXException;

import javax.net.ssl.SSLContext;
import java.io.*;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Extracts text from documents (txt/pdf/doc/docx/epub) with Apache Tika, splits the
 * text into overlapping chunks, embeds each chunk with the DashScope Qwen embedding
 * model, and indexes the resulting vectors into an Elasticsearch-backed
 * {@link EmbeddingStore}.
 *
 * <p>Not thread-safe; intended to be driven from a single thread (see {@link #main}).
 * Call {@link #close()} when done to release the underlying Elasticsearch client.
 */
public class DocumentVectorizer {
    // Tika facade used for content detection and text extraction.
    private final Tika tika;
    // Embedding model (DashScope Qwen "text-embedding-v4") turning text into vectors.
    private final EmbeddingModel embeddingModel;
    // Vector store backed by Elasticsearch; writes go through addEmbeddingsBatch().
    private final EmbeddingStore<TextSegment> embeddingStore;
    // Low-level ES REST client owned by this instance; released in close().
    private final RestClient restClient;
    // Target size (in characters) of each text chunk.
    private final int chunkSize;
    // Number of characters repeated between consecutive chunks for context continuity.
    private final int overlapSize;
    // Number of (segment, embedding) pairs accumulated before a bulk write to ES.
    private final int processingBatchSize;
    // Name of the Elasticsearch index vectors are written to.
    private final String indexName;

    /**
     * Convenience constructor with default chunking parameters:
     * chunkSize=300, overlapSize=50, processingBatchSize=5.
     *
     * @param esHost    Elasticsearch host name
     * @param esPort    Elasticsearch port
     * @param apiKey    DashScope API key for the Qwen embedding model
     * @param username  Elasticsearch basic-auth user
     * @param password  Elasticsearch basic-auth password
     * @param useHttps  whether to connect to Elasticsearch over HTTPS
     * @param indexName target Elasticsearch index
     * @param dimension embedding vector dimension (must match the model configuration)
     */
    public DocumentVectorizer(String esHost, int esPort, String apiKey,
                              String username, String password, boolean useHttps, String indexName, int dimension) {
        this(esHost, esPort, apiKey, username, password, useHttps, indexName, 300, 50, 5, dimension);
    }

    /**
     * Full constructor; wires up the Tika parser, the Qwen embedding model, the
     * Elasticsearch REST client, and the embedding store.
     *
     * @param chunkSize           target chunk length in characters
     * @param overlapSize         character overlap between consecutive chunks
     * @param processingBatchSize number of embeddings buffered before a bulk ES write
     * @throws RuntimeException if the Elasticsearch client cannot be created
     */
    public DocumentVectorizer(String esHost, int esPort, String apiKey,
                              String username, String password, boolean useHttps, String indexName,
                              int chunkSize, int overlapSize, int processingBatchSize, int dimension) {
        this.tika = new Tika();
        this.chunkSize = chunkSize;
        this.overlapSize = overlapSize;
        this.processingBatchSize = processingBatchSize;
        this.indexName = indexName;

        // Initialize the Qwen (DashScope) embedding model.
        this.embeddingModel = QwenEmbeddingModel.builder()
                .apiKey(apiKey)
                .modelName("text-embedding-v4")
                .dimension(dimension)
                .build();

        // Initialize the Elasticsearch client and the EmbeddingStore on top of it.
        this.restClient = createRestClient(esHost, esPort, username, password, useHttps);
        this.embeddingStore = ElasticsearchEmbeddingStore.builder()
                .restClient(restClient)
                .indexName(indexName)
                .dimension(dimension)
                .build();
    }

    /**
     * Builds a low-level Elasticsearch {@link RestClient} with basic-auth credentials.
     *
     * <p>NOTE(review): certificate validation and hostname verification are disabled
     * (TrustAllStrategy + NoopHostnameVerifier). That is acceptable for local
     * development only — do not use this configuration against a production cluster.
     *
     * @throws RuntimeException wrapping any failure while building the SSL context or client
     */
    private RestClient createRestClient(String host, int port,
                                        String username, String password, boolean useHttps) {
        try {
            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(
                    AuthScope.ANY,
                    new UsernamePasswordCredentials(username, password)
            );

            // Trust-all SSL context: skips certificate chain validation (dev only).
            SSLContext sslContext = SSLContextBuilder.create()
                    .loadTrustMaterial(TrustAllStrategy.INSTANCE)
                    .build();

            String scheme = useHttps ? "https" : "http";
            return RestClient.builder(
                            new HttpHost(host, port, scheme))
                    .setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder
                            .setDefaultCredentialsProvider(credentialsProvider)
                            .setSSLContext(sslContext)
                            .setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE))
                    .build();
        } catch (Exception e) {
            throw new RuntimeException("创建Elasticsearch客户端失败", e);
        }
    }

    /**
     * Releases the underlying Elasticsearch REST client. Safe to call once;
     * the instance must not be used afterwards.
     *
     * @throws IOException if closing the client fails
     */
    public void close() throws IOException {
        if (restClient != null) {
            restClient.close();
        }
    }

    /**
     * Extracts plain text from a file, trying three strategies in order:
     * <ol>
     *   <li>Tika's simple {@code parseToString} facade,</li>
     *   <li>raw UTF-8 read for {@code .txt} files,</li>
     *   <li>Tika's lower-level parser with an unbounded {@link BodyContentHandler}.</li>
     * </ol>
     * A result shorter than ~50 characters is treated as a failed extraction and
     * the next strategy is tried.
     *
     * @param file the document to parse
     * @return the extracted text (always longer than 50 characters)
     * @throws IOException if every strategy fails or the file cannot be read
     */
    public String parseDocument(File file) throws IOException {
        System.out.println("开始解析文件: " + file.getName() + ", 大小: " + file.length() + " 字节");

        try {
            // Strategy 1: Tika's one-call facade.
            String content = tika.parseToString(file);
            if (content != null && content.length() > 50) {
                System.out.println("Tika解析成功，长度: " + content.length() + " 字符");
                return content;
            }

            // Strategy 2: plain UTF-8 read for text files.
            if (file.getName().toLowerCase().endsWith(".txt")) {
                content = new String(Files.readAllBytes(file.toPath()), StandardCharsets.UTF_8);
                if (content.length() > 50) {
                    System.out.println("直接读取文本文件成功，长度: " + content.length() + " 字符");
                    return content;
                }
            }

            // Strategy 3: Tika's lower-level parser API.
            try (InputStream stream = new FileInputStream(file)) {
                org.apache.tika.metadata.Metadata metadata = new org.apache.tika.metadata.Metadata();
                metadata.set(TikaCoreProperties.RESOURCE_NAME_KEY, file.getName());

                // BodyContentHandler(-1) removes the default 100k character limit.
                BodyContentHandler handler = new BodyContentHandler(-1);
                ParseContext context = new ParseContext();

                tika.getParser().parse(stream, handler, metadata, context);
                content = handler.toString();

                if (content != null && content.length() > 50) {
                    System.out.println("Tika高级解析成功，长度: " + content.length() + " 字符");
                    return content;
                }
            } catch (Exception e) {
                // Strategy 3 is best-effort; fall through to the terminal failure below.
                System.err.println("Tika高级解析失败: " + e.getMessage());
            }

            // All strategies failed: signal an unparseable/unsupported file.
            throw new IOException("无法解析文件内容，文件可能已损坏或格式不受支持");

        } catch (IOException e) {
            // Rethrow I/O failures (including our own failure above) as-is instead of
            // double-wrapping them, so the original message and cause are preserved.
            System.err.println("解析文件 " + file.getName() + " 失败: " + e.getMessage());
            throw e;
        } catch (Exception e) {
            System.err.println("解析文件 " + file.getName() + " 失败: " + e.getMessage());
            throw new IOException("文件解析失败: " + file.getName(), e);
        }
    }

    /**
     * Normalizes text before embedding: strips anything that looks like an
     * HTML/XML tag and collapses all whitespace runs to single spaces.
     *
     * @return the cleaned text, or "" for null/empty input
     */
    private String cleanText(String text) {
        if (text == null || text.isEmpty()) {
            return "";
        }
        return text.replaceAll("<[^>]*>", " ")
                .replaceAll("\\s+", " ")
                .trim();
    }

    /**
     * Splits text into overlapping chunks of roughly {@code chunkSize} characters,
     * preferring to break at sentence boundaries (see {@link #findSplitPoint}).
     * Chunks shorter than 20 characters or without meaningful content are dropped.
     *
     * @param text        the full document text
     * @param chunkSize   target chunk length in characters
     * @param overlapSize characters shared between consecutive chunks
     * @return the list of chunks; empty if the text is null or shorter than 50 chars
     */
    public List<String> chunkText(String text, int chunkSize, int overlapSize) {
        if (text == null || text.isEmpty() || text.length() < 50) {
            System.out.println("文本太短，无法分块: " + (text != null ? text.length() : 0) + " 字符");
            return new ArrayList<>();
        }

        List<String> chunks = new ArrayList<>();
        int textLength = text.length();
        int start = 0;

        while (start < textLength) {
            int end = Math.min(start + chunkSize, textLength);

            if (end < textLength) {
                // Prefer a natural boundary (sentence end / whitespace) near "end".
                int splitPoint = findSplitPoint(text, end);
                if (splitPoint > start) {
                    end = splitPoint;
                }
            }

            String chunk = text.substring(start, end).trim();
            if (chunk.length() >= 20 && hasMeaningfulContent(chunk)) {
                chunks.add(chunk);
            }

            // Advance by at least one character to guarantee termination, while
            // backing up overlapSize chars so adjacent chunks share context.
            start = Math.max(start + 1, end - overlapSize);
            if (start >= textLength) break;
        }

        System.out.println("分块完成: " + chunks.size() + " 个文本块");
        return chunks;
    }

    /**
     * Heuristic filter for chunk quality: accepts text containing at least
     * 5 CJK ideographs, or 10 letters, or 5 whitespace-separated tokens.
     */
    private boolean hasMeaningfulContent(String text) {
        if (text == null || text.length() < 20) {
            return false;
        }

        long chineseCharCount = text.chars()
                .filter(c -> c >= '\u4E00' && c <= '\u9FFF')
                .count();
        long letterCount = text.chars()
                .filter(Character::isLetter)
                .count();

        return chineseCharCount >= 5 || letterCount >= 10 || text.split("\\s+").length >= 5;
    }

    /**
     * Finds a natural split position at or before {@code suggestedEnd}:
     * first a sentence terminator within the last 100 chars, then a space
     * within the last 30 chars, otherwise {@code suggestedEnd} itself.
     *
     * @return an index suitable as an exclusive substring end
     */
    private int findSplitPoint(String text, int suggestedEnd) {
        // Prefer splitting right after a sentence terminator (CJK or ASCII).
        String[] sentenceEndings = {"。", "！", "？", ".", "!", "?", "\n\n", "\r\n\r\n"};
        for (String ending : sentenceEndings) {
            int index = text.lastIndexOf(ending, suggestedEnd);
            if (index > 0 && index > suggestedEnd - 100) {
                return index + ending.length();
            }
        }

        // Fall back to the nearest space close to the suggested end.
        int spaceIndex = text.lastIndexOf(" ", suggestedEnd);
        if (spaceIndex > 0 && spaceIndex > suggestedEnd - 30) {
            return spaceIndex + 1;
        }

        return suggestedEnd;
    }

    /**
     * Generates an embedding for a chunk of text. The text is cleaned, truncated
     * to 8000 characters (model input limit safeguard), and skipped entirely if
     * shorter than 20 characters after cleaning.
     *
     * @return the embedding, or {@code null} if the text was too short or the
     *         model call failed (best-effort: failures are logged, not thrown)
     */
    public Embedding generateEmbedding(String text) {
        try {
            String cleanedText = cleanText(text);
            if (cleanedText.length() > 8000) {
                cleanedText = cleanedText.substring(0, 8000);
            }

            if (cleanedText.length() < 20) {
                System.out.println("文本太短，跳过生成向量");
                return null;
            }

            return embeddingModel.embed(cleanedText).content();
        } catch (Exception e) {
            // Deliberate best-effort: a failed chunk is dropped, not fatal.
            System.err.println("生成向量失败: " + e.getMessage());
            return null;
        }
    }

    /**
     * Bulk-writes parallel lists of segments and embeddings to the store,
     * silently skipping entries whose embedding is {@code null}. A size
     * mismatch between the two lists is treated as a no-op.
     */
    private void addEmbeddingsBatch(List<TextSegment> segments, List<Embedding> embeddings) {
        if (segments.isEmpty() || embeddings.isEmpty() || segments.size() != embeddings.size()) {
            return;
        }

        try {
            // Drop pairs whose embedding generation failed (null embedding).
            List<TextSegment> validSegments = new ArrayList<>();
            List<Embedding> validEmbeddings = new ArrayList<>();

            for (int i = 0; i < embeddings.size(); i++) {
                if (embeddings.get(i) != null) {
                    validSegments.add(segments.get(i));
                    validEmbeddings.add(embeddings.get(i));
                }
            }

            if (!validSegments.isEmpty()) {
                embeddingStore.addAll(validEmbeddings, validSegments);
                System.out.println("成功添加 " + validSegments.size() + " 个向量到 Elasticsearch");
            }
        } catch (Exception e) {
            // Best-effort batch write: log and continue with the next batch.
            System.err.println("批量添加失败: " + e.getMessage());
        }
    }

    /**
     * Full pipeline for one document: parse → chunk → embed → bulk-index.
     * Embeddings are flushed to Elasticsearch every {@code processingBatchSize}
     * chunks, with a short pause between batches to respect API rate limits.
     * Failures are logged and do not propagate to the caller.
     *
     * @param file the document to process
     */
    public void processDocument(File file) {
        try {
            System.out.println("\n=== 开始处理文档: " + file.getName() + " ===");

            // Parse the document into plain text.
            String content = parseDocument(file);
            System.out.println("文档解析成功，长度: " + content.length() + " 字符");

            // Split into overlapping chunks.
            List<String> chunks = chunkText(content, chunkSize, overlapSize);
            if (chunks.isEmpty()) {
                System.out.println("警告: 文档分块后没有有效内容");
                return;
            }

            System.out.println("分割为 " + chunks.size() + " 个文本块");

            // Embed each chunk, buffering for bulk writes.
            List<TextSegment> batchSegments = new ArrayList<>();
            List<Embedding> batchEmbeddings = new ArrayList<>();

            for (int i = 0; i < chunks.size(); i++) {
                String chunk = chunks.get(i);
                System.out.println("处理文本块 " + (i + 1) + "/" + chunks.size() +
                        ", 长度: " + chunk.length() + " 字符");

                // Per-chunk metadata stored alongside the vector for traceability.
                Map<String, Object> metadata = new HashMap<>();
                metadata.put("filename", file.getName());
                metadata.put("chunk_index", i);
                metadata.put("total_chunks", chunks.size());
                metadata.put("chunk_size", chunk.length());

                TextSegment segment = TextSegment.from(chunk, Metadata.from(metadata));
                Embedding embedding = generateEmbedding(chunk);

                if (embedding != null) {
                    batchSegments.add(segment);
                    batchEmbeddings.add(embedding);
                }

                // Flush a full batch to Elasticsearch.
                if (batchSegments.size() >= processingBatchSize) {
                    addEmbeddingsBatch(batchSegments, batchEmbeddings);
                    batchSegments.clear();
                    batchEmbeddings.clear();

                    // Brief pause to avoid embedding-API rate limits.
                    Thread.sleep(500);
                }
            }

            // Flush any remaining partial batch.
            if (!batchSegments.isEmpty()) {
                addEmbeddingsBatch(batchSegments, batchEmbeddings);
            }

            System.out.println("=== 文档处理完成: " + file.getName() + " ===\n");

        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            System.err.println("处理文档失败: " + file.getName() + " - " + e.getMessage());
        } catch (Exception e) {
            System.err.println("处理文档失败: " + file.getName() + " - " + e.getMessage());
        }
    }

    /**
     * Locates a directory on the classpath and processes every supported file in
     * it. Only {@code file:}-protocol resources are handled (i.e. directories on
     * the filesystem, not entries inside JARs).
     *
     * @param resourcePath classpath-relative directory name
     * @throws IOException        if the resource directory does not exist
     * @throws URISyntaxException if a resource URL cannot be converted to a URI
     */
    public void processClasspathResources(String resourcePath) throws IOException, URISyntaxException {
        ClassLoader classLoader = getClass().getClassLoader();
        Enumeration<URL> resources = classLoader.getResources(resourcePath);

        if (!resources.hasMoreElements()) {
            throw new IOException("类路径资源目录不存在: " + resourcePath);
        }

        while (resources.hasMoreElements()) {
            URL resourceUrl = resources.nextElement();
            if ("file".equals(resourceUrl.getProtocol())) {
                File resourceDir = new File(resourceUrl.toURI());
                if (resourceDir.exists() && resourceDir.isDirectory()) {
                    processFileSystemDirectory(resourceDir);
                }
            }
        }
    }

    /**
     * Processes every supported document (.epub/.txt/.pdf/.doc/.docx) in a
     * filesystem directory, pausing one second between files to throttle API
     * usage. Per-file failures are logged and do not stop the loop.
     */
    private void processFileSystemDirectory(File directory) {
        if (!directory.exists() || !directory.isDirectory()) {
            System.err.println("目录不存在: " + directory.getAbsolutePath());
            return;
        }

        File[] files = directory.listFiles((dir, name) ->
                name.toLowerCase().endsWith(".epub") ||
                        name.toLowerCase().endsWith(".txt") ||
                        name.toLowerCase().endsWith(".pdf") ||
                        name.toLowerCase().endsWith(".doc") ||
                        name.toLowerCase().endsWith(".docx"));

        if (files == null || files.length == 0) {
            System.out.println("目录中没有找到支持的文件: " + directory.getAbsolutePath());
            return;
        }

        System.out.println("找到 " + files.length + " 个文件");

        for (File file : files) {
            try {
                processDocument(file);
                Thread.sleep(1000); // Inter-file delay to respect rate limits.
            } catch (InterruptedException e) {
                // Restore the interrupt flag and stop processing further files.
                Thread.currentThread().interrupt();
                System.err.println("处理文件失败: " + file.getName() + " - " + e.getMessage());
                return;
            } catch (Exception e) {
                System.err.println("处理文件失败: " + file.getName() + " - " + e.getMessage());
            }
        }
    }

    /**
     * Example entry point: vectorizes every supported document under the
     * {@code ragResources} classpath directory into the configured ES index.
     */
    public static void main(String[] args) {
        String esHost = "localhost";
        int esPort = 9200;
        // SECURITY NOTE(review): hardcoded API key and password — move these to
        // environment variables or a secrets manager before committing/deploying.
        String apiKey = "sk-b9ddcf3fc0ca47499e527fd85dbe4ab9";
        String username = "elastic";
        String password = "elastic";
        boolean useHttps = false;
        String resourcePath = "ragResources";
        String indexName = "psychology_vectors";
        int chunkSize = 500;
        int overlapSize = 50;
        int processingBatchSize = 10;
        int dimension = 1024; // Must match the dimension configured on the ES index mapping.

        DocumentVectorizer vectorizer = null;
        try {
            System.out.println("=== 文档向量化处理开始 ===");
            System.out.println("建议使用: -Xmx4g -XX:+UseG1GC");

            vectorizer = new DocumentVectorizer(esHost, esPort, apiKey,
                    username, password, useHttps, indexName, chunkSize, overlapSize,
                    processingBatchSize, dimension);

            vectorizer.processClasspathResources(resourcePath);
            System.out.println("=== 所有文档处理完成! ===");

        } catch (Exception e) {
            System.err.println("处理过程中发生错误: " + e.getMessage());
            e.printStackTrace();
        } finally {
            if (vectorizer != null) {
                try {
                    vectorizer.close();
                } catch (IOException e) {
                    System.err.println("关闭资源失败: " + e.getMessage());
                }
            }
        }
    }
}