package org.example.corpus.utils;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;
import lombok.extern.slf4j.Slf4j;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;
import org.apache.poi.xwpf.usermodel.XWPFDocument;
import org.apache.poi.xwpf.usermodel.XWPFParagraph;
import org.apache.tika.language.LanguageIdentifier;  // Tika 1.x legacy language detector

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

@Slf4j
public class SentenceSplitterUtil {

    /** Pipeline for English sentence splitting (tokenize + ssplit). */
    private static final StanfordCoreNLP englishPipeline;
    /** Pipeline for Chinese sentence splitting (tokenize + ssplit with the zh tokenizer). */
    private static final StanfordCoreNLP chinesePipeline;

    static {
        // English pipeline: default (English) tokenizer and sentence splitter.
        Properties englishProps = new Properties();
        englishProps.setProperty("annotators", "tokenize,ssplit");
        englishPipeline = new StanfordCoreNLP(englishProps);

        // Chinese pipeline. BUG FIX: the original configuration was identical to
        // the English one, so Chinese text was segmented with English tokenizer
        // rules and choosePipeline() had no effect. Selecting the Chinese
        // tokenizer makes the pipelines actually differ. NOTE(review): this
        // requires the CoreNLP Chinese models jar on the classpath — verify.
        Properties chineseProps = new Properties();
        chineseProps.setProperty("annotators", "tokenize,ssplit");
        chineseProps.setProperty("tokenize.language", "zh");
        chinesePipeline = new StanfordCoreNLP(chineseProps);
    }

    /** Utility class — static methods only, not instantiable. */
    private SentenceSplitterUtil() {
    }

    /**
     * Reads the file at {@code filePath}, detects its language, splits the text
     * into sentences with the matching CoreNLP pipeline, and invokes
     * {@code processor} once per sentence. Unsupported or unreadable files are
     * logged and silently skipped (no exception reaches the caller).
     *
     * @param filePath  path to a {@code .pdf}, {@code .txt} or {@code .docx} file
     * @param corpusId  identifier forwarded unchanged to the processor
     * @param processor callback receiving each sentence
     */
    public static void splitSentences(String filePath, int corpusId, SentenceProcessor processor) {
        String text = readFile(filePath);
        if (text == null || text.isEmpty()) {
            // Unsupported type, read failure (already logged by readFile),
            // or an empty file — nothing to split.
            return;
        }

        // Detect language with Tika, then route to the matching pipeline.
        String detectedLanguage = detectLanguage(text);
        StanfordCoreNLP pipeline = choosePipeline(detectedLanguage);

        Annotation document = new Annotation(text);
        pipeline.annotate(document);

        List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
        if (sentences == null) {
            return;  // defensive: annotation key absent
        }
        for (CoreMap sentence : sentences) {
            processor.process(sentence.toString(), corpusId);
        }
    }

    /**
     * Detects the language of {@code text} with Tika 1.x's LanguageIdentifier.
     *
     * NOTE(review): the legacy LanguageIdentifier ships n-gram profiles for a
     * fixed set of mostly European languages — verify it can actually return
     * "zh" for Chinese input; otherwise the Chinese pipeline is unreachable and
     * Tika's newer language-detection module should be used instead.
     *
     * @return an ISO 639-1 language code such as "en"
     */
    private static String detectLanguage(String text) {
        return new LanguageIdentifier(text).getLanguage();
    }

    /** Returns the pipeline for the detected language; English is the fallback. */
    private static StanfordCoreNLP choosePipeline(String language) {
        return "zh".equals(language) ? chinesePipeline : englishPipeline;
    }

    /**
     * Dispatches to the reader matching the file extension.
     * FIX: the extension comparison is now case-insensitive — "report.PDF" was
     * previously logged as an unsupported type.
     *
     * @return the file's extracted text, or {@code null} on unsupported type or I/O error
     */
    private static String readFile(String filePath) {
        File file = new File(filePath);
        String lower = filePath.toLowerCase(Locale.ROOT);
        try {
            if (lower.endsWith(".pdf")) {
                return readPdf(file);
            } else if (lower.endsWith(".txt")) {
                return readTxt(file);
            } else if (lower.endsWith(".docx")) {
                return readDocx(file);
            }
            log.error("Unsupported file type: {}", filePath);
        } catch (IOException e) {
            log.error("Failed to read file: {}", filePath, e);
        }
        return null;
    }

    /** Extracts all text from a PDF document via PDFBox. */
    private static String readPdf(File file) throws IOException {
        try (PDDocument document = PDDocument.load(file)) {
            return new PDFTextStripper().getText(document);
        }
    }

    /** Reads a plain-text file as UTF-8, normalizing line endings to '\n'. */
    private static String readTxt(File file) throws IOException {
        StringBuilder textBuilder = new StringBuilder();
        // StandardCharsets.UTF_8 instead of the "UTF-8" name string: no checked
        // UnsupportedEncodingException path and no charset-name typo risk.
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                textBuilder.append(line).append('\n');
            }
        }
        return textBuilder.toString();
    }

    /**
     * Extracts the text of every paragraph in a .docx file, one paragraph per line.
     * FIX: the FileInputStream is now its own try-with-resources resource, so it
     * is closed even when the XWPFDocument constructor throws (the original
     * leaked the stream in that case).
     */
    private static String readDocx(File file) throws IOException {
        StringBuilder textBuilder = new StringBuilder();
        try (FileInputStream in = new FileInputStream(file);
             XWPFDocument document = new XWPFDocument(in)) {
            for (XWPFParagraph paragraph : document.getParagraphs()) {
                textBuilder.append(paragraph.getText()).append('\n');
            }
        }
        return textBuilder.toString();
    }

    /** Callback receiving each extracted sentence together with its corpus id. */
    public interface SentenceProcessor {
        void process(String sentence, int corpusId);
    }
}
