package util;

/**
 * Splits raw text into sentences using LingPipe's Indo-European
 * tokenizer and sentence-boundary model.
 *
 * @author hzs
 * @date Created in 2021/1/23 15:07
 * @description Sentence segmentation demo/utility built on LingPipe.
 * @modified By
 * @version 1.0
 */

import java.util.ArrayList;
import java.util.List;

import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

/**
 * Splits English (Indo-European) text into sentences using LingPipe's
 * heuristic sentence model. Also contains a small helper that maps
 * full-width Chinese punctuation to ASCII equivalents so the English
 * sentence model can be exercised against Chinese input.
 */
public class SplitTextInSentence {
    /** Shared Indo-European tokenizer (LingPipe singleton, stateless). */
    static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
    /** Heuristic sentence-boundary detector for Indo-European text. */
    static final SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel();

    public static void main(String[] args) {
        SplitTextInSentence s = new SplitTextInSentence();
        // Sample inputs exercising known hard cases: abbreviations ("U.S",
        // "Msc."), decimal numbers, dates, quoted dialogue, Chinese text,
        // and math-heavy text with blank-line separated paragraphs.
        String str1 = "Water-splashing Festival is one of the most important festivals in the world, which is popular among Dai people of China and the southeast Asia. It has been celebrated by people for more than 700 years and now this festival is an necessary way for people to promote the cooperation and communication among countries.";
        String str2 = "This is how I tried to split a paragraph into a sentence. But, there is a problem. My paragraph includes dates like Jan.13, 2014 , words like U.S and numbers like 2.2. They all got split by the above code.";
        String str3 = "My friend holds a Msc. in Computer Science.";
        String str4 = "This is a test? This is a T.L.A. test!";
        String text = "50 Cent XYZ120 DVD Player 50 Cent lawyer. Person is john, he is a lawyer.";
        String str5 = "\"I do not ask for your forgiveness,\" he said, in a tone that became more firm and forceful. \"I have no illusions, and I am convinced that death is waiting for me: it is just.\"";
        String str6 = "\"The Times have had too much influence on me.\" He laughed bitterly and said to himself, \"it is only two steps away from death. Alone with me, I am still hypocritical... Ah, the 19th century!\"";
        String str7 = "泼水节是世界上最重要节日之一，深受中国傣族和东南亚人民的喜爱。七百多年来，人们一直在庆祝这个节日，现在这个节日是促进国家间合作和交流的必要方式。";
//        System.out.println(s.splitfuhao(str7));
        String str8 = "Let {a mathematical formula}〈X,A〉 be an af and S a subset of {a mathematical formula}X. \n" +
                " \n" +
                " \n" +
                "S is admissible (denoted as {a mathematical formula}S∈Eadm(〈X,A〉)) if S is conflict-free and every argument in S is acceptable wrt S, i.e. {a mathematical formula}S⊆F(S). \n" +
                " \n" +
                "S is a complete extension, (denoted as {a mathematical formula}S∈Ecomp(〈X,A〉)) if S is conflict-free and {a mathematical formula}x∈S if and only if x is acceptable wrt S, i.e. {a mathematical formula}S=F(S). \n" +
                " \n" +
                "S is a preferred extension (denoted as {a mathematical formula}S∈Epr(〈X,A〉)) if S is a maximal (wrt ⊆) admissible set. \n" +
                " \n" +
                "S is a stable extension (denoted as {a mathematical formula}S∈Estab(〈X,A〉)) if S is conflict-free and for any {a mathematical formula}y∉S, there is some {a mathematical formula}x∈S that attacks y, i.e. {a mathematical formula}S+=X∖S. \n" +
                " \n" +
                "S is the grounded extension of {a mathematical formula}〈X,A〉 (denoted as {a mathematical formula}S∈Egr(〈X,A〉)) if it is the (unique) least fixed point of {a mathematical formula}F, i.e. {a mathematical formula}S=F(S) and there is no {a mathematical formula}S′⊊S such that {a mathematical formula}S′=F(S′). \n" +
                " \n" +
                " \n" +
                "The existence and uniqueness of the grounded extension is established in [37] for all afs.";
        List<String> sl = testChunkSentences(str8);
        if (sl.isEmpty()) {
            System.out.println("没有识别到句子");
        }
        for (String row : sl) {
            System.out.println(row);
            System.out.println("=================");
        }
    }

    /**
     * Splits {@code text} into sentences.
     * <p>
     * Adapted from a LingPipe-based text-analysis example:
     * https://blog.csdn.net/textboy/article/details/45580009
     *
     * @param text raw input text; must not be {@code null}
     * @return one entry per detected sentence, in input order; trailing text
     *         after the last detected boundary is returned as a final entry.
     *         Empty list if the model finds no tokens/sentences.
     */
    public static List<String> testChunkSentences(String text) {
        List<String> result = new ArrayList<>();
        List<String> tokenList = new ArrayList<>();
        List<String> whiteList = new ArrayList<>();
        Tokenizer tokenizer = TOKENIZER_FACTORY.tokenizer(text.toCharArray(),
                0, text.length());
        // Fills tokenList with tokens and whiteList with the whitespace
        // runs between them; whiteList has one more element than tokenList
        // (leading whitespace at index 0), so whites[j + 1] below is the
        // whitespace FOLLOWING token j.
        tokenizer.tokenize(tokenList, whiteList);
        String[] tokens = tokenList.toArray(new String[0]);
        String[] whites = whiteList.toArray(new String[0]);
        int[] sentenceBoundaries = SENTENCE_MODEL.boundaryIndices(tokens, whites);
        int sentStartTok = 0;
        for (int boundaryTok : sentenceBoundaries) {
            StringBuilder sb = new StringBuilder();
            for (int j = sentStartTok; j <= boundaryTok; j++) {
                sb.append(tokens[j]).append(whites[j + 1]);
            }
            sentStartTok = boundaryTok + 1;
            result.add(sb.toString());
        }
        // BUG FIX: the original silently dropped any text after the last
        // detected boundary (e.g. a final sentence with no terminal
        // punctuation). Emit the remainder so no input text is lost.
        if (sentStartTok < tokens.length) {
            StringBuilder sb = new StringBuilder();
            for (int j = sentStartTok; j < tokens.length; j++) {
                sb.append(tokens[j]).append(whites[j + 1]);
            }
            result.add(sb.toString());
        }
        return result;
    }

    /**
     * Replaces full-width Chinese punctuation with its ASCII counterpart
     * followed by a space, so the Indo-European sentence model can be
     * tested against Chinese input.
     *
     * @param str input text; must not be {@code null}
     * @return the text with Chinese punctuation mapped to "ASCII + space"
     */
    public String splitfuhao(String str) {
        // The two arrays are parallel: ChineseInterpunction[i] maps to
        // EnglishInterpunction[i]. Note "……" (double ellipsis) maps to a
        // single "…", so a lone "…" in the input is left untouched.
        String[] ChineseInterpunction = {"“", "”", "‘", "’", "。", "，", "；", "：", "？", "！", "……", "—", "～", "（", "）", "《", "》"};
        String[] EnglishInterpunction = {"\"", "\"", "'", "'", ".", ",", ";", ":", "?", "!", "…", "-", "~", "(", ")", "<", ">"};
        for (int j = 0; j < ChineseInterpunction.length; j++) {
            // Trailing space gives the sentence model the whitespace it
            // expects after terminal punctuation.
            str = str.replace(ChineseInterpunction[j], EnglishInterpunction[j] + " ");
        }
        return str;
    }

}

