package com.chao.yyks.util;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;
import com.chao.common.util.StrUtile;

public class SpliteText {

	/** Shared LingPipe tokenizer factory (stateless, safe to reuse across calls). */
	static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
	/** Shared LingPipe Indo-European sentence model used to locate sentence boundaries. */
	static final SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel();

	// Chinese punctuation and its index-aligned English replacement, hoisted to
	// constants so the arrays are not rebuilt on every splitfuhao() call.
	private static final String[] CHINESE_INTERPUNCTION = { ".\n", "\n", "“", "”", "‘", "’", "。", "，", "；", "：", "？", "！", "……", "—", "～", "（", "）", "《", "》" };
	private static final String[] ENGLISH_INTERPUNCTION = { ".", ".", "\"", "\"", "'", "'", ".", ",", ";", ":", "?", "!", "…", "-", "~", "(", ")", "<", ">" };

	/** English punctuation replaced by spaces before counting words in splitfuhaoCh(). */
	private static final String[] WORD_BREAK_PUNCTUATION = { ".\n", "\n", ".", ",", ";", ":", "?", "!", "(", ")", "<", ">" };

	private SpliteText() {
		// Utility class — not meant to be instantiated.
	}

	// Demo entry point. These are typical examples where plain regex-based
	// sentence splitting breaks down.
	public static void main(String[] args) {
		String paragraph = "Conversation one..";

		List<String> sl = sq(paragraph);
		if (sl.isEmpty()) {
			System.out.println("没有识别到句子");
		}
		for (String row : sl) {
			System.out.println(row);
		}
	}

	/**
	 * Splits {@code text} into sentences. Chinese punctuation is first mapped
	 * to English equivalents, then LingPipe's sentence chunker is applied.
	 *
	 * @param text the raw paragraph text
	 * @return the detected sentences; if none are detected, a single-element
	 *         list containing the original text (never an empty list)
	 */
	public static List<String> sq(String text) {
		List<String> sl = testChunkSentences(splitfuhao(text));
		if (sl.isEmpty()) {
			return Arrays.asList(text);
		}
		return sl;
	}

	// Sentence detection via LingPipe, adapted from a text-analysis example:
	// https://blog.csdn.net/textboy/article/details/45580009
	private static List<String> testChunkSentences(String text) {
		List<String> result = new ArrayList<String>();
		List<String> tokenList = new ArrayList<String>();
		List<String> whiteList = new ArrayList<String>();
		Tokenizer tokenizer = TOKENIZER_FACTORY.tokenizer(text.toCharArray(),
				0, text.length());
		tokenizer.tokenize(tokenList, whiteList);
		String[] tokens = tokenList.toArray(new String[0]);
		String[] whites = whiteList.toArray(new String[0]);
		// boundaryIndices returns, for each sentence, the index of its last token.
		int[] sentenceBoundaries = SENTENCE_MODEL.boundaryIndices(tokens,
				whites);
		int sentStartTok = 0;
		for (int sentEndTok : sentenceBoundaries) {
			StringBuilder sb = new StringBuilder();
			// whites has one more element than tokens; whites[j + 1] is the
			// whitespace FOLLOWING tokens[j] (standard LingPipe reconstruction).
			for (int j = sentStartTok; j <= sentEndTok; j++) {
				sb.append(tokens[j]).append(whites[j + 1]);
			}
			sentStartTok = sentEndTok + 1;
			if (StrUtile.isEmpty(sb.toString())) {
				continue; // skip sentences that reconstruct to nothing
			}
			result.add(sb.toString());
		}
		// NOTE(review): tokens after the last detected boundary are dropped
		// here; sq() falls back to the whole input when nothing is detected.
		return result;
	}

	/**
	 * Replaces Chinese punctuation with its English equivalent plus a trailing
	 * space, so the (English-oriented) sentence model can also detect sentence
	 * boundaries in Chinese text.
	 *
	 * @param str the input text
	 * @return the text with Chinese punctuation normalized to English
	 */
	public static String splitfuhao(String str) {
		for (int j = 0; j < CHINESE_INTERPUNCTION.length; j++) {
			str = str.replace(CHINESE_INTERPUNCTION[j], ENGLISH_INTERPUNCTION[j] + " ");
		}
		return str;
	}

	/**
	 * Counts the words in an article after stripping English punctuation.
	 *
	 * <p>Fix: the original split on a single space, so consecutive spaces
	 * (common after the punctuation-to-space replacement) produced empty
	 * tokens that inflated the count, and a blank input counted as one word.
	 * Trimming and splitting on {@code \s+} yields the true word count, and
	 * null/blank input now returns 0.
	 *
	 * @param str the article text (may be null or blank)
	 * @return the number of whitespace-separated words
	 */
	public static int splitfuhaoCh(String str) {
		if (str == null) {
			return 0;
		}
		for (String p : WORD_BREAK_PUNCTUATION) {
			str = str.replace(p, " ");
		}
		String trimmed = str.trim();
		return trimmed.isEmpty() ? 0 : trimmed.split("\\s+").length;
	}

}