package com.example.nplboot.test;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;
import com.aliasi.dict.DictionaryEntry;
import com.aliasi.dict.ExactDictionaryChunker;
import com.aliasi.dict.MapDictionary;
import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

import java.util.ArrayList;
import java.util.List;

/**
 * Named entity recognition (NER) demo using LingPipe sentence chunking
 * and exact dictionary-based chunking.
 * @Classname TextAnalyzer
 * @Date 2023/2/12 22:27
 * @Created by 87766867@qq.com
 */
public class TextAnalyzer {

    /** Score assigned to every dictionary chunk match. */
    static final double CHUNK_SCORE = 1.0;
    /** Tokenizer shared by the sentence-chunking demo. */
    static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
    /** Heuristic Indo-European sentence boundary model. */
    static final SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel();

    /** Demo/utility class with only static members; not meant to be instantiated. */
    private TextAnalyzer() {
    }

    public static void main(String[] args) {
        //testChunkSentences();
        testChunkDictionary();
    }

    /**
     * Sentence chunking demo: tokenizes a fixed text, locates sentence
     * boundaries with {@link #SENTENCE_MODEL}, rebuilds each sentence from
     * its tokens and trailing whitespace, and prints the results.
     */
    private static void testChunkSentences() {
        String text = "50 元 mp3 播放器 50 元 律师. 此 人 是 约翰，他 是 一名 律师.";
        List<String> result = new ArrayList<String>();

        List<String> tokenList = new ArrayList<String>();
        List<String> whiteList = new ArrayList<String>();
        Tokenizer tokenizer = TOKENIZER_FACTORY.tokenizer(text.toCharArray(),
                0, text.length());
        // Per the LingPipe Tokenizer contract, tokenize() produces one more
        // whitespace entry than tokens, so whites[j + 1] below is the
        // whitespace FOLLOWING token j.
        tokenizer.tokenize(tokenList, whiteList);
        String[] tokens = tokenList.toArray(new String[tokenList.size()]);
        String[] whites = whiteList.toArray(new String[whiteList.size()]);
        // Each boundary index is the token index of a sentence-final token.
        int[] sentenceBoundaries = SENTENCE_MODEL.boundaryIndices(tokens,
                whites);
        int sentStartTok = 0;
        for (int i = 0; i < sentenceBoundaries.length; ++i) {
            // Scope the end index to the iteration that uses it.
            int sentEndTok = sentenceBoundaries[i];
            System.out.println("Sentense " + (i + 1) + ", sentense's length(from 0):" + (sentenceBoundaries[i]));
            StringBuilder sb = new StringBuilder();
            for (int j = sentStartTok; j <= sentEndTok; j++) {
                sb.append(tokens[j]).append(whites[j + 1]);
            }
            sentStartTok = sentEndTok + 1;
            result.add(sb.toString());
        }
        System.out.println("Final result:" + result);
    }

    /**
     * NER demo: exact dictionary-based chunking over two sample texts,
     * run with all four combinations of the chunker's
     * (returnAllMatches, caseSensitive) constructor flags.
     */
    private static void testChunkDictionary() {
        String[] texts = {
                "50 元 mp3 播放器 50 元 律师.", "此 人 是 约翰，他 是 一名 律师."
        };

        MapDictionary<String> dictionary = new MapDictionary<String>();
        dictionary.addEntry(new DictionaryEntry<String>("50 元", "人", CHUNK_SCORE));
        dictionary.addEntry(new DictionaryEntry<String>("mp3 播放器", "播放器", CHUNK_SCORE));
        dictionary.addEntry(new DictionaryEntry<String>("元", "货币单位", CHUNK_SCORE));
        dictionary.addEntry(new DictionaryEntry<String>("播放器", "产品", CHUNK_SCORE));

        // Constructor flags are (dictionary, tokenizerFactory, returnAllMatches,
        // caseSensitive). With returnAllMatches=false, matched text is bypassed
        // in further matching, so only non-overlapping matches are returned.
        ExactDictionaryChunker dictionaryChunkerTT = new ExactDictionaryChunker(dictionary,
                IndoEuropeanTokenizerFactory.INSTANCE, true, true);

        ExactDictionaryChunker dictionaryChunkerTF = new ExactDictionaryChunker(dictionary,
                IndoEuropeanTokenizerFactory.INSTANCE, true, false);

        ExactDictionaryChunker dictionaryChunkerFT = new ExactDictionaryChunker(dictionary,
                IndoEuropeanTokenizerFactory.INSTANCE, false, true);

        ExactDictionaryChunker dictionaryChunkerFF = new ExactDictionaryChunker(dictionary,
                IndoEuropeanTokenizerFactory.INSTANCE, false, false);

        System.out.println("\n词典\n" + dictionary);

        // Run every chunker variant over every sample text.
        for (String text : texts) {
            System.out.println("\n\nTEXT=" + text);

            chunk(dictionaryChunkerTT, text);
            chunk(dictionaryChunkerTF, text);
            chunk(dictionaryChunkerFT, text);
            chunk(dictionaryChunkerFF, text);
        }
    }

    /**
     * Runs the given chunker over {@code text} and prints every chunk found:
     * the matched phrase, its character span, its type and its score.
     *
     * @param chunker configured dictionary chunker to apply
     * @param text    text to chunk
     */
    static void chunk(ExactDictionaryChunker chunker, String text) {
        System.out.println("\n块." + " 所有匹配项=" + chunker.returnAllMatches() + " 区分大小写=" + chunker.caseSensitive());
        Chunking chunking = chunker.chunk(text);
        for (Chunk chunk : chunking.chunkSet()) {
            int start = chunk.start();
            int end = chunk.end();
            String type = chunk.type();
            double score = chunk.score();
            String phrase = text.substring(start, end);
            System.out.println("     短语=|" + phrase + "|" + " 开始=" + start + " 结束=" + end + " 类型=" + type + " 得分=" + score);
        }
    }

}