package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.apdplat.word.lucene.ChineseWordTokenizer;
import org.apdplat.word.lucene.NewChineseWordTokenizer;
import org.apdplat.word.segmentation.Segmentation;
import org.apdplat.word.segmentation.SegmentationAlgorithm;
import org.apdplat.word.segmentation.SegmentationFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

/**
 * Tokenizer factory for the apdplat "word" Chinese segmentation library,
 * exposing one static factory method per supported segmentation algorithm.
 *
 * User: Kenn
 * Date: 2017/9/1, 3:18 PM
 */
public class ChineseWordTokenizerFactory extends AbstractTokenizerFactory {

    /** Segmentation strategy used for every tokenizer this factory creates; fixed at construction. */
    private final Segmentation segmentation;

    /**
     * Creates a tokenizer factory backed by the given segmentation algorithm.
     *
     * @param indexSettings settings of the index this analyzer belongs to
     * @param name          the registered analysis component name
     * @param settings      component-level settings
     * @param algorithm     segmentation algorithm used to obtain the {@link Segmentation} instance
     */
    public ChineseWordTokenizerFactory(IndexSettings indexSettings, String name, Settings settings, SegmentationAlgorithm algorithm) {
        super(indexSettings, name, settings);
        // Assign the field directly; the previous redundant local shadowed it to no benefit.
        this.segmentation = SegmentationFactory.getSegmentation(algorithm);
    }

    // NOTE(review): the static factories below keep an unused Environment parameter —
    // presumably required to match Elasticsearch's AnalysisProvider signature at plugin
    // registration; do not remove it without checking the registering plugin class.

    /** Factory using the forward maximum-matching algorithm. */
    public static ChineseWordTokenizerFactory getMaximumMatchingTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.MaximumMatching);
    }

    /** Factory using the reverse maximum-matching algorithm. */
    public static ChineseWordTokenizerFactory getReverseMaximumMatchingTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.ReverseMaximumMatching);
    }

    /** Factory using the forward minimum-matching algorithm. */
    public static ChineseWordTokenizerFactory getMinimumMatchingTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.MinimumMatching);
    }

    /** Factory using the reverse minimum-matching algorithm. */
    public static ChineseWordTokenizerFactory getReverseMinimumMatchingTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.ReverseMinimumMatching);
    }

    /** Factory using the bidirectional maximum-matching algorithm. */
    public static ChineseWordTokenizerFactory getBidirectionalMaximumMatchingTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.BidirectionalMaximumMatching);
    }

    /** Factory using the bidirectional minimum-matching algorithm. */
    public static ChineseWordTokenizerFactory getBidirectionalMinimumMatchingTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.BidirectionalMinimumMatching);
    }

    /** Factory using the combined bidirectional maximum/minimum-matching algorithm. */
    public static ChineseWordTokenizerFactory getBidirectionalMaximumMinimumMatchingTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.BidirectionalMaximumMinimumMatching);
    }

    /** Factory using the full-segmentation algorithm (emits all possible words). */
    public static ChineseWordTokenizerFactory getFullSegmentationTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.FullSegmentation);
    }

    /** Factory using the minimal-word-count algorithm. */
    public static ChineseWordTokenizerFactory getMinimalWordCountTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.MinimalWordCount);
    }

    /** Factory using the max n-gram-score algorithm. */
    public static ChineseWordTokenizerFactory getMaxNgramScoreTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        return new ChineseWordTokenizerFactory(indexSettings, name, settings, SegmentationAlgorithm.MaxNgramScore);
    }

    /**
     * Creates a new {@link ChineseWordTokenizer} driven by this factory's segmentation strategy.
     *
     * @return a fresh tokenizer instance (tokenizers are stateful and must not be shared)
     */
    @Override
    public Tokenizer create() {
        return new ChineseWordTokenizer(segmentation);
    }
}
