//
// Source code recreated from a .class file by IntelliJ IDEA
// (powered by Fernflower decompiler)
//

package org.wltea.analyzer.sample;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.IOException;
import java.io.StringReader;
import java.util.Scanner;

/**
 * Demo for the IK Analyzer: tokenizes a fixed sample sentence, then tokenizes
 * each whitespace-delimited token read from standard input until EOF.
 *
 * <p>For every term produced, prints start/end character offsets, the term
 * text, and the lexical type reported by the analyzer.
 */
public class IKAnalzyerDemo {
	public IKAnalzyerDemo() {
	}

	/**
	 * Entry point: runs the sample split, then echoes and splits each token
	 * read from {@code System.in}.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		// true => smart (coarse-grained) segmentation mode of IKAnalyzer.
		Analyzer analyzer = new IKAnalyzer(true);
		try {
			String text = "这是一个中文分词的例子，你可以直接运行它！IKAnalyer can analysis english text too";
			split(analyzer, text);

			// try-with-resources: close the Scanner (and System.in wrapper)
			// instead of leaking it as the original code did.
			try (Scanner sc = new Scanner(System.in)) {
				while (sc.hasNext()) {
					String next = sc.next();
					System.out.println(next);
					split(analyzer, next);
				}
			}
		} finally {
			// Analyzer is Closeable; release its reuse strategy/resources.
			analyzer.close();
		}
	}

	/**
	 * Tokenizes {@code text} with the given analyzer and prints one line per
	 * term: {@code start - end : term | type}.
	 *
	 * @param analyzer analyzer used to build the token stream
	 * @param text     text to tokenize
	 */
	private static void split(Analyzer analyzer, String text) {
		System.out.println("split =============== " + text);

		// TokenStream is AutoCloseable: try-with-resources replaces the
		// manual null-check / nested try-catch close of the decompiled code.
		try (TokenStream ts = analyzer.tokenStream("myfield", new StringReader(text))) {
			// Attributes must be registered before reset(); they are live
			// views updated by each incrementToken() call.
			OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
			CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
			TypeAttribute type = ts.addAttribute(TypeAttribute.class);
			ts.reset();

			while (ts.incrementToken()) {
				System.out.println(offset.startOffset() + " - " + offset.endOffset() + " : " + term.toString() + " | " + type.type());
			}

			// end() is part of the TokenStream consumption contract; it must
			// run after the last incrementToken() and before close().
			ts.end();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
}
