package org.wltea.analyzer.seg;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;
import org.wltea.analyzer.term.Term;

/**
 * Thin wrapper around {@link IKAnalyzer} that segments a text string into a
 * list of {@link Term} objects (start offset, end offset, token text, token type).
 */
public class Segmenter {

	private static final Logger LOG = Logger.getLogger(Segmenter.class.getName());

	// Analyzer is reusable across seg() calls; created once in the constructor.
	final Analyzer analyzer;

	/**
	 * Creates a segmenter backed by IKAnalyzer.
	 *
	 * @param useSmart {@code true} for IK's smart (coarser-grained) segmentation
	 *                 mode, {@code false} for the finest-grained mode
	 */
	public Segmenter(boolean useSmart) {
		analyzer = new IKAnalyzer(useSmart);
	}

	/**
	 * Segments the given text into terms.
	 *
	 * @param seg the text to segment
	 * @return the terms found, in order of appearance; if an {@link IOException}
	 *         occurs mid-stream, the terms collected so far are returned
	 *         (best-effort behavior, matching the original contract)
	 */
	public List<Term> seg(String seg) {
		List<Term> list = new ArrayList<>();
		// try-with-resources releases the TokenStream (and its StringReader)
		// even if tokenization fails part-way through.
		try (TokenStream ts = analyzer.tokenStream("", new StringReader(seg))) {
			// Offset attribute: start/end character positions of each token
			OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
			// Term attribute: the token's text
			CharTermAttribute word = ts.addAttribute(CharTermAttribute.class);
			// Type attribute: the lexical type assigned by the tokenizer
			TypeAttribute type = ts.addAttribute(TypeAttribute.class);

			// Mandatory Lucene workflow: reset() before iterating,
			// end() after the last incrementToken().
			ts.reset();
			while (ts.incrementToken()) {
				list.add(new Term(offset.startOffset(), offset.endOffset(),
						word.toString(), type.type()));
			}
			ts.end();
		} catch (IOException e) {
			// Best-effort: log the failure and return whatever was tokenized
			// so far rather than propagating (preserves original behavior).
			LOG.log(Level.WARNING, "I/O error while tokenizing input", e);
		}
		return list;
	}

}
