package eu.jk.document.segment;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;

public class TextAnalyst {

	/**
	 * Counts word frequencies in the given text and returns the most
	 * frequent words, ordered from most to least frequent.
	 *
	 * <p>Tokens are produced by the IK segmenter in smart mode; only CJK
	 * words ({@code Lexeme.TYPE_CJK_NORMAL}) and letter tokens
	 * ({@code Lexeme.TYPE_LETTER}) are counted, everything else is skipped.
	 * If segmentation fails mid-way, the words counted so far are still
	 * ranked and returned (best effort, matching the original behavior).
	 *
	 * @param text the text to analyse; {@code null} or empty yields an empty list
	 * @param top  the maximum number of high-frequency words to return;
	 *             values {@code <= 0} yield an empty list
	 * @return up to {@code top} distinct words, most frequent first; never {@code null}
	 */
	public static List<String> findMaxOfenWord(String text, int top) {
		List<String> result = new ArrayList<String>();
		// Guard: original code threw NPE on null text; be defensive instead.
		if (text == null || top <= 0) {
			return result;
		}
		// Count occurrences of each accepted token.
		Map<String, Integer> counts = new HashMap<String, Integer>();
		IKSegmentation seg = new IKSegmentation(new StringReader(text), true);
		try {
			Lexeme l;
			while ((l = seg.next()) != null) {
				int type = l.getLexemeType();
				if (type != Lexeme.TYPE_CJK_NORMAL && type != Lexeme.TYPE_LETTER) {
					continue;
				}
				String word = l.getLexemeText();
				// Single-lookup increment instead of containsKey + get + put.
				Integer prev = counts.get(word);
				counts.put(word, prev == null ? 1 : prev + 1);
			}
		} catch (IOException e) {
			// Best effort: fall through and rank whatever was counted so far.
			e.printStackTrace();
		}
		// Rank by frequency with one O(n log n) sort. This replaces the
		// original O(top * n) selection loop that destructively overwrote
		// each selected count with the sentinel -1.
		List<Map.Entry<String, Integer>> entries =
				new ArrayList<Map.Entry<String, Integer>>(counts.entrySet());
		Collections.sort(entries, new Comparator<Map.Entry<String, Integer>>() {
			public int compare(Map.Entry<String, Integer> a, Map.Entry<String, Integer> b) {
				// Descending by count; ties keep hash-map order, which was
				// unspecified in the original as well.
				return b.getValue().compareTo(a.getValue());
			}
		});
		for (int i = 0; i < top && i < entries.size(); i++) {
			result.add(entries.get(i).getKey());
		}
		return result;
	}

}
