package net.tngou.c4j.ik;

import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Set;


/**
 * Tngou segmenter: scans a text for a set of keywords and reports the
 * position of every occurrence.
 *
 * @author Administrator
 */
public class TnSegmenter {

	/**
	 * Scans {@code html} for every occurrence of the given keywords and
	 * returns one {@link TnLexeme} per match, recording its start offset,
	 * length and matched text.
	 *
	 * <p>At each position the keywords are tried longest-first, and only the
	 * first (longest) keyword matching at that position is recorded.
	 * Overlapping matches at later positions are still reported.
	 *
	 * <p>NOTE(review): this method sorts the caller's {@code list} in place
	 * (original behavior, preserved for compatibility).
	 *
	 * @param html the text to scan; must not be null
	 * @param list the keywords to locate; sorted in place, longest first
	 * @return the matches found, in ascending position order
	 */
	public static List<TnLexeme> TnLexemes(String html, List<String> list) {

		// Longest keyword first so the longest match at a position wins.
		Collections.sort(list, (a, b) -> Integer.compare(b.length(), a.length()));

		List<TnLexeme> lexemes = new ArrayList<TnLexeme>();
		int length = html.length();
		for (int i = 0; i < length; i++) {
			for (String word : list) {
				// startsWith(word, i) replaces the original charAt +
				// substring comparison without allocating. Empty keywords
				// are skipped explicitly: the original's first-character
				// check rejected them implicitly.
				if (!word.isEmpty() && html.startsWith(word, i)) {
					// type=1 when the character two positions back is '='
					// (presumably a match inside an attribute value such as
					// name="..."). NOTE(review): the i > 10 guard is kept
					// from the original; it looks like it was meant to be
					// i >= 2 — confirm intent before changing.
					int type = (i > 10 && html.charAt(i - 2) == '=') ? 1 : 0;
					lexemes.add(new TnLexeme(i, word.length(), word, type));
					break; // longest match recorded; move to next position
				}
			}
		}

		return lexemes;
	}
}