package segmenter;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

/**
 * English word segmenter built on Lucene 3.6.
 *
 * <p>Pipeline: {@link StandardTokenizer} → lower-casing → stop-word removal →
 * (optionally) Porter stemming. Call {@link #readStopWords(String)} before
 * {@link #splitWord(String)} if stop-word filtering is wanted; otherwise the
 * stop-word set is empty and nothing is removed.</p>
 *
 * <p>Not thread-safe: the stop-word set is mutable shared state.</p>
 */
public class EnglishSegment {

	//Lucene version is 3.6.2
	private final Version matchVersion = Version.LUCENE_36;
	//stop-word list, populated by readStopWords(); shared by every tokenStream() call
	private Set<String> stopWords = new HashSet<String>();
	//whether to apply Porter stemming
	private Boolean doStemming;

	// A '.' that has a letter on both sides (e.g. "e.g", "a.b.c").
	// Using lookaround means every such dot is replaced independently, so
	// "a.b.c" becomes "a b c" — the old match-and-replaceAll loop treated the
	// matched text as a regex (its '.' matched ANY character) and could both
	// clobber unrelated substrings and leave later dots of a chain unsplit.
	private static final Pattern DOT_BETWEEN_LETTERS =
			Pattern.compile("(?<=\\pL)\\.(?=\\pL)");

	// Tokens made up entirely of symbols, digits, punctuation or control chars.
	private static final Pattern NON_WORD =
			Pattern.compile("[\\pS\\pN\\pP\\pC]*");

	/**
	 * Creates a segmenter.
	 *
	 * @param doStemming {@code true} to apply Porter stemming; {@code null}
	 *                   is treated as {@code false}
	 */
	public EnglishSegment(Boolean doStemming){
		this.doStemming = doStemming;
	}

	/**
	 * Segments English text into space-separated, normalized words.
	 *
	 * <p>Dots joining letters are first replaced by spaces so the tokenizer
	 * does not glue words like "a.b" together. Tokens that are pure
	 * digits/punctuation/symbols or only one character long are dropped, and
	 * trailing apostrophes / possessive "'s" are stripped via {@link #filter}.</p>
	 *
	 * @param content the raw text to segment
	 * @return the surviving tokens joined by single spaces (with a trailing
	 *         space when non-empty, matching the original contract)
	 * @throws IOException if the underlying token stream fails
	 */
	public String splitWord(String content) throws IOException{

		// Split words that would otherwise stay joined by a '.'
		content = DOT_BETWEEN_LETTERS.matcher(content).replaceAll(" ");

		TokenStream ts = this.tokenStream("dummy", new StringReader(content));
		StringBuilder result = new StringBuilder();
		CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);

		try {
			// reset() is a no-op for a 3.x Tokenizer fed via its constructor,
			// but calling it keeps us on the documented consumer workflow.
			ts.reset();

			while(ts.incrementToken()){
				String word = termAtt.toString();

				// Drop pure number/punctuation/symbol tokens and 1-char words.
				if(!NON_WORD.matcher(word).matches() && word.length() > 1){
					String w = filter(word);
					if(w != null){
						result.append(w).append(' ');
					}
				}
			}
			ts.end();
		} finally {
			// The original leaked the stream; always release it.
			ts.close();
		}

		return result.toString();
	}

	/**
	 * Builds the analysis chain: standard tokenization, lower-casing,
	 * stop-word removal, and Porter stemming when {@code doStemming} is true.
	 *
	 * @param fileName unused field name required by the Lucene API shape
	 * @param reader   source of the text to analyze
	 * @return the configured token stream (caller is responsible for closing)
	 */
	public TokenStream tokenStream(String fileName, Reader reader) {
		TokenStream ts = new StandardTokenizer(matchVersion, reader);
		ts = new LowerCaseFilter(matchVersion, ts);
		ts = new StopFilter(matchVersion, ts, stopWords);

		// Boolean.TRUE.equals is null-safe: a null doStemming used to NPE here.
		if(Boolean.TRUE.equals(doStemming)){
			ts = new PorterStemFilter(ts);
		}

		return ts;
	}

	/**
	 * Strips a trailing apostrophe (straight or curly) or a trailing
	 * possessive {@code 's} from a word.
	 *
	 * @param word the token to clean (callers guarantee length &gt; 1)
	 * @return the cleaned word, or {@code null} if stripping would leave
	 *         one character or less (the token is then discarded)
	 */
	public String filter(String word){
		int len = word.length();
		char last = word.charAt(len - 1);

		// Trailing apostrophe: keep only if at least 2 chars remain.
		if(last == '\'' || last == '’'){
			return len > 2 ? word.substring(0, len - 1) : null;
		}

		// Trailing possessive 's: keep only if at least 2 chars remain.
		if(len > 2 && last == 's'){
			char beforeS = word.charAt(len - 2);
			if(beforeS == '\'' || beforeS == '’'){
				return len > 3 ? word.substring(0, len - 2) : null;
			}
		}

		// Plain word: keep when longer than one character.
		return len > 1 ? word : null;
	}

	/**
	 * Loads the stop-word list, one word per line, into this segmenter.
	 *
	 * <p>Lines are trimmed (so CRLF files no longer store {@code "word\r"}
	 * entries that never match) and blank lines are skipped. The file is read
	 * as UTF-8 — the original used the platform default charset, which made
	 * results machine-dependent; UTF-8 is a superset of the ASCII stop-word
	 * lists this is normally fed.</p>
	 *
	 * @param swlistPath path to the stop-word file
	 * @return the (shared, mutable) stop-word set, for convenience
	 * @throws IOException if the file cannot be read
	 */
	public Set<String> readStopWords(String swlistPath) throws IOException{
		BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(swlistPath), "UTF-8"));
		try {
			String line;
			while((line = br.readLine()) != null){
				String word = line.trim();
				if(!word.isEmpty()){
					stopWords.add(word);
				}
			}
		} finally {
			// The original leaked the reader on every call and on exceptions.
			br.close();
		}

		return stopWords;
	}

	/**
	 * Enables or disables Porter stemming for subsequent calls.
	 *
	 * @param doStemming {@code true} to stem, {@code false} to skip stemming
	 */
	public void setDoStemming(boolean doStemming){
		this.doStemming = doStemming;
	}

}
