//package org.apache.nutch.analysis.mr;
package demo.in.ac.iitb.cfilt.analysis.en;

//package in.ac.cfilt.analysis.mr;
// This file is encoded in UTF-8

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

/**
 * Analyzer for the English language. Supports an external list of stopwords
 * (words that will not be indexed at all) and an external list of exclusions
 * (words that will not be stemmed, but indexed). A default set of stopwords
 * is used unless an alternative list is specified; the exclusion list is
 * empty by default.
 * 
 * @author Gerhard Schwarz
 * @version $Id: EnglishAnalyzer.java 151017 2004-11-29 22:22:48Z dnaber $
 */
public class EnglishAnalyzer extends Analyzer {

	/**
	 * Optional externally-supplied stop-word list. Never read inside this
	 * class; kept public and static for backward compatibility.
	 * NOTE(review): the "MARATHI" name is a leftover from the analyzer
	 * template this class was copied from.
	 */
	public static String[] MARATHI_STOP_WORDS;

	/**
	 * Path of the stop-word file loaded by the no-argument constructor.
	 * Public and static for backward compatibility with existing callers.
	 */
	public static String stopWordFileName;

	/**
	 * Contains the stopwords used with the StopFilter.
	 */
	private Set stopSet = new HashSet();

	/**
	 * Contains words that should be indexed but not stemmed.
	 */
	private Set exclusionSet = new HashSet();

	/**
	 * Builds an analyzer whose stop words are read from the given file.
	 *
	 * @param stopwords file containing one stop word per line
	 * @throws IOException if the file cannot be read
	 */
	public EnglishAnalyzer(File stopwords) throws IOException {
		stopSet = loadWordSet(stopwords);
	}

	/**
	 * Builds an analyzer using the default stop-word file.
	 * NOTE(review): the absolute path below is machine-specific; consider
	 * loading the list from the classpath or making the path configurable.
	 *
	 * @throws IOException if the default stop-word file cannot be read
	 */
	public EnglishAnalyzer() throws IOException {
		stopWordFileName = "/data1/arjun/Lucene3.5/stop_words_en.txt";
		stopSet = loadWordSet(new File(stopWordFileName));
	}

	/**
	 * Builds an analyzer with the given stop words.
	 *
	 * @param stopwords the stop words to filter out
	 */
	public EnglishAnalyzer(String[] stopwords) {
		stopSet = StopFilter.makeStopSet(stopwords);
	}

	/**
	 * Builds an analyzer whose stop words are the keys of the given table.
	 *
	 * @param stopwords table whose keys are the stop words
	 */
	public EnglishAnalyzer(Hashtable stopwords) {
		stopSet = new HashSet(stopwords.keySet());
	}

	/**
	 * Builds the stem-exclusion list from an array of Strings.
	 *
	 * @param exclusionlist words that should be indexed but not stemmed
	 */
	public void setStemExclusionTable(String[] exclusionlist) {
		exclusionSet = StopFilter.makeStopSet(exclusionlist);
	}

	/**
	 * Builds the stem-exclusion list from the keys of a Hashtable.
	 *
	 * @param exclusionlist table whose keys are the words to exclude
	 */
	public void setStemExclusionTable(Hashtable exclusionlist) {
		exclusionSet = new HashSet(exclusionlist.keySet());
	}

	/**
	 * Builds the stem-exclusion list from the words contained in the given
	 * file. Fixed: the FileReader was previously never closed (resource
	 * leak); it is now released in all cases.
	 *
	 * @param exclusionlist file containing one excluded word per line
	 * @throws IOException if the file cannot be read
	 */
	public void setStemExclusionTable(File exclusionlist) throws IOException {
		exclusionSet = loadWordSet(exclusionlist);
	}

	/**
	 * Loads a word set from a file, guaranteeing the reader is closed even
	 * when reading fails (the original constructors leaked the reader on
	 * exception).
	 *
	 * @param file file containing one word per line
	 * @return the set of words read from the file
	 * @throws IOException if the file cannot be read
	 */
	private static Set loadWordSet(File file) throws IOException {
		FileReader fr = new FileReader(file);
		try {
			return WordlistLoader.getWordSet(fr, Version.LUCENE_35);
		} finally {
			fr.close();
		}
	}

	/**
	 * Creates a TokenStream which tokenizes all the text in the provided
	 * Reader.
	 *
	 * @return a StandardTokenizer filtered with StopFilter and
	 *         EnglishStemFilter
	 */
	public TokenStream tokenStream(String fieldName, Reader reader) {
		// Use LUCENE_35 consistently with the rest of the class; the
		// original used LUCENE_CURRENT, whose behavior changes on upgrade.
		TokenStream result = new StandardTokenizer(Version.LUCENE_35, reader);
		// Version-based constructor enables position increments for 2.9+,
		// equivalent to the deprecated StopFilter(true, ...) form.
		result = new StopFilter(Version.LUCENE_35, result, stopSet);
		result = new EnglishStemFilter(result, exclusionSet);
		return result;
	}

	/**
	 * NOTE(review): does not actually cache or reuse token streams; it
	 * simply delegates to {@link #tokenStream(String, Reader)}.
	 */
	public TokenStream reusableTokenStream(String fieldName, Reader reader)
			throws IOException {
		return tokenStream(fieldName, reader);
	}

	/**
	 * Demo entry point: prints the stemmed tokens of a sample string.
	 */
	public static void main(String[] args) {
		try {
			Analyzer analyzer = new EnglishAnalyzer();
			String stringToStem = "many thanks";
			TokenStream ts = analyzer.tokenStream("content", new StringReader(
					stringToStem));
			while (ts.incrementToken()) {
				System.out.println("Stemmed word is "
						+ ts.getAttribute(TermAttribute.class));
			}
			// Release tokenizer resources (was leaked in the original).
			ts.close();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
}
