package lia.common;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Properties;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import static lia.util.Utilities.*;
import static lia.util.Constants.*;

/**
 * Builds the sample book index: scans the test data directory for book
 * description files (*.properties) and indexes one Lucene Document per book.
 * 
 * @author F.H Dragon(wenlong.meng@gmail.com)
 * @version 1.0 at 2011/08/07
 * @since 1.0
 */
public class CreateTestIndex {
	
	//local variables
	/**
	 * Shared application logger, resolved by name from the log4j configuration.
	 */
	private static final Logger logger = Logger.getLogger("commonLogger");

	/**
	 * Creates the test index: collects all book *.properties files under the
	 * data directory, converts each into a Lucene Document and writes them
	 * into a freshly created index (OpenMode.CREATE discards any old index).
	 * 
	 * @param args unused
	 * @throws IOException if the data files cannot be read or the index
	 *             cannot be written
	 */
	public static void main(String[] args) throws IOException {
		logger.debug("Begin:CreateTestIndex...");
		String dataDir = buildDataDir("n");
		String indexDir = buildIndexDir("n");
		logger.debug("data dir is " + dataDir);
		logger.debug("index dir is " + indexDir);

		List<File> files = new ArrayList<File>();
		findFiles(files, new File(dataDir));
		logger.debug(files.size() + " books to index.");
		
		Directory dir = FSDirectory.open(new File(indexDir));
		//diagnostic stream for IndexWriter; must be closed explicitly
		PrintStream infoStream = new PrintStream(new File(LIA_CONFIG_INDEXINFO_PATH));
		try {
			IndexWriterConfig iwc = new IndexWriterConfig(LUCENE_VERSION, new MyStandardAnalyzer(LUCENE_VERSION));
			iwc.setOpenMode(OpenMode.CREATE);//always rebuild the index from scratch
			IndexWriter iw = new IndexWriter(dir, iwc);
			try {
				iw.setInfoStream(infoStream);
				for (File file : files) {
					Document doc = getDocument(dataDir, file);
					iw.addDocument(doc);
				}
			} finally {
				//fix: writer was previously left open if addDocument threw
				iw.close();
			}
		} finally {
			//fix: the PrintStream and Directory were previously leaked on failure
			infoStream.close();
			dir.close();
		}
		logger.debug("End:CreateTestIndex!");
	}

	/**
	 * Builds a Lucene Document from one book description *.properties file.
	 * The category is derived from the file's path relative to the data
	 * directory; the remaining fields come from the properties themselves.
	 * 
	 * @param dataDir the base data directory (used to compute the category)
	 * @param file the *.properties file describing one book
	 * @return the populated Document, ready to be added to the index
	 * @throws FileNotFoundException if the properties file does not exist
	 * @throws IOException if the properties file cannot be read
	 */
	private static Document getDocument(String dataDir, File file) throws FileNotFoundException, IOException {
		
		Properties props = new Properties();
		FileInputStream in = new FileInputStream(file);
		try {
			props.load(in);
		} finally {
			//fix: the stream was previously never closed (file handle leak)
			in.close();
		}
		
		Document doc = new Document();
		
		//Get category: category comes from relative path below the base directory
		String category = file.getParent().substring(dataDir.length());
		category = category.replace(File.separatorChar, '/');
		
		//Pull fields
		String isbn = props.getProperty("isbn");
		String title = props.getProperty("title");
		String author = props.getProperty("author");
		String url = props.getProperty("url");
		String subject = props.getProperty("subject");
		String pubmonth = props.getProperty("pubmonth");
		logger.debug("file data:" + isbn + "\n" + title + "\n" + author + "\n" + url + "\n" + subject + "\n" + pubmonth);

		//Add fields to Document instance 
		doc.add(new Field("isbn", isbn, Field.Store.YES, Field.Index.NOT_ANALYZED));
		doc.add(new Field("category", category, Field.Store.YES, Field.Index.NOT_ANALYZED));
		doc.add(new Field("title", title, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
		//title2 is a lowercased, un-analyzed copy for exact (case-insensitive) matching/sorting
		doc.add(new Field("title2", title.toLowerCase(), Field.Store.YES, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
		//split multiple authors into unique field instances
		String[] authors = author.split(",");
		for(String _author : authors){
			doc.add(new Field("author", _author, Field.Store.YES, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
		}
		//Flag subject field
		doc.add(new Field("subject", subject, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
		doc.add(new Field("url", url, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
		//pubmonth is expected to be a numeric yyyyMM value - TODO confirm against data files
		doc.add(new NumericField("pubmonth", Field.Store.YES, true).setIntValue(Integer.parseInt(pubmonth)));
		Date d;
		try{
			d = DateTools.stringToDate(pubmonth);
		}catch(ParseException e){
			//unparseable pubmonth is a data error; fail fast with the cause preserved
			throw new RuntimeException(e);
		}
		//index the publication date as whole days since the epoch for range queries
		doc.add(new NumericField("pubmonthAsDay").setIntValue((int)(d.getTime()/(1000*3600*24))));
		//Add catch-all contents field aggregating the searchable text
		for(String text : new String[]{title, subject, author, category}){
			doc.add(new Field("contents", text, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
		}
		
		return doc;
	}
	
	/**
	 * Concatenates the given strings into one string, appending a single
	 * space after every element — including the last one, so the result
	 * always ends with a trailing space when the input is non-empty.
	 * 
	 * @param strings the values to concatenate
	 * @return the space-terminated concatenation ("" for an empty array)
	 */
	public static String aggregate(String[] strings){
		StringBuilder joined = new StringBuilder();
		for (String s : strings) {
			joined.append(s).append(' ');
		}
		return joined.toString();
	}

	/**
	 * Recursively collects all book description files (regular files whose
	 * name ends with ".properties") under the given directory.
	 * 
	 * @param files
	 *            - output list the matching files are appended to
	 * @param dir
	 *            - the root dir to scan
	 */
	private static void findFiles(List<File> files, File dir) {
		File[] entries = dir.listFiles();
		if (entries == null) {
			//fix: listFiles() returns null for non-directories or on I/O
			//error; previously this caused a NullPointerException
			return;
		}
		for (File file : entries) {
			//fix: require isFile() so a directory named "*.properties" is
			//recursed into instead of being treated as a data file
			if (file.isFile() && file.getName().endsWith(".properties")) {
				files.add(file);
			} else if (file.isDirectory()) {
				findFiles(files, file);// Recursive
			}
		}
	}

	/**
	 * Custom analyzer reproducing the StandardAnalyzer chain — filters
	 * {@link StandardTokenizer} with {@link StandardFilter},
	 * {@link LowerCaseFilter} and {@link StopFilter} — but overriding
	 * {@link #getPositionIncrementGap(String)} so that multiple values of the
	 * "contents" field get a large position gap between them, preventing
	 * phrase queries from matching across field-instance boundaries.
	 * <p>
	 * NOTE(review): the constructor calls {@code super(version)} only, which
	 * in Lucene 3.x leaves the inherited {@code stopwords} set empty — the
	 * StopFilter below would then remove nothing. Confirm whether the English
	 * stop word set (StandardAnalyzer.STOP_WORDS_SET) was intended here.
	 * <p>
	 * You must specify the required {@link Version} compatibility when creating
	 * MyStandardAnalyzer.
	 * 
	 * @author F.H Dragon(wenlong.meng@gmail.com)
	 * @version 1.0 at 2011/08/07
	 * @since 1.0
	 */
	public static class MyStandardAnalyzer extends StopwordAnalyzerBase {
		
		//local variables
		/**
		 * Default maximum allowed token length
		 */
		public static final int DEFAULT_MAX_TOKEN_LENGTH = 255;
		/**
		 * Maximum token length applied to the tokenizer; tokens longer than
		 * this are discarded by {@link StandardTokenizer}
		 */
		private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
		/**
		 * Specifies whether deprecated acronyms should be replaced with HOST type.
		 * See {@linkplain "https://issues.apache.org/jira/browse/LUCENE-1068"}
		 */
//		private final boolean replaceInvalidAcronym;

		/**
		 * Creates a new Analyzer. The stop word set is whatever
		 * {@code StopwordAnalyzerBase(Version)} initializes it to (empty in
		 * Lucene 3.x — see the class-level NOTE above).
		 * 
		 * @param version
		 *            the Lucene version for cross version compatibility
		 */
		protected MyStandardAnalyzer(Version version) {
			super(version);
//			replaceInvalidAcronym = matchVersion.onOrAfter(Version.LUCENE_24);
		}

		/**
		 * Invoked before indexing a Fieldable instance if terms have already
		 * been added to that field. This allows custom analyzers to place an
		 * automatic position increment gap between Fieldable instances using
		 * the same field name. The default value position increment gap is 0.
		 * With a 0 position increment gap and the typical default token
		 * position increment of 1, all terms in a field, including across
		 * Fieldable instances, are in successive positions, allowing exact
		 * PhraseQuery matches, for instance, across Fieldable instance
		 * boundaries. Returning 100 for "contents" keeps the multiple
		 * catch-all values added in getDocument from matching as one phrase.
		 * 
		 * @param fieldName
		 *            - Fieldable name being indexed.
		 * @return position increment gap, added to the next token emitted from
		 *         {@link #tokenStream(String,Reader)}
		 * @see org.apache.lucene.analysis.Analyzer#getPositionIncrementGap(java.lang.String)
		 */
		@Override
		public int getPositionIncrementGap(String fieldName) {
			if (fieldName.equals("contents")) {
				return 100;
			} else {
				return 0;
			}
		}
		
		/**
		 * @return the maxTokenLength
		 */
		public int getMaxTokenLength() {
			return maxTokenLength;
		}
		/**
		 * @param maxTokenLength the maxTokenLength to set
		 */
		public void setMaxTokenLength(int maxTokenLength) {
			this.maxTokenLength = maxTokenLength;
		}

		/**
		 * Creates a new {@link TokenStreamComponents} instance for this
		 * analyzer: StandardTokenizer (capped at {@link #maxTokenLength})
		 * feeding StandardFilter, LowerCaseFilter and StopFilter in turn.
		 * The anonymous subclass re-applies the current maxTokenLength on
		 * every reuse, so a later setMaxTokenLength call takes effect on
		 * reused streams.
		 * 
		 * @param fieldName
		 *            the name of the fields content passed to the
		 *            {@link TokenStreamComponents} sink as a reader
		 * @param aReader
		 *            the reader passed to the {@link Tokenizer} constructor
		 * @return the {@link TokenStreamComponents} for this analyzer.
		 */
		@Override
		protected TokenStreamComponents createComponents(String fieldName,
				Reader aReader) {
			final StandardTokenizer src = new StandardTokenizer(matchVersion, aReader);
		    src.setMaxTokenLength(maxTokenLength);
//		    src.setReplaceInvalidAcronym(replaceInvalidAcronym);
		    TokenStream tok = new StandardFilter(matchVersion, src);
		    tok = new LowerCaseFilter(matchVersion, tok);
		    tok = new StopFilter(matchVersion, tok, stopwords);
		    return new TokenStreamComponents(src, tok) {
		      @Override
		      protected boolean reset(final Reader reader) throws IOException {
		        src.setMaxTokenLength(MyStandardAnalyzer.this.maxTokenLength);
		        return super.reset(reader);
		      }
		    };
		}

	}

}
