package ingest.blog.icwsm.icwsmutils;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 * http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 * 
 * 
 * Note: this file is a modification of the Lucene examples. It uses some code
 * that was distributed therein. Hence the original license.
 */

import ingest.blog.icwsm.icwsmutils.Constants.Language;
import ingest.blog.icwsm.icwsmutils.Constants.SourceType;
import ingest.blog.icwsm.protoutils.ProtoStreamUtils;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import com.spinn3r.api.protobuf.ContentApi.Entry;
import com.spinn3r.api.protobuf.ContentApi.PermalinkEntry;



public class LuceneUtils
{
	/**
	 * The defined fields on which we can search.
	 * @author niels
	 */
	public static enum SearchField
	{
		ProtoStreamFile,
		EntryNumberInFile,
		Title,
		ContentExtract
	}

	/**
	 * Creates a Lucene index of the chosen elements.
	 * Exits the JVM (status 1) if the index directory already exists or the
	 * document directory is missing/unreadable.
	 * @param indexDir The directory where to store the index
	 * @param dir The directory to index.
	 * @param lang The language directories which to include in the index
	 * @param source The SourceType directories which to include in the index
	 * @param analyzer The analyzer to use
	 */
	public static void createIndex(String indexDir, String dir, Set<Language> lang, Set<SourceType> source, Analyzer analyzer)
	{
		File index = new File(indexDir);

		if (index.exists())
		{
			System.out.println("Cannot save index to '" + indexDir	+ "' directory, please delete it first");
			System.exit(1);
		}
		final File docDir = new File(dir);
		if (!docDir.exists() || !docDir.canRead())
		{
			System.out.println("Document directory '" + docDir.getAbsolutePath() + "' does not exist or is not readable, please check the path");
			System.exit(1);
		}

		try
		{
			IndexWriter writer = new IndexWriter(FSDirectory.open(index), analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
			try
			{
				System.out.println("Indexing to directory '" + indexDir + "'...");
				indexDocs(docDir.toString() + File.separator, writer, docDir, lang, source);
				System.out.println("Optimizing...");
				writer.optimize();
			}
			finally
			{
				// always release the index write lock, even when indexing fails midway
				writer.close();
			}
		}
		catch (IOException e)
		{
			e.printStackTrace();
		}
	}

	/**
	 * Recursively indexes a file or directory. Directories whose name matches a
	 * {@link Language} or {@link SourceType} constant are only descended into when
	 * that constant is contained in the corresponding filter set; all other
	 * directories are always traversed.
	 * @param omitPath Path prefix stripped from stored file paths
	 * @param writer The index writer to add documents to
	 * @param file The file or directory to index
	 * @param lang The language directories to include
	 * @param source The SourceType directories to include
	 * @throws IOException if adding a document to the index fails
	 */
	private static void indexDocs(String omitPath, IndexWriter writer, File file, Set<Language> lang, Set<SourceType> source) throws IOException
	{
		// do not try to index files that cannot be read
		if (!file.canRead())
			return;

		if (file.isDirectory())
		{
			System.out.println("Traversing " + file.toString());
			Language l = parseLanguage(file.getName());
			SourceType s = parseSourceType(file.getName());

			if (l != null && s == null)
			{
				// language directory: recurse only when that language is requested
				if (lang.contains(l))
					indexChildren(omitPath, writer, file, lang, source);
			}
			else if (l == null && s != null)
			{
				// source-type directory: recurse only when that source is requested
				if (source.contains(s))
					indexChildren(omitPath, writer, file, lang, source);
			}
			else
			{
				// ordinary (or ambiguous) directory: always recurse
				indexChildren(omitPath, writer, file, lang, source);
			}
		}
		else
		{
			try
			{
				for (Document d : LuceneUtils.ProtosteamPermalinkToDocuments(file, omitPath))
				{
					writer.addDocument(d);
				}
			}
			// at least on windows, some temporary files raise this exception with an
			// "access denied" message; checking if the file can be read doesn't help
			catch (FileNotFoundException ignored)
			{
				// deliberately skipped: best-effort indexing
			}
		}
	}

	/**
	 * Indexes every entry of a directory, printing progress every 1000 entries.
	 * @throws IOException if adding a document to the index fails
	 */
	private static void indexChildren(String omitPath, IndexWriter writer, File dir, Set<Language> lang, Set<SourceType> source) throws IOException
	{
		String[] files = dir.list();
		// an IO error could occur, in which case list() returns null
		if (files == null)
			return;
		for (int i = 0; i < files.length; i++)
		{
			if (i % 1000 == 0)
				System.out.println(i + "/" + files.length + " in directory " + dir.toString());
			indexDocs(omitPath, writer, new File(dir, files[i]), lang, source);
		}
	}

	/** @return the {@link Language} with the given name, or null if there is none. */
	private static Language parseLanguage(String name)
	{
		try
		{
			return Language.valueOf(name);
		}
		catch (Exception ignored)
		{
			// name is not a language directory
			return null;
		}
	}

	/** @return the {@link SourceType} with the given name, or null if there is none. */
	private static SourceType parseSourceType(String name)
	{
		try
		{
			return SourceType.valueOf(name);
		}
		catch (Exception ignored)
		{
			// name is not a source-type directory
			return null;
		}
	}

	/**
	 * Parses the query string and searches a Lucene index, printing the hits.
	 * @param indexDir The directory where the index is stored
	 * @param query The query
	 * @param field The field to search in.
	 * @param maxHits The maximum number of hits to display
	 * @throws ParseException if the query string cannot be parsed
	 * @throws CorruptIndexException if the index is corrupt
	 * @throws IOException if the index cannot be read
	 */
	public static void search(String indexDir, String query, SearchField field, int maxHits) throws ParseException, CorruptIndexException, IOException
	{
		Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
		QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field.toString(), analyzer);
		Query q = parser.parse(query);
		search(indexDir, q, field, maxHits);
	}

	/**
	 * Searches a Lucene index and prints, for each hit, its score plus the stored
	 * protostream file path and entry number.
	 * @param indexDir The directory where the index is stored
	 * @param query The query
	 * @param field The field to search in (used only for displaying the query).
	 * @param maxHits The maximum number of hits to display
	 * @throws CorruptIndexException if the index is corrupt
	 * @throws IOException if the index cannot be read
	 */
	public static void search(String indexDir, Query query, SearchField field, int maxHits) throws CorruptIndexException, IOException
	{
		IndexReader reader = IndexReader.open(FSDirectory.open(new File(indexDir)), true);
		try
		{
			System.out.println("There are " + reader.numDocs() + " documents in the index.");

			Searcher searcher = new IndexSearcher(reader);

			TopScoreDocCollector collector = TopScoreDocCollector.create(maxHits, false);
			searcher.search(query, collector);
			ScoreDoc[] hits = collector.topDocs().scoreDocs;

			int numTotalHits = collector.getTotalHits();
			System.out.println("There are " + numTotalHits + " hits for '" + query.toString(field.toString()) + "'.");

			for (int i = 0; i < hits.length; i++)
			{
				if (i % 1000 == 0)
				{
					System.out.println(i + " of " + hits.length);
				}
				Document doc = searcher.doc(hits[i].doc);
				String path = doc.get(SearchField.ProtoStreamFile.toString());
				String entryIndex = doc.get(SearchField.EntryNumberInFile.toString());
				System.out.println("Score: "+ hits[i].score + " File: " + path + " Index: " + entryIndex);
			}
		}
		finally
		{
			// release the reader even when the search throws
			reader.close();
		}
	}

	/**
	 * Converts the entries of a protostream file to a list of Lucene readable Documents.
	 * @param protosteam The protostream file
	 * @return A list of Documents
	 * @throws FileNotFoundException if the file does not exist
	 */
	public static List<Document> ProtosteamPermalinkToDocuments(File protosteam) throws FileNotFoundException
	{
		return ProtosteamPermalinkToDocuments(protosteam, null);
	}

	/**
	 * Converts the entries of a protostream file to a list of Lucene readable Documents.
	 * Entries without a permalink entry are skipped; failures on a single entry are
	 * logged and do not abort the conversion.
	 * @param protosteam The protostream file
	 * @param omitPath Path prefix to strip from the stored file path, or null to store it whole
	 * @return A list of Documents
	 * @throws FileNotFoundException if the file does not exist
	 */
	public static List<Document> ProtosteamPermalinkToDocuments(File protosteam, String omitPath) throws FileNotFoundException
	{
		List<Entry> entries = ProtoStreamUtils.read(protosteam);
		List<Document> docs = new ArrayList<Document>(entries.size());

		for (int i = 0; i < entries.size(); i++)
		{
			if (!entries.get(i).hasPermalinkEntry())
				continue;
			try
			{
				Document doc = new Document();
				// add the path; omitPath is a literal prefix, so quote it before the
				// regex-based replaceFirst (backslashes/metachars would otherwise break it)
				String path = protosteam.getPath();
				if (omitPath != null)
					path = path.replaceFirst(Pattern.quote(omitPath), "");
				doc.add(new Field(SearchField.ProtoStreamFile.toString(), path, Field.Store.YES, Field.Index.NOT_ANALYZED));
				// add the entry number
				doc.add(new Field(SearchField.EntryNumberInFile.toString(), Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
				// now get the permalink entry
				PermalinkEntry pe = entries.get(i).getPermalinkEntry();
				// get the title of the entry
				if (pe.hasTitle())
				{
					doc.add(new Field(SearchField.Title.toString(), new StringReader(pe.getTitle())));
				}
				// if there is any content where the chrome/boilerplate stuff has been removed,
				// then extract and get it (stripping any remaining markup tags)
				if (pe.hasContentExtract())
				{
					String cleanerContent = ProtoStreamUtils.contentToString(pe.getContentExtract());
					String text = cleanerContent.replaceAll("<[^>]*>","");

					doc.add(new Field(SearchField.ContentExtract.toString(), new StringReader(text)));
				}
				docs.add(doc);
			}
			catch (Exception e)
			{
				e.printStackTrace();
			}
		}
		return docs;
	}
}
