package com.ibm.sand.rt.index;

import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.lucene.document.Document;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.InconsistentTaxonomyException;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;

import com.ibm.ilel.IlelException;
import com.ibm.sand.rt.api.SearchResult;
import com.ibm.sand.rt.config.Config;
import com.ibm.sand.rt.config.RTConsts;
import com.ibm.sand.rt.engine.SearchEngine;
import com.ibm.sand.rt.indexers.RTDocument;
import com.ibm.sand.rt.parsers.ConnectionsEntityRelationsManager;

public enum IndexManager {
	INSTANCE;
	// Taxonomy reader/writer pair for facet categories; the writer is opened
	// (and committed) before the reader in openIndices().
	private DirectoryTaxonomyReader			taxReader;
	private DirectoryTaxonomyWriter			taxWriter;
	// Owns the main index readers/writers and their acquire/release lifecycle.
	private ReadersWritersManager			rwManager;

	// Latest document date per crawler source; persisted on commit.
	private SourceDates						sourceDates;
	// Recently indexed documents, used for duplicate detection in addDocument().
	private DocumentsCache					docsCache;
	// Cached per-entity frequencies; rebuilt on commit when refreshCacheOnCommit.
	private EntitiesFreqCache				entitiesFreqCache;
	// Millis timestamp of the last reader refresh; exposed via getRefreshness().
	private final AtomicLong				lastRefresh				= new AtomicLong();

	// Background thread running TempIndexRefreshner (presumably refreshes the
	// temp indexes periodically — behavior defined elsewhere).
	private Thread							refreshner;
	// Cached numDocs() snapshot, updated by updateIndexSize(); may lag the writer.
	private int								indexSize;
	// Read lock: index/taxonomy mutation. Write lock: reader/taxonomy refresh.
	// See addDocument() and refreshReaders() for the protocol.
	private final ReentrantReadWriteLock	indexLock				= new ReentrantReadWriteLock();
	// Documents added since the last batch commit; reset via CAS when a batch commits.
	private final AtomicInteger				documentsCount			= new AtomicInteger(0);
	// Whether commit() also persists and refreshes caches; see refreshCacheOnCommit().
	private boolean							refreshCacheOnCommit	= true;

	/**
	 * Opens the indices, restores the per-source dates and the caches, and
	 * starts the background temp-index refresher thread. Any failure is fatal
	 * for this singleton and is rethrown unchecked with the original
	 * exception preserved as the cause.
	 */
	private IndexManager() {
		try {
			openIndices();
			sourceDates = new SourceDates();
			initCaches();
			updateIndexSize();
			System.out.println("Number of documents in index is: " + indexSize);
			refreshner = new Thread(new TempIndexRefreshner());
			refreshner.start();
		} catch (final Exception e) {
			// The cause travels with the RuntimeException; printing the stack
			// trace here as well would only duplicate it on stderr.
			throw new RuntimeException("IndexManager initialization failed", e);
		}
	}

	/**
	 * Indexes a single document (keyed by its id, so re-adding replaces the
	 * old copy) and advances the crawler's latest-document date. Documents
	 * found in the duplicates cache are skipped. Every
	 * {@code RTConsts.Index.BATCH_SIZE} additions the index is committed.
	 * <p>
	 * NOTE(review): the original comment warned that this method "calls
	 * startModifyingWriter()" and must not be invoked from code that already
	 * modifies the writer, to prevent a deadlock — that helper is not visible
	 * here; confirm whether the restriction still applies via rwManager.
	 *
	 * @param rtDoc
	 *            the document to add; null is a no-op and returns false
	 * @param crawler
	 *            source whose latest date is updated from rtDoc's date
	 * @return true if the document was handed to the writer, false if it was
	 *         null or a duplicate
	 */
	public boolean addDocument(final RTDocument rtDoc, final String crawler) throws CorruptIndexException, IOException, IlelException, ParseException, InterruptedException {
		if (rtDoc == null) return false;
		sourceDates.updateSourceDate(crawler, rtDoc.getDate());
		if (docsCache.isInCache(rtDoc)) // duplicate
		return false;
		updateEntitiesCache(rtDoc);

		if (rtDoc.getIndexingTimestamp() < 0) {
			rtDoc.setIndexingTimestamp(System.currentTimeMillis());
		}
		// Building the Lucene document is handed the taxonomy writer —
		// presumably it registers facet categories as a side effect.
		final Document doc = rtDoc.toLuceneDocument(this.taxWriter);
		final Term idT = new Term(RTConsts.ID, rtDoc.getId());

		// Shared (read) lock: refreshReaders() takes the write lock, so the
		// taxonomy cannot be refreshed between the taxonomy update above and
		// the writer update below — keeping the two guaranteed in sync.
		indexLock.readLock().lock();
		try {
			rwManager.updateDocument(idT, doc);

			final int count = documentsCount.incrementAndGet();
			if (count >= RTConsts.Index.BATCH_SIZE) {
				// compareAndSet ensures only one racing thread commits the batch.
				if (documentsCount.compareAndSet(count, 0)) {
					commit(refreshCacheOnCommit);
				}
			}
		} finally {
			indexLock.readLock().unlock();
		}

		return true;
	}

	// HACK: pre-register the fixed categories so that every ordinal we later
	// calculate upon ends up greater than 5 — the ordinal policy in
	// SandStreamsFacetIndexingParams ignores ordinals 1-5.
	// TODO: remove this once moving to Ilel 1.5.
	private void addTaxonomyCategories() throws IOException {
		final CategoryPath[] reserved = { RTConsts.AUTHOR_CATEGORY, RTConsts.SOURCE_CATEGORY, RTConsts.TAGS_CATEGORY, RTConsts.DAY_CATEGORY, RTConsts.RELATED_P_CATEGORY };
		for (final CategoryPath category : reserved) {
			taxWriter.addCategory(category);
		}
		taxWriter.commit();
	}

	/**
	 * Commits the taxonomy and the main index so all pending data is saved to
	 * the file system, then refreshes the cached index size.
	 *
	 * @param refreshCaches
	 *            when true, also persists the per-source dates and the entity
	 *            relations, and rebuilds the entity-frequency cache
	 * @throws IOException
	 * @throws InterruptedException
	 */
	public void commit(final boolean refreshCaches) throws IOException, InterruptedException {
		final long start = System.currentTimeMillis();
		// Fixed typo in the log message ("commiting" -> "committing").
		System.out.println(new Date(start) + " committing index...");
		// Taxonomy first: documents reference taxonomy ordinals, so the
		// taxonomy commit must never lag behind the index commit.
		taxWriter.commit();
		rwManager.commit();
		updateIndexSize();
		if (refreshCaches) {
			sourceDates.persistSourcesDates();
			ConnectionsEntityRelationsManager.instance.persistManager();
			refreshEntitiesFreqCache();
		}
		System.out.println(new Date() + " done committing. took " + (System.currentTimeMillis() - start) + " millis");
	}

	/**
	 * Document frequency of each term, searched in the "content" field. A
	 * multi-word tag (after stripping quotes) of exactly two words is
	 * approximated by {@code min(df(w1), df(w2)) / 2}, matching the heuristic
	 * in {@link #docFreq(Term)}.
	 *
	 * @param terms
	 *            terms to look up
	 * @return frequencies, positionally aligned with {@code terms}
	 * @throws IOException
	 */
	public int[] docFreq(final List<String> terms) throws IOException {
		final IndexReader reader = rwManager.get();
		try {
			final int[] res = new int[terms.size()];
			for (int i = 0; i < terms.size(); i++) {
				final String term = terms.get(i);
				if (term.contains(" ")) { // handle multi-word tag
					final String[] s = term.replace("\"", "").split(" ");
					if (s.length == 2) {
						final int df1 = reader.docFreq(new Term("content", s[0]));
						final int df2 = reader.docFreq(new Term("content", s[1]));
						res[i] = Math.min(df1, df2) / 2;
					} else if (s.length > 0) {
						// Previously tags with more than two words were
						// silently reported as 0; fall back to the first
						// word's frequency, as docFreq(Term) does.
						res[i] = reader.docFreq(new Term("content", s[0]));
					}
				} else {
					res[i] = reader.docFreq(new Term("content", term));
				}
			}
			return res;

		} finally {
			rwManager.release(reader);
		}

	}

	/**
	 * Document frequency of a single term. A quoted two-word tag is
	 * approximated by half the smaller of the two single-word frequencies;
	 * longer tags fall back to the first word's frequency.
	 */
	public int docFreq(final Term term) throws IOException {
		final IndexReader reader = rwManager.get();
		try {
			final String text = term.text();
			if (!text.contains(" ")) {
				// Plain single-word term: exact lookup.
				return reader.docFreq(term);
			}
			// Multi-word tag: strip quotes and split into individual words.
			final String[] words = text.replace("\"", "").split(" ");
			if (words.length != 2) {
				System.err.println("IndexManager.docFreq: requested doc freq for tag with more then 2 words " + text);
				return reader.docFreq(new Term(term.field(), words[0]));
			}
			final int first = reader.docFreq(new Term(term.field(), words[0]));
			final int second = reader.docFreq(new Term(term.field(), words[1]));
			// BSQ Why do we divide by 2? Why do we consider that as a
			// good approximation?
			return Math.min(first, second) / 2;
		} finally {
			rwManager.release(reader);
		}
	}

	/**
	 * Frequency of {@code entity} within {@code field}, answered by the
	 * entity-frequency cache using a borrowed index reader.
	 */
	public int entityFreq(final String entity, final String field) throws IOException {
		final IndexReader reader = rwManager.get();
		try {
			final int freq = entitiesFreqCache.freq(entity, field, reader);
			return freq;
		} finally {
			rwManager.release(reader);
		}
	}

	/**
	 * Returns the date of the most recent document seen for the given crawler
	 * source.
	 *
	 * @param crawler
	 *            source identifier
	 * @return the source's latest document date
	 * @throws CorruptIndexException
	 * @throws IOException
	 */
	public Date getCrawlerDate(final String crawler) throws CorruptIndexException, IOException {
		final Date latest = sourceDates.getSourceDate(crawler);
		return latest;
	}

	/**
	 * Indexing date of the first document indexed into the given directory.
	 */
	public long getFirstIndexedDate(final Directory dir) {
		final long first = rwManager.getDirectoryFirstIndexedDate(dir);
		return first;
	}

	/**
	 * Indexing date of the last document indexed into the given directory.
	 */
	public long getLastIndexedDate(final Directory dir) {
		final long last = rwManager.getDirectoryLastIndexedDate(dir);
		return last;
	}

	/**
	 * Millis timestamp of the most recent reader refresh.
	 */
	public long getRefreshness() {
		final long stamp = lastRefresh.get();
		return stamp;
	}

	/**
	 * Human-readable statistics: the entity-frequency cache stats followed by
	 * the index stats, separated by a newline.
	 */
	public String getStatistics() throws SQLException, IOException {
		return entitiesFreqCache.getStatistics() + "\n" + rwManager.getIndexStatistics();
	}

	/**
	 * Intentionally empty: all initialization happens in the enum constructor
	 * when {@code INSTANCE} is first loaded. Calling this merely forces that
	 * class-loading to occur.
	 */
	public void init() throws Exception {
		// nothing to do
	}

	/**
	 * Builds the in-memory caches on startup: the documents cache (ids of the
	 * latest indexed documents, to avoid duplicates) and the entity-frequency
	 * cache (seeded from both the temp indexes and the main index).
	 *
	 * @throws IOException
	 */
	private void initCaches() throws IOException {
		final IndexReader reader = rwManager.get();
		try {
			final IndexSearcher searcher = rwManager.getMatchingSearcher(reader);
			// Load the most recent ids already in the index for duplicate detection.
			docsCache = new DocumentsCache(searcher, reader, RTConsts.Index.MAXIMUM_DOCUMENTS_IN_CACHE, RTConsts.Index.MINIMUM_DOCUMENTS_IN_CACHE);

			// The temp-index reader is released before the main reader
			// (reverse acquisition order).
			final IndexReader tempIndexesReader = rwManager.getTempReaders();
			try {
				entitiesFreqCache = new EntitiesFreqCache(tempIndexesReader, reader);
			} finally {
				rwManager.release(tempIndexesReader);
			}

		} finally {
			rwManager.release(reader);
		}
	}

	/**
	 * Ensures that if addDocument is called, there will be no duplicate
	 * entries in the index. Note that this method can only check against the
	 * index visible to the searcher (the index that existed when the reader
	 * was opened) and therefore may miss documents added after the last
	 * readers refresh.
	 *
	 * @param q
	 *            query describing the document
	 * @return true if there is a duplicate copy in the index
	 * @throws IOException
	 */
	public boolean isDuplicate(final Query q) throws IOException {
		boolean res = false;
		final IndexReader reader = rwManager.get();
		try {
			final IndexSearcher searcher = rwManager.getMatchingSearcher(reader);
			final Scorer scorer = q.createWeight(searcher).scorer(reader, true, false);
			// Weight.scorer() returns null when the query matches no documents
			// in this reader; the previous code dereferenced it unconditionally
			// and would throw a NullPointerException in that case.
			if (scorer != null && scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
				System.out.println("found duplicate for query:" + q);
				res = true;
			}
		} finally {
			rwManager.release(reader);
		}

		return res;

	}

	/**
	 * Number of documents in the index as of the last commit/refresh.
	 * Deliberately unsynchronized: a slightly stale value is acceptable here.
	 */
	public int numDocs() throws IOException {
		return indexSize;
	}

	/**
	 * Opens the readers/writers manager and the taxonomy writer/reader.
	 * Called in the constructor and after merging the indices.
	 * <p>
	 * Order matters: the taxonomy writer is opened, and the reserved
	 * categories committed, before the taxonomy reader is opened, so the
	 * reader always opens on a valid commit point.
	 *
	 * @throws InterruptedException
	 */
	private void openIndices() throws CorruptIndexException, IOException, InterruptedException {
		rwManager = new ReadersWritersManager();
		// Single-instance lock factory: only this JVM is expected to open the
		// taxonomy directory.
		final Directory taxonomyDirectory = FSDirectory.open(new File(Config.INSTANCE.getTaxonomyIndexLocation()), new SingleInstanceLockFactory());
		taxWriter = new DirectoryTaxonomyWriter(taxonomyDirectory, OpenMode.CREATE_OR_APPEND);
		addTaxonomyCategories(); // ordinal-reservation hack; see that method
		taxReader = new DirectoryTaxonomyReader(taxonomyDirectory);
		lastRefresh.set(System.currentTimeMillis());
	}

	/**
	 * Controls whether commit() also persists the source dates and refreshes
	 * the entity-frequency cache.
	 */
	public void refreshCacheOnCommit(final boolean value) {
		refreshCacheOnCommit = value;
	}

	/**
	 * Rebuilds the entity-frequency cache from the temp indexes and the main
	 * index. Both readers are borrowed from the manager and released in
	 * reverse acquisition order.
	 */
	private void refreshEntitiesFreqCache() throws IOException {
		final IndexReader mainReader = rwManager.get();
		try {
			final IndexReader tempReader = rwManager.getTempReaders();
			try {
				entitiesFreqCache.refreshFreqs(tempReader, mainReader);
			} finally {
				rwManager.release(tempReader);
			}
		} finally {
			rwManager.release(mainReader);
		}
	}

	/**
	 * Refreshes the taxonomy reader and the index readers. The exclusive
	 * (write) lock guarantees no document is added between refreshing the
	 * taxonomy and flushing the writer.
	 */
	public void refreshReaders() throws IOException, InterruptedException, InconsistentTaxonomyException {
		indexLock.writeLock().lock();
		try {
			// NOTE: ilel bug — refreshing the taxonomy while categories are
			// concurrently added to it might corrupt it. Corruption cannot
			// occur here because we hold the index write lock, which excludes
			// addDocument() and updateTaxonomyCategory() (read-lock holders).
			refreshTaxonomy();
			rwManager.refreshIndex();
		} finally {
			indexLock.writeLock().unlock();
		}
		lastRefresh.set(System.currentTimeMillis());
	}

	// Commit pending taxonomy changes first so the refreshed reader is
	// guaranteed to see everything the writer has added.
	private void refreshTaxonomy() throws IOException, InconsistentTaxonomyException {
		taxWriter.commit();
		taxReader.refresh();
	}

	/**
	 * Delegates the search to a different class, lending it a matched
	 * reader/searcher pair plus the taxonomy reader.
	 *
	 * @param searchEngine
	 *            the delegate that actually runs the query
	 * @throws ParseException
	 */
	public SearchResult search(final SearchEngine searchEngine) throws IOException, ParseException {
		final IndexReader reader = rwManager.get();
		try {
			return searchEngine.search(rwManager.getMatchingSearcher(reader), reader, taxReader);
		} finally {
			rwManager.release(reader);
		}
	}

	/**
	 * Same as {@link #search(SearchEngine)}, but the reader is acquired
	 * relative to the given {@code from} timestamp.
	 */
	public SearchResult search(final SearchEngine searchEngine, final long from) throws IOException, ParseException {
		final IndexReader reader = rwManager.get(from);
		try {
			return searchEngine.search(rwManager.getMatchingSearcher(reader), reader, taxReader);
		} finally {
			rwManager.release(reader);
		}
	}

	/**
	 * Bumps the cached frequency for every entity this document references:
	 * its containers, its author's e-mail (lower-cased) and its link. Null
	 * values are skipped.
	 */
	private void updateEntitiesCache(final RTDocument rtDoc) throws IOException {
		final IndexReader reader = rwManager.get();
		try {
			final String email = rtDoc.getAuthorEmail();
			// NOTE(review): toLowerCase() uses the default locale — confirm
			// this matches how e-mail keys are normalized elsewhere.
			final String[] entities = { rtDoc.getContainer(), rtDoc.getContainer2(), email == null ? null : email.toLowerCase(), rtDoc.getLink() };
			for (final String entity : entities) {
				if (entity != null) {
					entitiesFreqCache.incFreq(entity, reader);
				}
			}
		} finally {
			rwManager.release(reader);
		}
	}

	/**
	 * Refreshes the cached document count from a freshly borrowed reader.
	 */
	private void updateIndexSize() throws IOException {
		final IndexReader snapshot = rwManager.get();
		try {
			indexSize = snapshot.numDocs();
		} finally {
			rwManager.release(snapshot);
		}
	}

	/**
	 * Updates the taxonomy with a set of items belonging to a specific
	 * category (for example, adds a set of tags) and commits the taxonomy
	 * writer.
	 * <p>
	 * Holds the index read lock because of an ilel bug: refreshing the
	 * taxonomy while categories are added to it might corrupt it, and
	 * refreshReaders() takes the write lock.
	 *
	 * @throws IOException
	 */
	public void updateTaxonomyCategory(final Set<String> items, final String category) throws IOException {
		indexLock.readLock().lock();
		try {
			for (final String child : items) {
				taxWriter.addCategory(new CategoryPath(category, child));
			}
			taxWriter.commit();
		} finally {
			indexLock.readLock().unlock();
		}
	}

}
