package com.wistech.solr.elevation;

import java.io.IOException;
import java.io.StringReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SentinelIntSet;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.QueryElevationParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.handler.component.SearchComponent;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.transform.ElevatedMarkerFactory;
import org.apache.solr.response.transform.ExcludedMarkerFactory;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.SortSpec;
import org.apache.solr.util.plugin.SolrCoreAware;

/**
 * A component to elevate some documents to the top of the result set for
 * configured query strings, optionally excluding or marking others.
 * <p>
 * Subclasses supply the elevation configuration by implementing
 * {@link #getElevationMap(IndexReader, SolrCore)}; this base class handles
 * query rewriting, custom sorting, and the elevated/excluded marker
 * transformers.
 */
public abstract class AbstractQueryElevationComponent extends SearchComponent
		implements SolrCoreAware {

	// Constants used in solrconfig.xml
	static final String FIELD_TYPE = "queryFieldType";
	static final String EXCLUDE = "exclude";
	/** Request-context key under which the set of elevated (boosted) ids is stored. */
	public static final String BOOSTED = "BOOSTED";
	/** Request-context key under which the set of excluded ids is stored. */
	public static final String EXCLUDED = "EXCLUDED";

	// Runtime param -- should be in common?

	protected SolrParams initArgs = null;
	// Optional query analyzer used to normalize query strings before lookup;
	// null means the raw query string is used as the map key.
	protected Analyzer analyzer = null;
	protected String idFieldName = null;
	protected FieldType idFieldType;

	protected boolean forceElevation = false;

	/**
	 * Parsed elevation configuration for a single query string: the ids to
	 * elevate (with their relative priority), the query that matches them,
	 * and the ids to exclude.
	 */
	public class ElevationBean {
		final String text;
		final String analyzed;
		final TermQuery[] exclude;// just keep the term query, b/c we will not
									// always explicitly exclude the item based
									// on markExcludes query time param
		final BooleanQuery include;
		final Map<BytesRef, Integer> priority;
		final Set<String> ids;
		final Set<String> excludeIds;

		/**
		 * @param qstr    the raw query string this elevation applies to
		 * @param elevate ids (readable form) to elevate, in priority order
		 * @param exclude ids (readable form) to exclude; may be null or empty
		 * @throws IOException if analyzing the query string fails
		 */
		public ElevationBean(String qstr, List<String> elevate,
				List<String> exclude) throws IOException {
			this.text = qstr;
			this.analyzed = getAnalyzedQuery(this.text);
			this.ids = new HashSet<String>();
			this.excludeIds = new HashSet<String>();

			// boost 0 so the injected clauses match without contributing to
			// the relevance score of the user's query
			this.include = new BooleanQuery();
			this.include.setBoost(0);
			this.priority = new HashMap<BytesRef, Integer>();
			// earlier entries get a larger priority value; the +5 keeps all
			// priorities comfortably above 0 (the non-elevated value)
			int max = elevate.size() + 5;
			for (String id : elevate) {
				id = idFieldType.readableToIndexed(id);
				ids.add(id);
				TermQuery tq = new TermQuery(new Term(idFieldName, id));
				include.add(tq, BooleanClause.Occur.SHOULD);
				this.priority.put(new BytesRef(id), max--);
			}

			if (exclude == null || exclude.isEmpty()) {
				this.exclude = null;
			} else {
				this.exclude = new TermQuery[exclude.size()];
				for (int i = 0; i < exclude.size(); i++) {
					String id = idFieldType.readableToIndexed(exclude.get(i));
					excludeIds.add(id);
					this.exclude[i] = new TermQuery(new Term(idFieldName, id));
				}
			}
		}
	}

	@Override
	@SuppressWarnings("rawtypes")
	public void init(NamedList args) {
		this.initArgs = SolrParams.toSolrParams(args);
	}

	/**
	 * Resolves the uniqueKey field and optional query analyzer from the
	 * schema, and registers the elevated/excluded marker transformers.
	 *
	 * @throws SolrException if the configured field type is unknown or the
	 *                       schema has no uniqueKeyField
	 */
	@Override
	public void inform(SolrCore core) {
		String fieldTypeParam = initArgs.get(FIELD_TYPE);
		if (fieldTypeParam != null) {
			FieldType ft = core.getSchema().getFieldTypes().get(fieldTypeParam);
			if (ft == null) {
				throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
						"Unknown FieldType: '" + fieldTypeParam
								+ "' used in QueryElevationComponent");
			}
			analyzer = ft.getQueryAnalyzer();
		}

		SchemaField idField = core.getSchema().getUniqueKeyField();
		if (idField == null) {
			throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
					"QueryElevationComponent requires the schema to have a uniqueKeyField.");
		}
		idFieldType = idField.getType();
		idFieldName = idField.getName();
		// register the ExcludedMarkerFactory; fall back to "excluded" if the
		// configured name is missing or blank
		String excludeName = initArgs.get(
				QueryElevationParams.EXCLUDE_MARKER_FIELD_NAME, "excluded");
		if (excludeName == null || excludeName.isEmpty()) {
			excludeName = "excluded";
		}
		ExcludedMarkerFactory excludedMarkerFactory = new ExcludedMarkerFactory();
		core.addTransformerFactory(excludeName, excludedMarkerFactory);

		ElevatedMarkerFactory elevatedMarkerFactory = new ElevatedMarkerFactory();
		String markerName = initArgs.get(
				QueryElevationParams.EDITORIAL_MARKER_FIELD_NAME, "elevated");
		if (markerName == null || markerName.isEmpty()) {
			markerName = "elevated";
		}
		core.addTransformerFactory(markerName, elevatedMarkerFactory);
		forceElevation = initArgs.getBool(QueryElevationParams.FORCE_ELEVATION,
				forceElevation);

	}

	/**
	 * Loads the elevation map, keyed by (analyzed) query string. Implemented
	 * by subclasses; typically backed by configuration in the data dir.
	 */
	protected abstract Map<String, ElevationBean> getElevationMap(
			IndexReader reader, SolrCore core) throws Exception;

	/**
	 * Normalizes a query string with the configured analyzer by concatenating
	 * its tokens. Returns the input unchanged when no analyzer is configured.
	 *
	 * @throws IOException if the analyzer fails
	 */
	String getAnalyzedQuery(String query) throws IOException {
		if (analyzer == null) {
			return query;
		}
		StringBuilder norm = new StringBuilder();
		TokenStream tokens = analyzer.tokenStream("", new StringReader(query));
		// close() must run even if reset()/incrementToken() throws, per the
		// TokenStream workflow contract -- otherwise the stream leaks
		try {
			tokens.reset();

			CharTermAttribute termAtt = tokens
					.addAttribute(CharTermAttribute.class);
			while (tokens.incrementToken()) {
				norm.append(termAtt.buffer(), 0, termAtt.length());
			}
			tokens.end();
		} finally {
			tokens.close();
		}
		return norm.toString();
	}

	// ---------------------------------------------------------------------------------
	// SearchComponent
	// ---------------------------------------------------------------------------------

	/**
	 * Looks up the elevation entry for the (analyzed) query string and, when
	 * one exists, rewrites the query to include the elevated documents,
	 * optionally excludes/marks others, and installs a sort that places the
	 * elevated documents first.
	 */
	@Override
	public void prepare(ResponseBuilder rb) throws IOException {
		SolrQueryRequest req = rb.req;
		SolrParams params = req.getParams();
		// A runtime param can skip
		if (!params.getBool(QueryElevationParams.ENABLE, true)) {
			return;
		}

		boolean exclusive = params.getBool(QueryElevationParams.EXCLUSIVE,
				false);
		// A runtime parameter can alter the config value for forceElevation
		boolean force = params.getBool(QueryElevationParams.FORCE_ELEVATION,
				forceElevation);
		boolean markExcludes = params.getBool(
				QueryElevationParams.MARK_EXCLUDES, false);
		Query query = rb.getQuery();
		String qstr = rb.getQueryString();
		if (query == null || qstr == null) {
			return;
		}

		qstr = getAnalyzedQuery(qstr);
		IndexReader reader = req.getSearcher().getIndexReader();
		ElevationBean booster = null;
		try {
			booster = getElevationMap(reader, req.getCore()).get(qstr);
		} catch (Exception ex) {
			throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
					"Error loading elevation", ex);
		}

		if (booster != null) {
			rb.req.getContext().put(BOOSTED, booster.ids);

			// Change the query to insert forced documents
			if (exclusive) {
				// we only want these results
				rb.setQuery(booster.include);
			} else {
				BooleanQuery newq = new BooleanQuery(true);
				newq.add(query, BooleanClause.Occur.SHOULD);
				newq.add(booster.include, BooleanClause.Occur.SHOULD);
				if (booster.exclude != null) {
					if (!markExcludes) {
						for (TermQuery tq : booster.exclude) {
							newq.add(new BooleanClause(tq,
									BooleanClause.Occur.MUST_NOT));
						}
					} else {
						// we are only going to mark items as excluded, not
						// actually exclude them. This works
						// with the EditorialMarkerFactory
						rb.req.getContext().put(EXCLUDED, booster.excludeIds);
					}
				}
				rb.setQuery(newq);
			}

			ElevationComparatorSource comparator = new ElevationComparatorSource(
					booster);
			// if the sort is 'score desc' use a custom sorting method to
			// insert documents in their proper place
			SortSpec sortSpec = rb.getSortSpec();
			if (sortSpec.getSort() == null) {
				sortSpec.setSort(new Sort(new SortField[] {
						new SortField("_elevate_", comparator, true),
						new SortField(null, SortField.Type.SCORE, false) }));
			} else {
				// Check if the sort is based on score
				boolean modify = false;
				SortField[] current = sortSpec.getSort().getSort();
				ArrayList<SortField> sorts = new ArrayList<SortField>(
						current.length + 1);
				// Perhaps force it to always sort by score
				if (force && current[0].getType() != SortField.Type.SCORE) {
					sorts.add(new SortField("_elevate_", comparator, true));
					modify = true;
				}
				for (SortField sf : current) {
					if (sf.getType() == SortField.Type.SCORE) {
						// put the elevation sort directly ahead of the score
						// sort, matching the score sort's direction
						sorts.add(new SortField("_elevate_", comparator, !sf
								.getReverse()));
						modify = true;
					}
					sorts.add(sf);
				}
				if (modify) {
					sortSpec.setSort(new Sort(sorts.toArray(new SortField[sorts
							.size()])));
				}
			}
		}

		// Add debugging information; only build the map when it will be used
		if (rb.isDebug() && rb.isDebugQuery()) {
			List<String> match = null;
			if (booster != null) {
				// Extract the elevated terms into a list
				match = new ArrayList<String>(booster.priority.size());
				for (Object o : booster.include.clauses()) {
					TermQuery tq = (TermQuery) ((BooleanClause) o).getQuery();
					match.add(tq.getTerm().text());
				}
			}

			SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<Object>();
			dbg.add("q", qstr);
			dbg.add("match", match);
			rb.addDebugInfo("queryBoosting", dbg);
		}
	}

	@Override
	public void process(ResponseBuilder rb) throws IOException {
		// Do nothing -- the real work is modifying the input query
	}

	// ---------------------------------------------------------------------------------
	// SolrInfoMBean
	// ---------------------------------------------------------------------------------

	@Override
	public String getDescription() {
		return "Query Boosting -- boost particular documents for a given query";
	}

	@Override
	public String getSource() {
		return "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_4_1/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java $";
	}

	@Override
	public URL[] getDocs() {
		try {
			return new URL[] { new URL(
					"http://wiki.apache.org/solr/QueryElevationComponent") };
		} catch (MalformedURLException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Produces comparators that sort elevated documents first by their
	 * configured priority (0 for non-elevated documents).
	 */
	class ElevationComparatorSource extends FieldComparatorSource {
		private AbstractQueryElevationComponent.ElevationBean elevations;
		private SentinelIntSet ordSet; // the key half of the map
		private BytesRef[] termValues;// the value half of the map

		public ElevationComparatorSource(
				final AbstractQueryElevationComponent.ElevationBean elevations) {
			this.elevations = elevations;
			int size = elevations.ids.size();
			ordSet = new SentinelIntSet(size, -1);
			termValues = new BytesRef[ordSet.keys.length];
		}

		@Override
		public FieldComparator<Integer> newComparator(String fieldname,
				final int numHits, int sortPos, boolean reversed)
				throws IOException {
			return new FieldComparator<Integer>() {
				private final int[] values = new int[numHits];
				private int bottomVal;
				private TermsEnum termsEnum;
				private DocsEnum docsEnum;
				// ids already resolved to doc ids in a previous segment;
				// each id matches at most one live document
				Set<String> seen = new HashSet<String>(elevations.ids.size());

				@Override
				public int compare(int slot1, int slot2) {
					return values[slot1] - values[slot2]; // values will be
															// small enough that
															// there is no
															// overflow concern
				}

				@Override
				public void setBottom(int slot) {
					bottomVal = values[slot];
				}

				// Priority of the given doc, or 0 if it is not elevated
				private int docVal(int doc) {
					if (ordSet.size() > 0) {
						int slot = ordSet.find(doc);
						if (slot >= 0) {
							BytesRef id = termValues[slot];
							Integer prio = elevations.priority.get(id);
							return prio == null ? 0 : prio.intValue();
						}
					}
					return 0;
				}

				@Override
				public int compareBottom(int doc) {
					return bottomVal - docVal(doc);
				}

				@Override
				public void copy(int slot, int doc) {
					values[slot] = docVal(doc);
				}

				@Override
				public FieldComparator<Integer> setNextReader(
						AtomicReaderContext context) throws IOException {
					// convert the ids to Lucene doc ids, the ordSet and
					// termValues needs to be the same size as the number of
					// elevation docs we have
					ordSet.clear();
					Fields fields = context.reader().fields();
					if (fields == null)
						return this;
					Terms terms = fields.terms(idFieldName);
					if (terms == null)
						return this;
					termsEnum = terms.iterator(termsEnum);
					BytesRef term = new BytesRef();
					Bits liveDocs = context.reader().getLiveDocs();

					for (String id : elevations.ids) {
						term.copyChars(id);
						if (!seen.contains(id)
								&& termsEnum.seekExact(term, false)) {
							docsEnum = termsEnum.docs(liveDocs, docsEnum,
									DocsEnum.FLAG_NONE);
							if (docsEnum != null) {
								int docId = docsEnum.nextDoc();
								if (docId == DocIdSetIterator.NO_MORE_DOCS)
									continue; // must have been deleted
								termValues[ordSet.put(docId)] = BytesRef
										.deepCopyOf(term);
								seen.add(id);
								// uniqueKey field: at most one live doc per id
								assert docsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
							}
						}
					}
					return this;
				}

				@Override
				public Integer value(int slot) {
					return values[slot];
				}

				@Override
				public int compareDocToValue(int doc, Integer valueObj) {
					final int value = valueObj.intValue();
					final int docValue = docVal(doc);
					return docValue - value; // values will be small enough that
												// there is no overflow concern
				}
			};
		}
	}
}
