package org.apache.solr.ocean;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.HitCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.Weight;
import org.apache.solr.ocean.cache.MultiQueryDocSetCacheKey;
import org.apache.solr.ocean.core.Snapshot;
import org.apache.solr.ocean.search.Filters;
import org.apache.solr.ocean.search.Search;
import org.apache.solr.ocean.search.SnapshotDocSet;
import org.apache.solr.ocean.search.SnapshotDocSetSearch;
import org.apache.solr.ocean.search.SnapshotSearch;
import org.apache.solr.ocean.search.Search.SearchQuery;
import org.apache.solr.ocean.util.Timeout;
import org.apache.solr.ocean.util.Util;
import org.apache.solr.search.DocListAndSet;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SolrIndexSearcher;

public class OceanSearcher extends SolrIndexSearcher {
  /** Combined reader over all sub-indexes of the snapshot this searcher serves. */
  private IndexReader indexReader;
  /** Per-sub-index searchers; assumed parallel to {@link #starts} (MultiSearcher style). */
  private Searcher[] searchers;
  /**
   * Document-number offset of each sub-searcher within the combined numbering.
   * NOTE(review): assumed to follow Lucene MultiSearcher conventions — confirm
   * against Snapshot.getStarts().
   */
  private int[] starts;
  /** Total document count of the combined reader, cached at construction time. */
  private int maxDoc;
  /** Owning core; supplies thread-local snapshot/timeout and the shared executor. */
  private OceanCore oceanCore;

  /**
   * Creates a searcher over the given snapshot.
   *
   * @param snapshot  view of the index supplying the combined reader,
   *                  sub-searchers and their start offsets
   * @param oceanCore owning core
   * @throws IOException if the superclass fails to initialize over the reader
   */
  public OceanSearcher(Snapshot snapshot, OceanCore oceanCore) throws IOException {
    super(oceanCore, snapshot.getIndexSchema(), "main", snapshot.getIndexReader(), false);
    this.searchers = snapshot.getSearchers();
    this.oceanCore = oceanCore;
    this.starts = snapshot.getStarts();
    indexReader = snapshot.getIndexReader();
    // BUGFIX: maxDoc was never assigned, so maxDoc() always returned 0 and
    // createWeight() computed idf against an apparently empty collection.
    // Cache the combined reader's document count here.
    this.maxDoc = indexReader.maxDoc();
  }

  /** Returns the core this searcher belongs to. */
  public OceanCore getCore() {
    return oceanCore;
  }

  /**
   * Executes the main search path: builds a snapshot-scoped search from the
   * query, optional filters and sort, runs it on the core's executor, and
   * fills {@code out} with the resulting doc list and doc set.
   *
   * <p>Exactly one of {@code filterList} / {@code filter} is consulted; when
   * {@code filter} is used it must be a {@link SnapshotDocSet} (the cast below
   * assumes so — callers on this path appear to guarantee it; TODO confirm).
   *
   * @throws IOException wrapping any failure raised by the search call
   */
  protected void getDocListC(DocListAndSet out, Query query, List<Query> filterList, DocSet filter, Sort lsort, int offset, int len, int flags)
      throws IOException {
    Snapshot snapshot = oceanCore.getThreadSnapshot();
    Timeout timeout = oceanCore.getThreadTimeout();
    Filters filters = null;
    if (filterList != null) {
      filters = new Filters(new MultiQueryDocSetCacheKey(filterList));
    } else if (filter != null) {
      SnapshotDocSet snapshotDocSet = (SnapshotDocSet)filter;
      filters = new Filters(snapshotDocSet.getCacheKey());
    }
    ExecutorService executorService = oceanCore.getExecutorService();
    Weight weight = createWeight(query);
    SearchQuery searchQuery = new SearchQuery(weight, offset, len, filters, lsort, timeout);
    SnapshotSearch snapshotSearch = new SnapshotSearch(searchQuery, snapshot, executorService);
    try {
      Search.Result result = snapshotSearch.call();
      out.docList = result.getDocList();
      out.docSet = result.getDocSet();
    } catch (Exception exception) {
      Util.throwAsIOException(exception);
    }
  }

  /** Dispatches the explanation to the sub-searcher owning {@code doc}. */
  public Explanation explain(Weight weight, int doc) throws IOException {
    int i = subSearcher(doc); // find searcher index
    return searchers[i].explain(weight, doc - starts[i]); // renumber into sub-index
  }

  /**
   * Rewrites the query against every sub-searcher and combines the results
   * (MultiSearcher-style). Assumes at least one sub-searcher exists.
   */
  public Query rewrite(Query original) throws IOException {
    Query[] queries = new Query[searchers.length];
    for (int i = 0; i < searchers.length; i++) {
      queries[i] = searchers[i].rewrite(original);
    }
    return queries[0].combine(queries);
  }

  /** Returns the term's document frequency summed across all sub-indexes. */
  public int docFreq(Term term) throws IOException {
    int docFreq = 0;
    for (int i = 0; i < searchers.length; i++)
      docFreq += searchers[i].docFreq(term);
    return docFreq;
  }

  /** Fetches document {@code n}, dispatching to the owning sub-searcher. */
  public Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    int i = subSearcher(n); // find searcher index
    return searchers[i].doc(n - starts[i], fieldSelector); // renumber into sub-index
  }

  /**
   * Returns the document number of document <code>n</code> within its
   * sub-index.
   */
  public int subDoc(int n) {
    return n - starts[subSearcher(n)];
  }

  /** Returns the total document count of the combined reader. */
  public int maxDoc() throws IOException {
    return maxDoc;
  }

  /** Returns a fresh default timeout for searches with no caller-supplied one. */
  public Timeout getDefaultSearchTimeout() {
    return new Timeout();
  }

  /**
   * Computes the DocSet matching the conjunction of {@code queries} for the
   * current thread's snapshot, using the multi-query cache key.
   *
   * @throws IOException rethrown directly, or wrapping any other failure
   */
  public DocSet getDocSet(List<Query> queries) throws IOException {
    Snapshot snapshot = oceanCore.getThreadSnapshot();
    ExecutorService executorService = oceanCore.getExecutorService();
    MultiQueryDocSetCacheKey cacheKey = new MultiQueryDocSetCacheKey(queries);
    Timeout timeout = getDefaultSearchTimeout();
    SnapshotDocSetSearch snapshotDocSetSearch = new SnapshotDocSetSearch(cacheKey, snapshot, timeout, executorService);
    try {
      return snapshotDocSetSearch.call();
    } catch (IOException ioException) {
      throw ioException;
    } catch (Exception exception) {
      // Pre-Java-6 IOException has no (String, Throwable) constructor, so the
      // cause is attached via initCause. Was previously thrown with an empty
      // message, which hid all context from callers.
      IOException ioException = new IOException("DocSet computation failed: " + exception);
      ioException.initCause(exception);
      throw ioException;
    }
  }

  /** Single-query convenience overload of {@link #getDocSet(List)}. */
  public DocSet getDocSet(Query query) throws IOException {
    List<Query> queries = buildQueryList(query);
    return getDocSet(queries);
  }

  /**
   * Read-only dummy Searcher that serves pre-aggregated document frequencies
   * so a query can be weighted with collection-wide idf statistics (adapted
   * from Lucene's MultiSearcher.CachedDfSource). All search/doc operations
   * are unsupported; only docFreq/docFreqs/maxDoc/rewrite are usable.
   */
  private static class CachedDfSource extends Searcher {
    private final Map<Term, Integer> dfMap; // term -> aggregated doc freq
    private final int maxDoc;               // global document count

    public CachedDfSource(Map<Term, Integer> dfMap, int maxDoc, Similarity similarity) {
      this.dfMap = dfMap;
      this.maxDoc = maxDoc;
      setSimilarity(similarity);
    }

    public int docFreq(Term term) {
      // Explicit lookup-miss check instead of the previous catch of
      // NullPointerException (exceptions are not control flow).
      Integer df = dfMap.get(term);
      if (df == null) {
        throw new IllegalArgumentException("df for term " + term.text() + " not available");
      }
      return df.intValue();
    }

    public int[] docFreqs(Term[] terms) {
      int[] result = new int[terms.length];
      for (int i = 0; i < terms.length; i++) {
        result[i] = docFreq(terms[i]);
      }
      return result;
    }

    public int maxDoc() {
      return maxDoc;
    }

    public Query rewrite(Query query) {
      // this is a bit of a hack. We know that a query which
      // creates a Weight based on this Dummy-Searcher is
      // always already rewritten (see createWeight()).
      // Therefore we just return the unmodified query here
      return query;
    }

    public void close() {
      throw new UnsupportedOperationException();
    }

    public Document doc(int i) {
      throw new UnsupportedOperationException();
    }

    public Document doc(int i, FieldSelector fieldSelector) {
      throw new UnsupportedOperationException();
    }

    public Explanation explain(Weight weight, int doc) {
      throw new UnsupportedOperationException();
    }

    public void search(Weight weight, Filter filter, HitCollector results) {
      throw new UnsupportedOperationException();
    }

    public TopDocs search(Weight weight, Filter filter, int n) {
      throw new UnsupportedOperationException();
    }

    public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) {
      throw new UnsupportedOperationException();
    }
  }

  /**
   * Creates a Weight for {@code original} whose idf is computed from document
   * frequencies aggregated across ALL sub-indexes, not a single shard
   * (MultiSearcher-style distributed weighting).
   */
  protected Weight createWeight(Query original) throws IOException {
    // Step 1: rewrite against every sub-searcher and combine.
    Query rewrittenQuery = rewrite(original);

    // Step 2: gather the terms the rewritten query will score.
    Set<Term> terms = new HashSet<Term>();
    rewrittenQuery.extractTerms(terms);

    // Step 3: sum each term's document frequency across all sub-indexes.
    Term[] allTermsArray = terms.toArray(new Term[terms.size()]);
    int[] aggregatedDfs = new int[allTermsArray.length];
    for (int i = 0; i < searchers.length; i++) {
      int[] dfs = searchers[i].docFreqs(allTermsArray);
      for (int j = 0; j < aggregatedDfs.length; j++) {
        aggregatedDfs[j] += dfs[j];
      }
    }

    Map<Term, Integer> dfMap = new HashMap<Term, Integer>();
    for (int i = 0; i < allTermsArray.length; i++) {
      dfMap.put(allTermsArray[i], Integer.valueOf(aggregatedDfs[i]));
    }

    // Step 4: weight the query against a dummy searcher serving the
    // aggregated dfs and the global document count.
    int numDocs = maxDoc();
    CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity());

    return rewrittenQuery.weight(cacheSim);
  }

  /**
   * Returns the index of the sub-searcher that owns combined document number
   * {@code n}: binary search of {@link #starts} for the last entry &lt;= n.
   */
  public int subSearcher(int n) { // find searcher for doc n:
    int lo = 0; // search starts array
    int hi = searchers.length - 1; // for first element less
    // than n, return its index
    while (hi >= lo) {
      // >>> avoids the (lo + hi) overflow of the signed-shift midpoint.
      int mid = (lo + hi) >>> 1;
      int midValue = starts[mid];
      if (n < midValue)
        hi = mid - 1;
      else if (n > midValue)
        lo = mid + 1;
      else { // found a match
        while (mid + 1 < searchers.length && starts[mid + 1] == midValue) {
          mid++; // scan to last match (several empty sub-indexes share a start)
        }
        return mid;
      }
    }
    return hi;
  }
}
