package org.apache.ocean.solr;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.HitCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.MultiSearcher.CachedDfSource;
import org.apache.solr.highlight.SolrHighlighter;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocListAndSet;
import org.apache.solr.search.DocSet;

/**
 * Searcher that federates queries across the sub-{@link Searcher}s of an
 * Ocean {@link Snapshot}, aggregating term statistics so that scores are
 * comparable across shards (modeled on Lucene's {@code MultiSearcher}).
 *
 * <p>NOTE(review): this class is a partially-implemented skeleton; several
 * methods are stubs that throw {@link UnsupportedOperationException}.
 */
public class OceanSearcher extends Searcher {
  /** Combined reader view over the sub-searchers' indexes. */
  private IndexReader indexReader;
  /** Underlying per-shard searchers. */
  private Searcher[] searchers;
  /** Cumulative doc-id offsets: starts[i] is the base doc id of searchers[i]. */
  private int[] starts;
  /** Total document count across all sub-searchers. */
  private int maxDoc;

  /**
   * @param snapshot   the index snapshot to search
   * @param oceanCore  owning core
   * @param starts     caller-supplied offsets (currently superseded — see below)
   * @throws IOException if a sub-searcher cannot report its maxDoc
   */
  public OceanSearcher(Snapshot snapshot, OceanCore oceanCore, int[] starts) throws IOException {
    // TODO(review): 'searchers' was assigned to itself in the original
    // (this.searchers = searchers, with no such parameter) and is therefore
    // never initialized. Presumably it should be derived from
    // snapshot/oceanCore — confirm and fix before use.

    // Build the cumulative offset table into the FIELD. The original code
    // assigned the parameter to the field, then built the table into the
    // parameter variable, discarding the computed offsets.
    this.starts = new int[searchers.length + 1];
    for (int i = 0; i < searchers.length; i++) {
      this.starts[i] = maxDoc;
      maxDoc += searchers[i].maxDoc();    // accumulate total doc count
    }
    this.starts[searchers.length] = maxDoc;

    // TODO(review): MultiReader is not imported and is constructed without
    // sub-readers; verify the intended reader composition.
    indexReader = new MultiReader();
  }

  /**
   * Populates {@code out} with the requested doc list/set.
   * Not yet implemented.
   */
  private void getDocList(DocListAndSet out, Query query, List<Query> filterList, DocSet filter, Sort lsort, int offset, int len, int flags) throws IOException {
    // Throw rather than silently doing nothing, consistent with the other
    // unimplemented entry points in this class.
    throw new UnsupportedOperationException("getDocList is not yet implemented");
  }

  /** Translates a Solr/Lucene query into its Ocean form. Not yet implemented. */
  private Query convertToOceanQuery(Query query) {
    // Original stub had no return statement and did not compile.
    throw new UnsupportedOperationException("convertToOceanQuery is not yet implemented");
  }

  /** Counts documents matching both the query and the doc set. Not yet implemented. */
  public int numDocs(Query a, DocSet b) throws IOException {
    // Original stub had no return statement and did not compile.
    throw new UnsupportedOperationException("numDocs is not yet implemented");
  }

  /** Low-level hit collection is not supported on this searcher. */
  public void search(Weight weight, Filter filter, HitCollector results) throws IOException {
    throw new UnsupportedOperationException("search(Weight, Filter, HitCollector) is not supported");
  }

  /**
   * Delegates explanation to the sub-searcher that owns {@code doc},
   * rebasing the global doc id into that searcher's local id space.
   */
  public Explanation explain(Weight weight, int doc) throws IOException {
    int i = subSearcher(doc);
    return searchers[i].explain(weight, doc - starts[i]);
  }

  /**
   * Rewrites the query against every sub-searcher and combines the
   * per-searcher rewrites into a single executable query.
   */
  public Query rewrite(Query original) throws IOException {
    Query[] queries = new Query[searchers.length];
    for (int i = 0; i < searchers.length; i++) {
      queries[i] = searchers[i].rewrite(original);
    }
    return queries[0].combine(queries);
  }

  /** Returns the document frequency of {@code term} summed over all sub-searchers. */
  public int docFreq(Term term) throws IOException {
    int docFreq = 0;
    for (int i = 0; i < searchers.length; i++) {
      docFreq += searchers[i].docFreq(term);
    }
    return docFreq;
  }

  /**
   * Fetches a stored document by global doc id, dispatching to the owning
   * sub-searcher with a rebased local doc id.
   */
  public Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    int i = subSearcher(n);
    return searchers[i].doc(n - starts[i], fieldSelector);
  }

  /**
   * Returns the index of the sub-searcher containing global doc {@code n}:
   * a binary search over the cumulative {@code starts} table for the last
   * entry less than or equal to {@code n}.
   */
  public int subSearcher(int n) {
    int lo = 0;
    int hi = searchers.length - 1;
    while (hi >= lo) {
      int mid = (lo + hi) >>> 1;    // unsigned shift avoids overflow of lo+hi
      int midValue = starts[mid];
      if (n < midValue) {
        hi = mid - 1;
      } else if (n > midValue) {
        lo = mid + 1;
      } else {
        // Equal start offsets mean empty sub-indexes; scan to the last match
        // so the doc is attributed to the searcher that actually holds it.
        while (mid + 1 < searchers.length && starts[mid + 1] == midValue) {
          mid++;
        }
        return mid;
      }
    }
    return hi;
  }

  /** Returns the document number of document <code>n</code> within its
   * sub-index. */
  public int subDoc(int n) {
    return n - starts[subSearcher(n)];
  }

  /** Total number of documents across all sub-searchers. */
  public int maxDoc() throws IOException {
    return maxDoc;
  }

  /** Returns the doc set matching all queries. Not yet implemented. */
  public DocSet getDocSet(List<Query> queries) throws IOException {
    // Original stub had no return statement and did not compile.
    throw new UnsupportedOperationException("getDocSet(List<Query>) is not yet implemented");
  }

  /** Returns the doc set matching the query. Not yet implemented. */
  public DocSet getDocSet(Query query) throws IOException {
    // Original stub had no return statement and did not compile.
    throw new UnsupportedOperationException("getDocSet(Query) is not yet implemented");
  }

  /** Reads the documents in {@code ids} into {@code docs} with all stored fields. */
  public void readDocs(Document[] docs, DocList ids) throws IOException {
    readDocs(docs, ids, null);
  }

  /**
   * Takes a list of docs (the doc ids actually) and a set of fields to load,
   * and reads them into an array of Documents.
   */
  public void readDocs(Document[] docs, DocList ids, Set<String> fields) throws IOException {
    DocIterator iter = ids.iterator();
    for (int i = 0; i < docs.length; i++) {
      docs[i] = doc(iter.nextDoc(), fields);
    }
  }

  /** Fetches a document with all stored fields loaded. */
  public Document doc(int i) throws IOException {
    return doc(i, (Set<String>) null);
  }

  /** Fetches a document loading only {@code fields} (null = all). Not yet implemented. */
  public Document doc(int i, Set<String> fields) throws IOException {
    // Original stub had no return statement and did not compile. Presumably
    // this should build a FieldSelector from 'fields' and delegate to
    // doc(int, FieldSelector) — confirm against SolrIndexSearcher.
    throw new UnsupportedOperationException("doc(int, Set<String>) is not yet implemented");
  }

  /**
   * Returns the first (lowest) doc id containing term {@code t}, or -1 if no
   * document contains it.
   */
  public int getFirstMatch(Term t) throws IOException {
    TermDocs tdocs = null;
    try {
      tdocs = indexReader.termDocs(t);
      if (!tdocs.next()) return -1;
      return tdocs.doc();
    } finally {
      if (tdocs != null) tdocs.close();   // always release the enumeration
    }
  }

  /**
   * A dummy Searcher that serves pre-aggregated document frequencies so that
   * Weights created against it score consistently across all sub-searchers
   * (the same trick Lucene's MultiSearcher uses). All search entry points
   * are unsupported.
   */
  private static class CachedDfSource extends Searcher {
    private final Map<Term, Integer> dfMap; // term -> aggregated doc freq
    private final int maxDoc;               // aggregate document count

    public CachedDfSource(Map<Term, Integer> dfMap, int maxDoc, Similarity similarity) {
      this.dfMap = dfMap;
      this.maxDoc = maxDoc;
      setSimilarity(similarity);
    }

    /**
     * Returns the cached doc freq for {@code term}.
     *
     * @throws IllegalArgumentException if the term was not pre-aggregated
     */
    public int docFreq(Term term) {
      // Explicit null check instead of the original catch(NullPointerException).
      Integer df = dfMap.get(term);
      if (df == null) {
        throw new IllegalArgumentException("df for term " + term.text()
            + " not available");
      }
      return df.intValue();
    }

    /** Returns the cached doc freq for each term, in order. */
    public int[] docFreqs(Term[] terms) {
      int[] result = new int[terms.length];
      for (int i = 0; i < terms.length; i++) {
        result[i] = docFreq(terms[i]);
      }
      return result;
    }

    public int maxDoc() {
      return maxDoc;
    }

    public Query rewrite(Query query) {
      // this is a bit of a hack. We know that a query which
      // creates a Weight based on this Dummy-Searcher is
      // always already rewritten (see createWeight()).
      // Therefore we just return the unmodified query here
      return query;
    }

    public void close() {
      throw new UnsupportedOperationException();
    }

    public Document doc(int i) {
      throw new UnsupportedOperationException();
    }

    public Document doc(int i, FieldSelector fieldSelector) {
      throw new UnsupportedOperationException();
    }

    public Explanation explain(Weight weight, int doc) {
      throw new UnsupportedOperationException();
    }

    public void search(Weight weight, Filter filter, HitCollector results) {
      throw new UnsupportedOperationException();
    }

    public TopDocs search(Weight weight, Filter filter, int n) {
      throw new UnsupportedOperationException();
    }

    public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) {
      throw new UnsupportedOperationException();
    }
  }

  /**
   * Creates a Weight for {@code original} whose term statistics are summed
   * over all sub-searchers, so scores are comparable between shards.
   */
  protected Weight createWeight(Query original) throws IOException {
    // step 1: rewrite against every sub-searcher and combine
    Query rewrittenQuery = rewrite(original);

    // step 2: collect every term the rewritten query touches
    Set<Term> terms = new HashSet<Term>();
    rewrittenQuery.extractTerms(terms);

    // step 3: sum each term's doc freq over all sub-searchers
    Term[] allTermsArray = terms.toArray(new Term[terms.size()]);
    int[] aggregatedDfs = new int[allTermsArray.length];
    for (int i = 0; i < searchers.length; i++) {
      int[] dfs = searchers[i].docFreqs(allTermsArray);
      for (int j = 0; j < aggregatedDfs.length; j++) {
        aggregatedDfs[j] += dfs[j];
      }
    }

    Map<Term, Integer> dfMap = new HashMap<Term, Integer>();
    for (int i = 0; i < allTermsArray.length; i++) {
      dfMap.put(allTermsArray[i], Integer.valueOf(aggregatedDfs[i]));
    }

    // step 4: weight the query against a searcher serving the cached dfs
    int numDocs = maxDoc();
    CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity());

    return rewrittenQuery.weight(cacheSim);
  }
}
