package org.apache.lucene.distributed.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 * http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.distributed.RMI;
import org.apache.lucene.distributed.RMI.Closeable;
import org.apache.lucene.distributed.RMI.ProxyEventListener;
import org.apache.lucene.distributed.index.IndexManagerService.IndexInfo;
import org.apache.lucene.distributed.index.IndexManagerService.IndexSettings;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.HitCollector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.search.highlight.TokenSources;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.OpenBitSet;

public class IndexServiceImpl implements IndexService {
  // Most-recent searchable at the head; addSearchable clears the list before
  // adding, so at most one live entry exists at a time.
  private LinkedList<SearchableServiceImpl> searchables = new LinkedList<SearchableServiceImpl>();
  private IndexWriter indexWriter;
  Directory directory;
  private RMI.Server rmiServer;
  private RMI rmi;
  // Logical index name; used to build per-generation searchable service names.
  private String name;
  private String serviceName;
  // Remote listeners to notify on reopen; always access under synchronized(indexListeners).
  private List<IndexListenerNotifier> indexListeners = new ArrayList<IndexListenerNotifier>();
  // Runs listener callbacks off the reopen path so slow remotes can't block it.
  private ThreadPoolExecutor listenerThreadPool;
  private ProxyEventHandler proxyEventHandler = new ProxyEventHandler();
  private IndexSettings indexSettings;

  /**
   * Opens (or creates) the index in the given directory and configures the
   * writer from the supplied settings.
   *
   * @param name logical index name
   * @param serviceName RMI service name this index service is registered under
   * @param indexSettings analyzer, max field length and optional RAM buffer size
   * @param rmiServer server used to publish per-generation searchables
   * @throws IOException if the index cannot be opened or created
   */
  public IndexServiceImpl(String name, String serviceName, Directory directory, IndexSettings indexSettings, RMI.Server rmiServer)
      throws IOException {
    this.name = name;
    this.serviceName = serviceName;
    this.directory = directory;
    this.rmiServer = rmiServer;
    this.indexSettings = indexSettings;
    rmi = rmiServer.getRMI();
    // Clear any stale write lock left behind by a crashed process.
    // (Removed an unused `FSDirectory` cast of `directory` here: it would have
    // thrown ClassCastException for non-FS directories, which the rest of the
    // class explicitly supports via instanceof checks.)
    IndexWriter.unlock(directory);
    boolean create = !IndexReader.indexExists(directory);
    indexWriter = new IndexWriter(directory, indexSettings.defaultAnalyzer, create, new IndexWriter.MaxFieldLength(
        indexSettings.maxFieldLength));
    if (indexSettings.ramBufferSizeMB != null) {
      indexWriter.setRAMBufferSizeMB(indexSettings.ramBufferSizeMB.doubleValue());
    }
    // 1 core / 20 max threads; idle extras die after 20 seconds.
    listenerThreadPool = new ThreadPoolExecutor(1, 20, 20 * 1000, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
  }

  /**
   * Reacts to the RMI layer reporting a dead proxy by dropping every listener
   * registration whose listener is that exact proxy instance.
   */
  public class ProxyEventHandler implements ProxyEventListener {
    public void proxyRemoved(Object proxy) {
      synchronized (indexListeners) {
        for (Iterator<IndexListenerNotifier> it = indexListeners.iterator(); it.hasNext();) {
          // Identity comparison on purpose: we want the registered instance itself.
          if (it.next().indexListener == proxy) {
            it.remove();
          }
        }
      }
    }
  }

  /**
   * Registers a listener to be notified whenever a new searchable is opened.
   * The listener is also watched by the RMI layer so it is dropped
   * automatically if its proxy dies (see ProxyEventHandler).
   *
   * @param indexListener remote listener to notify of new searchables
   * @throws Exception if the proxy-event registration fails
   */
  public synchronized void registerIndexListener(IndexListener indexListener) throws Exception {
    // Must hold the list lock: notifyIndexListeners and proxyRemoved iterate
    // this list under synchronized(indexListeners), and an unguarded add
    // could cause a ConcurrentModificationException there.
    synchronized (indexListeners) {
      indexListeners.add(new IndexListenerNotifier(indexListener));
    }
    rmi.addProxyEventListener(indexListener, proxyEventHandler);
  }

  /**
   * Pairs an IndexListener with a running count of failed notifications;
   * listeners with too many failures are evicted by notifyIndexListeners.
   */
  public class IndexListenerNotifier {
    // volatile: incremented on listenerThreadPool threads, read by the thread
    // running notifyIndexListeners — without it the reader may never see the
    // updates. The ++ itself is still not atomic; a lost increment only delays
    // eviction, which is acceptable for this best-effort heuristic.
    public volatile int errorCount = 0;
    public IndexListener indexListener;

    public IndexListenerNotifier(IndexListener indexListener) {
      this.indexListener = indexListener;
    }
  }

  /**
   * Notifies indexListeners of new searchable. If too many errors occur calling
   * the indexListener, then it is removed from the indexListeners list.
   *
   * Notification runs asynchronously on listenerThreadPool so a slow or dead
   * remote listener cannot block the caller (reopen is synchronized).
   * 
   * @param s the freshly opened searchable to announce
   */
  private void notifyIndexListeners(final SearchableServiceImpl s) {
    if (indexListeners != null) {
      synchronized (indexListeners) {
        Iterator<IndexListenerNotifier> iterator = indexListeners.iterator();
        while (iterator.hasNext()) {
          final IndexListenerNotifier iln = iterator.next();
          listenerThreadPool.submit(new Runnable() {
            public void run() {
              try {
                iln.indexListener.newSearchable(s);
              } catch (Exception e) {
                // NOTE(review): failure goes to stderr only; consider a logger.
                e.printStackTrace();
                iln.errorCount++;
              }
            }
          });
          // Best-effort eviction: the task submitted above runs asynchronously,
          // so errorCount here reflects failures from EARLIER notifications.
          // A failing listener is therefore dropped one notification late by design.
          if (iln.errorCount >= 4) {
            iterator.remove();
          }
        }
      }
    }
  }

  /**
   * A searchable view of the index pinned to a single commit point, published
   * over RMI. Almost every operation delegates to the wrapped IndexSearcher;
   * a new instance is created (rather than this one mutated) on each reopen.
   */
  public class SearchableServiceImpl implements SearchableService, Closeable {
    IndexReader reader;
    IndexSearcher searcher;
    String serviceName;
    // NOTE(review): these locks and the commented-out maps appear to be the
    // start of a facet/bitset cache that was never finished.
    ReentrantLock bitsLruLock = new ReentrantLock();
    ReentrantLock LruLock = new ReentrantLock();
    //Map<FacetKey,CacheValue<Facet[]>> bitSetMap;
    //Map<>

    public SearchableServiceImpl(String serviceName, IndexReader reader) {
      this.serviceName = serviceName;
      this.reader = reader;
      searcher = new IndexSearcher(reader);
      //bitSetMap = new LinkedHashMap();
    }
    
    /**
     * Returns a version stamp for this snapshot derived from the commit
     * generation of the reader's index commit.
     */
    public IndexVersion getIndexVersion() throws Exception {
      IndexCommit indexCommit = reader.getIndexCommit();
      IndexVersion indexVersion = new IndexVersion();
      indexVersion.generation = indexCommit.getGeneration();
      return indexVersion;
    }

    // Deliberately unsupported: a HitCollector callback cannot be streamed
    // sensibly over a remote boundary.
    public void search(Weight weight, Filter filter, HitCollector results) throws IOException {
      throw new UnsupportedOperationException("");
    }
    
    // Unfinished stub: always returns 0. The commented-out code sketches the
    // intended bitset-cache lookup.
    public int getFacetCount(OpenBitSet bitSet, Query query) {
      //OpenBitSet bitSet2 = bitSetMap.get(query);
      //if (bitSet2 == null) {
        // create bitset from query
      //}
      return 0;
    }
    
    /**
     * Produces highlighted fragments for one document field, preferring term
     * vectors and falling back to re-analysis of the stored text.
     *
     * NOTE(review): the highlighter field is never assigned anywhere in this
     * file (the constructor only stores the HighlightKey), so dos() would
     * throw NullPointerException unless it is set externally — confirm how
     * highlighter is meant to be initialized.
     */
    public class HighlighterImpl {
      Highlighter highlighter;
      HighlightKey highlight;
      
      public HighlighterImpl(HighlightKey highlight) {
        this.highlight = highlight;
      }
      
      public TextFragment[] dos(int docId, String field, Document doc) throws IOException {
        String[] docTexts = doc.getValues(field);
        TextFragment[] frag;
        if (docTexts.length == 1) {
          // single-valued field
          TokenStream tstream;
          try {
            // attempt term vectors
            tstream = TokenSources.getTokenStream(reader, docId, field);
          } catch (IllegalArgumentException e) {
            // fall back to analyzer
            tstream = new TokenOrderingFilter(highlight.analyzer.tokenStream(field, new StringReader(docTexts[0])), 10);
          }
          frag = highlighter.getBestTextFragments(tstream, docTexts[0], false, highlight.numFragments);
        } else {
          // multi-valued field
          MultiValueTokenStream tstream;
          tstream = new MultiValueTokenStream(field, docTexts, highlight.analyzer, true);
          frag = highlighter.getBestTextFragments(tstream, tstream.asSingleValue(), false, highlight.numFragments);
        }
        return frag;
      }
    }

    // Releases the searcher, then the underlying reader for this snapshot.
    public void close() throws IOException {
      searcher.close();
      reader.close();
    }

    public int docFreq(Term term) throws IOException {
      return searcher.docFreq(term);
    }

    public int[] docFreqs(Term[] terms) throws IOException {
      return searcher.docFreqs(terms);
    }

    public int maxDoc() throws IOException {
      return searcher.maxDoc();
    }

    public TopDocs search(Weight weight, Filter filter, int n) throws IOException {
      return searcher.search(weight, filter, n);
    }

    public Document doc(int i) throws CorruptIndexException, IOException {
      return searcher.doc(i);
    }

    /** Batch form of doc(int, FieldSelector): fetches several documents in one remote call. */
    public Document[] docs(int[] docs, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
      Document[] documents = new Document[docs.length];
      for (int x = 0; x < docs.length; x++) {
        documents[x] = searcher.doc(docs[x], fieldSelector);
      }
      return documents;
    }

    public Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
      return searcher.doc(n, fieldSelector);
    }

    public Query rewrite(Query query) throws IOException {
      return searcher.rewrite(query);
    }

    public Explanation explain(Weight weight, int doc) throws IOException {
      return searcher.explain(weight, doc);
    }

    public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException {
      return searcher.search(weight, filter, n, sort);
    }
  }

  /**
   * Returns a searchable over the latest committed state of the index,
   * creating and announcing a new one only when the index has changed (or on
   * first call). Synchronized so only one reopen runs at a time.
   */
  public synchronized SearchableService reopen() throws Exception {
    SearchableServiceImpl current = searchables.peek();
    if (current == null) {
      // First call: nothing open yet, start from a fresh reader.
      current = addSearchable(IndexReader.open(directory));
      notifyIndexListeners(current);
      return current;
    }
    IndexReader reopened = current.reader.reopen();
    if (reopened == current.reader) {
      // Index unchanged: reopen() returned the same reader; keep the current searchable.
      return current;
    }
    current = addSearchable(reopened);
    notifyIndexListeners(current);
    return current;
  }

  /**
   * Wraps the reader in a new SearchableServiceImpl, publishes it under a
   * generation-stamped RMI service name, and makes it the current searchable.
   *
   * NOTE(review): searchables.clear() discards any previous searchable without
   * calling close() on it; presumably the RMI layer (addService(..., true) /
   * Closeable) disposes of it when the last client goes away — confirm that
   * lifecycle, otherwise every reopen leaks an IndexReader.
   */
  private SearchableServiceImpl addSearchable(IndexReader reader) throws IOException {
    long generation = reader.getIndexCommit().getGeneration();
    String serviceName = "index." + name + ".searchable." + generation;
    SearchableServiceImpl currentSearchable = new SearchableServiceImpl(serviceName, reader);
    rmiServer.addService(serviceName, currentSearchable, true);
    searchables.clear();
    searchables.addFirst(currentSearchable);
    return currentSearchable;
  }

  /**
   * Commits all pending updates to the directory so they become visible to
   * readers opened afterwards (see reopen()).
   */
  public void flush() throws Exception {
    indexWriter.commit();
  }

  /**
   * Commits pending updates and returns a searchable that sees them.
   * Equivalent to flush() followed by reopen().
   */
  public SearchableService flushAndReopen() throws Exception {
    // Delegate to flush() rather than calling indexWriter.commit() directly,
    // so both entry points share one commit path.
    flush();
    return reopen();
  }

  /**
   * Shuts this index service down: closes every open searchable, the writer,
   * the directory, and the listener thread pool. Each close is attempted even
   * if an earlier one fails (the original code would leak the writer and
   * directory if a searchable's close threw); the first failure is rethrown.
   */
  public synchronized void close() throws Exception {
    Exception firstFailure = null;
    for (SearchableServiceImpl searchable : searchables) {
      try {
        searchable.close();
      } catch (Exception e) {
        if (firstFailure == null) {
          firstFailure = e;
        }
      }
    }
    try {
      indexWriter.close();
    } catch (Exception e) {
      if (firstFailure == null) {
        firstFailure = e;
      }
    }
    try {
      directory.close();
    } catch (Exception e) {
      if (firstFailure == null) {
        firstFailure = e;
      }
    }
    // The pool's core thread is non-daemon and would otherwise linger forever.
    listenerThreadPool.shutdown();
    if (firstFailure != null) {
      throw firstFailure;
    }
  }

  /**
   * Builds a snapshot of this index's metadata: name, settings, service name,
   * and (for filesystem-backed indexes only) on-disk size in bytes.
   */
  public IndexInfo getIndexInfo() throws Exception {
    IndexInfo info = new IndexInfo();
    info.name = name;
    info.indexSettings = indexSettings;
    info.serviceName = serviceName;
    if (directory instanceof FSDirectory) {
      // Size is only meaningful (and obtainable) for on-disk directories.
      info.length = FileUtils.sizeOfDirectory(((FSDirectory) directory).getFile());
    }
    return info;
  }

  // TODO: needs to return the ops that worked, and ones that failed
  /**
   * Applies a batch of Add/Update/Delete operations to the writer in order.
   * Changes are not visible to searchers until flush()/flushAndReopen().
   */
  public void execute(Operation[] operations) throws Exception {
    for (Operation op : operations) {
      if (op instanceof Add) {
        Add add = (Add) op;
        if (add.analyzer != null) {
          indexWriter.addDocument(add.document, add.analyzer);
        } else {
          indexWriter.addDocument(add.document);
        }
      } else if (op instanceof Update) {
        Update update = (Update) op;
        if (update.analyzer != null) {
          indexWriter.updateDocument(update.term, update.document, update.analyzer);
        } else {
          indexWriter.updateDocument(update.term, update.document);
        }
      } else if (op instanceof Delete) {
        Delete delete = (Delete) op;
        if (delete.query != null) {
          indexWriter.deleteDocuments(delete.query);
        } else if (delete.term != null) {
          indexWriter.deleteDocuments(delete.term);
        }
        // A Delete with neither query nor term is silently skipped (see TODO above).
      }
    }
  }

  /**
   * Adds a single document using the supplied analyzer. Not visible to
   * searchers until flush()/flushAndReopen() commits.
   */
  public void addDocument(Document document, Analyzer analyzer) throws Exception {
    indexWriter.addDocument(document, analyzer);
  }

  /**
   * Atomically deletes all documents matching the term and adds the new
   * document, analyzed with the supplied analyzer.
   */
  public void updateDocument(Term term, Document document, Analyzer analyzer) throws Exception {
    indexWriter.updateDocument(term, document, analyzer);
  }

  /** Deletes all documents containing the given term. */
  public void deleteDocuments(Term term) throws Exception {
    indexWriter.deleteDocuments(term);
  }

  /** Deletes all documents matching the given query. */
  public void deleteDocuments(Query query) throws Exception {
    indexWriter.deleteDocuments(query);
  }
  
  /** 
   * Helper class which creates a single TokenStream out of values from a 
   * multi-valued field, rewriting each token's offsets so they index into the
   * concatenation of all values (see asSingleValue()).
   * from SOLR
   */
  public static class MultiValueTokenStream extends TokenStream {
    private String fieldName;
    private String[] values;
    private Analyzer analyzer;
    private int curIndex;                  // next index into the values array
    private int curOffset;                 // offset into concatenated string
    private TokenStream currentStream;     // tokenStream currently being iterated
    private boolean orderTokenOffsets;     // wrap each value's stream in TokenOrderingFilter

    /** Constructs a TokenStream for consecutively-analyzed field values
     *
     * @param fieldName name of the field
     * @param values array of field data
     * @param analyzer analyzer instance
     * @param orderTokenOffsets whether to re-sort tokens by startOffset for the highlighter
     */
    public MultiValueTokenStream(String fieldName, String[] values, 
                                 Analyzer analyzer, boolean orderTokenOffsets) {
      this.fieldName = fieldName;
      this.values = values;
      this.analyzer = analyzer;
      curIndex = -1;
      curOffset = 0;
      currentStream = null;
      this.orderTokenOffsets=orderTokenOffsets;
    }

    /** Returns the next token in the stream, or null at EOS. */
    public Token next() throws IOException {
      int extra = 0;
      if(currentStream == null) {
        // No stream in progress: advance to the next value and analyze it.
        curIndex++;        
        if(curIndex < values.length) {
          currentStream = analyzer.tokenStream(fieldName, 
                                               new StringReader(values[curIndex]));
          if (orderTokenOffsets) currentStream = new TokenOrderingFilter(currentStream,10);
          // add extra space between multiple values
          if(curIndex > 0) 
            extra = analyzer.getPositionIncrementGap(fieldName);
        } else {
          return null;
        }
      }
      Token nextToken = currentStream.next();
      if(nextToken == null) {
        // Current value exhausted: account for its length in the concatenated
        // offset and recurse to start analyzing the next value.
        curOffset += values[curIndex].length();
        currentStream = null;
        return next();
      }
      // create an modified token which is the offset into the concatenated
      // string of all values
      Token offsetToken = new Token(nextToken.termText(), 
                                    nextToken.startOffset() + curOffset,
                                    nextToken.endOffset() + curOffset);
      // NOTE(review): the *10 multiplier on the gap is inherited from the Solr
      // original; presumably it pushes the increment past
      // GapFragmenter.INCREMENT_THRESHOLD so values split fragments — confirm
      // before changing.
      offsetToken.setPositionIncrement(nextToken.getPositionIncrement() + extra*10);
      return offsetToken;
    }

    /**
     * Returns all values as a single String into which the Tokens index with
     * their offsets.
     */
    public String asSingleValue() {
      StringBuilder sb = new StringBuilder();
      for(String str : values)
        sb.append(str);
      return sb.toString();
    }

  }

  /**
   * A SimpleFragmenter variant that also starts a new fragment whenever an
   * unusually large position increment is seen, which handles the artificial
   * gaps inserted between values of a multi-valued field much better.
   */
  public static class GapFragmenter extends SimpleFragmenter {
    /**
     * A position-increment gap at least this big is treated as a fragment
     * delimiter.
     */
    public static final int INCREMENT_THRESHOLD = 50;

    // End offset of the token that opened the current fragment.
    protected int fragOffsetAccum = 0;

    public GapFragmenter() {
    }

    public GapFragmenter(int fragsize) {
      super(fragsize);
    }

    /* (non-Javadoc)
     * @see org.apache.lucene.search.highlight.TextFragmenter#start(java.lang.String)
     */
    public void start(String originalText) {
      fragOffsetAccum = 0;
    }

    /* (non-Javadoc)
     * @see org.apache.lucene.search.highlight.TextFragmenter#isNewFragment(org.apache.lucene.analysis.Token)
     */
    public boolean isNewFragment(Token token) {
      boolean fragmentFull = token.endOffset() >= fragOffsetAccum + getFragmentSize();
      boolean largeGap = token.getPositionIncrement() > INCREMENT_THRESHOLD;
      if (fragmentFull || largeGap) {
        // Same as the original `fragOffsetAccum += endOffset - fragOffsetAccum`.
        fragOffsetAccum = token.endOffset();
        return true;
      }
      return false;
    }
  }

  /** Orders Tokens in a window first by their startOffset ascending.
   * endOffset is currently ignored.
   * This is meant to work around fickleness in the highlighter only.  It
   * can mess up token positions and should not be used for indexing or querying.
   */
  public static class TokenOrderingFilter extends TokenFilter {
    private final int windowSize;    // max tokens buffered before one is released
    private final LinkedList<Token> queue = new LinkedList<Token>();  // kept sorted by startOffset
    private boolean done=false;      // true once the wrapped stream is exhausted

    protected TokenOrderingFilter(TokenStream input, int windowSize) {
      super(input);
      this.windowSize = windowSize;
    }

    /**
     * Fills the buffer with up to windowSize tokens, inserting each in
     * startOffset order, then emits the smallest buffered token. Tokens more
     * than windowSize positions out of order can still be emitted out of
     * order — the sort only holds within the sliding window.
     */
    public Token next() throws IOException {
      while (!done && queue.size() < windowSize) {
        Token newTok = input.next();
        if (newTok==null) {
          done=true;
          break;
        }

        // reverse iterating for better efficiency since we know the
        // list is already sorted, and most token start offsets will be too.
        ListIterator<Token> iter = queue.listIterator(queue.size());
        while(iter.hasPrevious()) {
          if (newTok.startOffset() >= iter.previous().startOffset()) {
            // insertion will be before what next() would return (what
            // we just compared against), so move back one so the insertion
            // will be after.
            iter.next();
            break;
          }
        }
        // If the loop ran off the front, the iterator is at position 0 and the
        // new token (smallest startOffset so far) is inserted at the head.
        iter.add(newTok);
      }

      return queue.isEmpty() ? null : queue.removeFirst();
    }
  }

}
