package org.apache.lucene.ocean;

import java.io.IOException;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.lang.StringUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.OceanSegmentReader;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.index.Term;
import org.apache.lucene.ocean.Batch.MasterBatch;
import org.apache.lucene.ocean.Batch.SlaveBatch;
import org.apache.lucene.ocean.DiskIndex.DiskIndexSnapshot;
import org.apache.lucene.ocean.Index.IndexException;
import org.apache.lucene.ocean.Index.IndexSnapshot;
import org.apache.lucene.ocean.IndexCreator.Add;
import org.apache.lucene.ocean.RamIndex.RamIndexSnapshot;
import org.apache.lucene.ocean.SnapshotInfo.IndexInfo;
import org.apache.lucene.ocean.WriteableMemoryIndex.MemoryIndexSnapshot;
import org.apache.lucene.ocean.log.TransactionLog;
import org.apache.lucene.ocean.log.TransactionLog.SlaveBatchIterator;
import org.apache.lucene.ocean.snapshotlog.SnapshotLogManager;
import org.apache.lucene.ocean.util.Constants;
import org.apache.lucene.ocean.util.LongSequence;
import org.apache.lucene.ocean.util.SortedListMap;
import org.apache.lucene.ocean.util.Timeout;
import org.apache.lucene.ocean.util.Util;
import org.apache.lucene.search.OceanMultiThreadSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Main class for search transaction system.
 * 
 * Indexes on disk are immutable, they can only be deleted from or merged
 * periodically. Merges occur in the background. There is always one active
 * WriteableMemoryIndex that new documents are written to.
 * 
 * A snapshot corresponds to a transaction. Each transaction creates a new
 * snapshot. Snapshot ids have both major and minor version represented as a
 * decimal. The major represents the transaction. The minor increments with
 * index merges. Transaction data is known as a batch. There is a MasterBatch
 * and SlaveBatch. A MasterBatch is created in the initial update call to
 * TransactionSystem such as addDocument. All update calls eventually go through
 * commitBatch(Batch batch). A SlaveBatch is what is loaded from the
 * transactionlog during a recovery.
 * 
 * IndexWriter like methods such as addDocument, updateDocument are provided.
 * The commitTransaction method provides complete transaction access.
 * 
 * A _documentid field is added to each document. This is an internal number for
 * tracking a document and allows the transaction log system to be recovered
 * properly. During recovery a delete will use the _documentid rather than the
 * actual query or term to insure the exact documents are deleted at the point
 * in time the transaction occurred.
 * 
 * 
 */
// TODO: need test case of maybeMergeDiskIndices
// TODO: custom efficient document serializer
// TODO: not sure how to handle Document fields with a TokenStream
// TODO: make transaction timeout a batch parameter
// TODO: make multithreaded transactions optional
// TODO: test disk indexes with too many deletes merging
// TODO: remove lang and io library dependencies
// TODO: write test cases for LogFileManager
// TODO: build recovery to load based on last deletes id
// TODO: allow optional queryparser to be defined for serializing deletes in a
// more space efficient manner
// TODO: add optional LRU size cache to transaction log for new records
// TODO: remove snapshot id from document, rely on snapshotinfo
// TODO: test RawLogFile.FileStreamData.getInputStream crc check
// TODO: add commitnothing
// TODO: logfilemanager if logfile is empty then delete and use previous log
// file
// TODO: create file deleter that tries to delete files as windows has problems
// with this
public class TransactionSystem {
  final static Logger LOG = LoggerFactory.getLogger(TransactionSystem.class);
  /**
  public static final int DEFAULT_MEMORY_INDEX_MAX_DOCS = 50;
  public static final int DEFAULT_MAYBE_MERGE_DOC_CHANGES = 2000;
  public static final int DEFAULT_MAX_RAM_INDEXES_SIZE = 1024 * 1024 * 30;
  public static final float DEFAULT_MERGE_DISK_DELETED_PERCENT = 0.3f;
  public static final float DEFAULT_DELETES_FLUSH_THRESHOLD_PERCENT = 0.3f;
  public static final long DEFAULT_MAYBE_MERGES_TIMER_INTERVAL = 60 * 1000;
  public static final long DEFAULT_LOG_FILE_DELETE_TIMER_INTERVAL = 100 * 1000;
  **/
  // pool that runs batch commit work; also handed to IndexCreator during recovery
  private ThreadPoolExecutor commitThreadPool;
  // single-threaded pool that runs MaybeMergeIndexes tasks in the background
  private ThreadPoolExecutor mergeThreadPool;
  // transaction log used for durability and recovery replay
  private TransactionLog transactionLog;
  // all live indexes (disk, ram, writeable memory)
  private Indexes indexes = new Indexes();
  // serializes batch commits; merge of the writeable index also takes this lock
  private ReentrantLock commitLock = new ReentrantLock();
  Snapshots snapshots;
  // held while a MaybeMergeIndexes pass runs; the timer skips a pass when locked
  private ReentrantLock mergeIndexesLock = new ReentrantLock();
  private int docChangesSinceLastMerge = 0;
  // analyzer used when an update call does not supply one
  private Analyzer defaultAnalyzer;
  // seeds the document id sequence so ids are unique across servers
  private int serverNumber = 0;
  // generators for _documentid, disk index ids and ram index ids (initialized in load())
  private LongSequence documentSequence;
  private LongSequence diskIndexSequence;
  private LongSequence ramIndexSequence;
  /**
  private int memoryIndexMaxDocs = DEFAULT_MEMORY_INDEX_MAX_DOCS;
  private int maybeMergeDocChanges = DEFAULT_MAYBE_MERGE_DOC_CHANGES;
  private int maxRamIndexesSize = DEFAULT_MAX_RAM_INDEXES_SIZE;
  private int maxDocsIndexes = -1;
  private int maxSnapshots = 5;
  private float mergeDiskDeletedPercent = DEFAULT_MERGE_DISK_DELETED_PERCENT;
  private long snapshotExpiration = 20 * 1000;
  private float deletesFlushThresholdPercent = DEFAULT_DELETES_FLUSH_THRESHOLD_PERCENT;
  private long maybeMergesTimerInterval = DEFAULT_MAYBE_MERGES_TIMER_INTERVAL;
  private long logFileDeleteTimerInterval = DEFAULT_LOG_FILE_DELETE_TIMER_INTERVAL;
  private int diskIndexRAMDirectoryBufferSize = 1024 * 512;
  **/
  // access to the Lucene Directory instances backing the disk indexes
  DirectoryMap directoryMap;
  // bounded queue feeding mergeThreadPool (capacity 2, set in the constructor)
  private ArrayBlockingQueue<Runnable> mergeQueue;
  // single vs multi threaded search; selects the searcher type in getSearcher()
  private SearcherPolicy searcherPolicy;
  // only created when a MultiThreadSearcherPolicy is configured — may be null
  private ExecutorService searchThreadPool;
  private ArrayBlockingQueue<Runnable> searchQueue;
  private SortedListMap<IndexID,LargeBatch> largeBatches = new SortedListMap<IndexID,LargeBatch>();
  private ReentrantLock largeBatchLock = new ReentrantLock();
  static {
    // make Lucene use the Ocean-aware SegmentReader implementation
    System.setProperty("org.apache.lucene.SegmentReader.class", OceanSegmentReader.class.getName());
  }
  // time the last MaybeMergeIndexes pass completed, -1 until the first pass
  private long maybeMergesTimestamp = -1;
  // scheduled timers for background merging and log file deletion
  private ScheduledExecutorService maybeMergesExecutorService;
  private ScheduledExecutorService logFileDeleteExecutorService;
  private SnapshotLogManager snapshotLogManager;
  // runtime configuration (intervals, merge thresholds, server number)
  private Config config;

  //public TransactionSystem(TransactionLog transactionLog, Analyzer defaultAnalyzer, DirectoryMap directoryMap, int maybeMergeDocChanges,
  //    int maxDocsIndexes, int memoryIndexMaxDocs, float mergeDiskDeletedPercent, SearcherPolicy searcherPolicy) throws Exception {
  /**
   * Constructor for the TransactionSystem. Wires up the thread pools, the
   * snapshot log manager and the background timers, then calls load() to
   * recover state from disk and the transaction log.
   *
   * @param transactionLog TransactionLog implementation used for durability and recovery
   * @param defaultAnalyzer the default Analyzer used when an update call does not supply one
   * @param directoryMap provides access to the index Directory instances
   * @param searcherPolicy single or multi threaded search policy; a
   *        MultiThreadSearcherPolicy also causes the search thread pool to be created
   * @param config runtime configuration (timer intervals, merge thresholds, server number)
   * @throws Exception if recovery in load() fails
   */
  public TransactionSystem(TransactionLog transactionLog, Analyzer defaultAnalyzer, DirectoryMap directoryMap, SearcherPolicy searcherPolicy, Config config) throws Exception {
    this.transactionLog = transactionLog;
    this.defaultAnalyzer = defaultAnalyzer;
    this.directoryMap = directoryMap;
    this.config = config;
    this.serverNumber = config.serverNumber;
    //this.maybeMergeDocChanges = maybeMergeDocChanges;
    //this.maxDocsIndexes = maxDocsIndexes;
    //this.memoryIndexMaxDocs = memoryIndexMaxDocs;
    //this.mergeDiskDeletedPercent = mergeDiskDeletedPercent;
    this.searcherPolicy = searcherPolicy;
    // the search pool only exists for the multi-threaded policy; otherwise it stays null
    if (searcherPolicy instanceof MultiThreadSearcherPolicy) {
      MultiThreadSearcherPolicy multiThreadSearcherPolicy = (MultiThreadSearcherPolicy) searcherPolicy;
      searchQueue = new ArrayBlockingQueue<Runnable>(multiThreadSearcherPolicy.getQueueSize());
      searchThreadPool = new ThreadPoolExecutor(multiThreadSearcherPolicy.getMinThreads(), multiThreadSearcherPolicy.getMaxThreads(),
          1000 * 60, TimeUnit.MILLISECONDS, searchQueue);
    }
    // merges are serialized: one worker thread, at most two queued merge tasks
    mergeQueue = new ArrayBlockingQueue<Runnable>(2);
    mergeThreadPool = new ThreadPoolExecutor(1, 1, 1000 * 60, TimeUnit.MILLISECONDS, mergeQueue);
    commitThreadPool = new ThreadPoolExecutor(1, 10, 1000 * 60, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    snapshotLogManager = new SnapshotLogManager(100, 10, directoryMap.getDirectory());
    snapshots = new Snapshots(this);
    if (LOG.isInfoEnabled())
      LOG.info("TransactionSystem");
    // recover indexes and replay outstanding transactions before starting the timers
    load();
    maybeMergesExecutorService = (ScheduledExecutorService) Executors.newSingleThreadScheduledExecutor();
    maybeMergesExecutorService.scheduleWithFixedDelay(new MaybeMergesTimer(), config.maybeMergesTimerInterval, config.maybeMergesTimerInterval,
        TimeUnit.MILLISECONDS);
    logFileDeleteExecutorService = (ScheduledExecutorService) Executors.newSingleThreadScheduledExecutor();
    logFileDeleteExecutorService.scheduleWithFixedDelay(new LogFileDeleteTimer(), config.logFileDeleteTimerInterval, config.logFileDeleteTimerInterval,
        TimeUnit.MILLISECONDS);
  }
  
  /**
   * Returns the lock that serializes batch commits.
   * @return the commit ReentrantLock
   */
  public ReentrantLock getCommitLock() {
    return commitLock;
  }
  
  /**
   * Returns the pool used to run queries in parallel over multiple indexes.
   * @return the search ExecutorService, or null when the single-threaded
   *         searcher policy is configured
   */
  public ExecutorService getSearchThreadPool() {
    return searchThreadPool;
  }
  
  /**
   * Returns the runtime configuration of this TransactionSystem.
   * @return the Config object
   */
  public Config getConfig() {
    return config;
  }
  
  /**
   * Returns the manager responsible for persisting snapshot info.
   * @return the SnapshotLogManager
   */
  public SnapshotLogManager getSnapshotLogManager() {
    return snapshotLogManager;
  }

  // TODO: can use deleteFlushId instead of getMinSnapshotId
  /**
   * Periodic task that removes transaction log files that are no longer
   * needed. A log file is unnecessary once every disk index already contains
   * the transactions up to its minimum referenced snapshot id.
   */
  public class LogFileDeleteTimer implements Runnable {
    public void run() {
      Snapshot latest = snapshots.getLatestSnapshot();
      try {
        List<Long> minIds = new ArrayList<Long>();
        for (DiskIndexSnapshot diskSnapshot : latest.getDiskIndexSnapshots()) {
          Long minId = diskSnapshot.getMinSnapshotId();
          if (minId != null)
            minIds.add(minId);
        }
        // anything older than the smallest referenced snapshot id can go
        transactionLog.deleteOldLogFiles(Util.min(minIds));
      } catch (IOException ioException) {
        LOG.error("", ioException);
      } finally {
        latest.decRef();
      }
    }
  }

  /**
   * Periodic task that submits a MaybeMergeIndexes run to the merge thread
   * pool, but only when no merge is currently in progress and the last merge
   * completed more than config.maybeMergesTimerInterval milliseconds ago.
   */
  public class MaybeMergesTimer implements Runnable {
    public void run() {
      if (mergeIndexesLock.isLocked()) {
        // a merge pass is already running, skip this tick
        return;
      }
      long sinceLastMerge = System.currentTimeMillis() - maybeMergesTimestamp;
      if (sinceLastMerge > config.maybeMergesTimerInterval) {
        if (LOG.isInfoEnabled())
          LOG.info("submitting MaybeMergeIndices from timer");
        mergeThreadPool.submit(new MaybeMergeIndexes());
      }
    }
  }
  
  /**
   * Finds which index currently contains the document with the given internal
   * _documentid.
   * @param documentId internal document id (the Constants.DOCUMENTID field)
   * @return the IndexID of the containing index, or null when no index has it
   * @throws IOException
   */
  public IndexID getIndexId(Long documentId) throws IOException {
    Snapshot snapshot = snapshots.getLatestSnapshot();
    try {
      for (IndexSnapshot candidate : snapshot.getIndexSnapshots()) {
        IndexReader reader = candidate.getIndexReader();
        Term idTerm = new Term(Constants.DOCUMENTID, Util.longToEncoded(documentId));
        if (reader.docFreq(idTerm) > 0)
          return candidate.getIndex().getId();
      }
      return null;
    } finally {
      snapshot.decRef();
    }
  }
  
  /**
   * Shuts down all background executors, closes the snapshot log manager,
   * the transaction log and every index.
   * @throws IOException if closing the log manager, transaction log or an index fails
   */
  public void close() throws IOException {
    if (LOG.isInfoEnabled())
      LOG.info("close");
    maybeMergesExecutorService.shutdown();
    // BUG FIX: the log file delete timer and the search pool were previously
    // never shut down, leaving their non-daemon threads alive after close
    logFileDeleteExecutorService.shutdown();
    if (searchThreadPool != null)
      searchThreadPool.shutdown();
    mergeThreadPool.shutdown();
    commitThreadPool.shutdown();
    snapshotLogManager.close();
    transactionLog.close();
    for (Index index : indexes.getIndexes()) {
      index.close();
    }
  }
  
  /**
   * Fetches a document matching the given term from the latest snapshot.
   * @param term term identifying the document
   * @return the matching Document, or null when nothing matches
   * @throws IOException
   */
  public Document getDocument(Term term) throws IOException {
    Snapshot snapshot = snapshots.getLatestSnapshot();
    try {
      IndexReader reader = snapshot.getIndexReader();
      int docNum = Util.getTermDoc(term, reader);
      return docNum >= 0 ? reader.document(docNum) : null;
    } finally {
      snapshot.decRef();
    }
  }
  
  /**
   * Obtains a searcher over the latest snapshot. The snapshot reference is
   * handed to the returned searcher, which is responsible for releasing it.
   * @return an OceanSearcher (single threaded policy) or an
   *         OceanMultiThreadSearcher backed by the search thread pool
   * @throws IOException
   */
  public OceanSearcher getSearcher() throws IOException {
    Snapshot snapshot = snapshots.getLatestSnapshot();
    boolean success = false;
    try {
      OceanSearcher searcher;
      if (searcherPolicy instanceof SingleThreadSearcherPolicy) {
        searcher = new OceanSearcher(snapshot);
      } else {
        searcher = new OceanMultiThreadSearcher(snapshot, searchThreadPool);
      }
      success = true;
      return searcher;
    } finally {
      // BUG FIX: release the snapshot reference when no searcher took
      // ownership of it (searcher construction threw)
      if (!success)
        snapshot.decRef();
    }
  }
  
  /**
   * Deletes all documents matching the given query.
   * @param query query whose matching documents are removed
   * @return the CommitResult of the delete transaction
   * @throws Exception
   */
  public CommitResult deleteDocument(Query query) throws Exception {
    List<Query> queries = new ArrayList<Query>(1);
    queries.add(query);
    return commitTransaction(null, null, null, queries);
  }
  
  /**
   * Deletes all documents matching the given term.
   * @param term term whose matching documents are removed
   * @return the CommitResult of the delete transaction
   * @throws Exception
   */
  public CommitResult deleteDocument(Term term) throws Exception {
    List<Term> terms = new ArrayList<Term>(1);
    terms.add(term);
    return commitTransaction(null, null, terms, null);
  }
  
  /**
   * Updates a document: deletes the document(s) matching term, then adds the
   * new document. Uses the system's default analyzer.
   * @param term documents matching this term are deleted first
   * @param document the replacement document
   * @return the CommitResult of the update transaction
   * @throws Exception
   */
  public CommitResult updateDocument(Term term, Document document) throws Exception {
    return updateDocument(term, document, defaultAnalyzer);
  }
  
  /**
   * Updates a document: deletes the document(s) matching term, then adds the
   * new document analyzed with the given analyzer.
   * @param term documents matching this term are deleted first
   * @param document the replacement document
   * @param analyzer analyzer used to index the new document
   * @return the CommitResult of the update transaction
   * @throws Exception
   */
  public CommitResult updateDocument(Term term, Document document, Analyzer analyzer) throws Exception {
    List<Document> documents = new ArrayList<Document>(1);
    documents.add(document);
    List<Term> terms = new ArrayList<Term>(1);
    terms.add(term);
    return commitTransaction(documents, analyzer, terms, null);
  }
  
  /**
   * Adds a document to the index, analyzed with the default analyzer.
   * @param document the document to add
   * @return the CommitResult of the add transaction
   * @throws Exception
   */
  public CommitResult addDocument(Document document) throws Exception {
    return addDocument(document, defaultAnalyzer);
  }
  
  /**
   * Adds a document to the index, analyzed with the given analyzer.
   * @param document the document to add
   * @param analyzer analyzer used to index the document
   * @return the CommitResult of the add transaction
   * @throws Exception
   */
  public CommitResult addDocument(Document document, Analyzer analyzer) throws Exception {
    List<Document> documents = new ArrayList<Document>(1);
    documents.add(document);
    return commitTransaction(documents, analyzer, null, null);
  }
  
  /**
   * Builds a MasterBatch from the given adds and deletes and commits it via
   * commitBatch. All of the convenience update methods funnel through here.
   * @param documents documents to add, may be null
   * @param analyzer analyzer for the documents; the default analyzer is used when null
   * @param deleteByTerms terms whose matching documents are deleted, may be null
   * @param deleteByQueries queries whose matching documents are deleted, may be null
   * @return the CommitResult of the batch
   * @throws Exception
   */
  public CommitResult commitTransaction(List<Document> documents, Analyzer analyzer, List<Term> deleteByTerms, List<Query> deleteByQueries)
      throws Exception {
    Analyzer effectiveAnalyzer = analyzer != null ? analyzer : getDefaultAnalyzer();
    MasterBatch batch = new MasterBatch(this);
    if (documents != null)
      batch.addDocuments(new Documents(documents));
    batch.setAnalyzer(effectiveAnalyzer);
    Deletes deletes = new Deletes();
    if (deleteByTerms != null)
      for (Term term : deleteByTerms)
        deletes.addTerm(term);
    if (deleteByQueries != null)
      for (Query query : deleteByQueries)
        deletes.addQuery(query);
    if (deletes.hasDeletes())
      batch.setDeletes(deletes);
    return commitBatch(batch);
  }
  
  /**
   * Triggers a maybe-merge pass over the indexes.
   * @param doWait true runs the pass synchronously in the calling thread,
   *        false submits it to the background merge thread pool
   */
  public void merge(boolean doWait) {
    MaybeMergeIndexes task = new MaybeMergeIndexes();
    if (doWait) {
      task.run();
    } else {
      mergeThreadPool.submit(task);
    }
  }
  
  /**
   * Returns the default analyzer used when an update call supplies none.
   * @return the default Analyzer
   */
  public Analyzer getDefaultAnalyzer() {
    return defaultAnalyzer;
  }
  
  /**
   * Allocates the next ram index id from the ram index sequence.
   * @return the next ram index id
   */
  public long getNextRamIndexId() {
    return ramIndexSequence.getAndIncrement();
  }
  
  /**
   * Allocates the next disk index id from the disk index sequence.
   * @return the next disk index id
   */
  public long getNextDiskIndexId() {
    return diskIndexSequence.getAndIncrement();
  }
  
  /**
   * Returns the TransactionLog implementation in use.
   * @return the transaction log
   */
  public TransactionLog getTransactionLog() {
    return transactionLog;
  }
  
  /**
   * Returns the ExecutorService backing the commit thread pool.
   * @return the commit thread pool
   */
  public ExecutorService getCommitThreadPool() {
    return commitThreadPool;
  }
  
  /**
   * Recovers state on startup: restores the disk indexes referenced by the
   * newest SnapshotInfo, replays transactions not yet contained in a disk
   * index into a ram index, initializes the id sequences, and installs the
   * initial Snapshot with a fresh writeable memory index. Finishes with a
   * synchronous maybe-merge pass.
   * @throws Exception if a referenced index is missing its max snapshot id or recovery fails
   */
  public void load() throws Exception {
    BigDecimal id;
    Long snapshotId;
    List<IndexSnapshot> indexSnapshots = null;
    SnapshotInfo snapshotInfo = snapshotLogManager.loadMaxSnapshotInfo();
    if (LOG.isDebugEnabled())
      LOG.debug("snapshotInfo: " + snapshotInfo);
    long timestamp = System.currentTimeMillis();
    if (snapshotInfo != null) {
      id = snapshotInfo.getId();
      snapshotId = snapshotInfo.getSnapshotId();
      if (transactionLog.getMaxId() != null && snapshotId.longValue() != transactionLog.getMaxId().longValue()) {
        LOG.error("snapshotId: " + snapshotId + " transactionLog.getMaxId: " + transactionLog.getMaxId());
        // BUG FIX: compare primitive values; the previous boxed == compared references
        assert snapshotId.longValue() == transactionLog.getMaxId().longValue();
      }
      loadDiskIndexes(snapshotInfo, indexes);
      // continue disk index ids after the highest one found on disk
      IndexID diskMaxId = indexes.getMaxId("disk");
      if (diskMaxId != null)
        diskIndexSequence = new LongSequence(diskMaxId.id.longValue() + 1, 1);
      else
        diskIndexSequence = new LongSequence(1, 1);
      ramIndexSequence = new LongSequence(1, 1);
      indexSnapshots = new ArrayList<IndexSnapshot>();
      List<Long> snapshotIds = new LinkedList<Long>();
      // TODO: what if index directory is deleted and it is still referenced
      for (IndexInfo indexInfo : snapshotInfo.getIndexInfos()) {
        if (indexInfo.getType().equals("disk")) {
          DiskIndex diskIndex = (DiskIndex) indexes.get(indexInfo.getIndexID());
          if (diskIndex != null) {
            IndexSnapshot indexSnapshot = diskIndex.getIndexSnapshot(snapshotInfo.getSnapshotId());
            indexSnapshots.add(indexSnapshot);
            Long indexSnapshotId = indexSnapshot.getMaxSnapshotId();
            if (indexSnapshotId == null) {
              throw new Exception(indexInfo.getIndexID() + " does not have a max snapshot id");
            }
            snapshotIds.add(indexSnapshotId);
          }
        }
      }
      Long maxDiskIndexSnapshotId = Util.max(snapshotIds);
      Long fromSnapshotId = null;
      // BUG FIX: debug output went to System.out instead of the logger
      if (LOG.isDebugEnabled())
        LOG.debug("maxDiskIndexSnapshotId: " + maxDiskIndexSnapshotId);
      if (maxDiskIndexSnapshotId != null) {
        fromSnapshotId = Long.valueOf(maxDiskIndexSnapshotId.longValue() + 1);
      }
      // replay transactions newer than the newest disk index into a ram index
      List<RamIndexSnapshot> ramIndexSnapshots = runTransactionsNotInIndex(fromSnapshotId);
      if (LOG.isDebugEnabled())
        LOG.debug("ramIndexSnapshots: " + ramIndexSnapshots);
      // TODO: verify all snapshots have same id
      indexSnapshots.addAll(ramIndexSnapshots);
      List<Long> documentIds = new ArrayList<Long>(indexSnapshots.size());
      for (IndexSnapshot indexSnapshot : indexSnapshots) {
        documentIds.add(indexSnapshot.getMaxDocumentId());
      }
      // the document id sequence continues after the highest id seen in any index
      Long maxDocumentId = Util.max(documentIds);
      if (maxDocumentId != null) {
        Long documentSequenceId = Util.getNextServerSequence(maxDocumentId, serverNumber);
        documentSequence = new LongSequence(documentSequenceId, 100);
      } else {
        documentSequence = new LongSequence(serverNumber, 100);
      }
    } else {
      // no snapshot info on disk: start an empty system at snapshot id 0
      snapshotId = Long.valueOf(0);
      id = new BigDecimal(snapshotId.toString());
      documentSequence = new LongSequence(serverNumber, 100);
      diskIndexSequence = new LongSequence(1, 1);
      ramIndexSequence = new LongSequence(1, 1);
    }
    WriteableMemoryIndex writeableMemoryIndex = newWriteableMemoryIndex();
    MemoryIndexSnapshot writeableSnapshot = writeableMemoryIndex.createIndexSnapshot(snapshotId);
    if (indexSnapshots == null) {
      indexSnapshots = new ArrayList<IndexSnapshot>();
      indexSnapshots.add(writeableSnapshot);
    }
    Snapshot snapshot = new Snapshot(id, writeableSnapshot, indexSnapshots, this, timestamp);
    snapshots.add(snapshot, false);
    new MaybeMergeIndexes().run();
  }

  /**
   * Delete snapshotinfo if no longer referenced in Snapshots
   * 
   * @throws Exception
   * 
   * private void deleteUnreferencedSnapshots() throws Exception {
   * snapshots.remove(maxSnapshots, snapshotExpiration); LogDirectory directory =
   * directoryMap.getDirectory(); List<BigDecimal> ids =
   * Snapshots.loadSnapshotInfoIds(directory); for (BigDecimal id : ids) { if
   * (!snapshots.contains(id)) { // not referenced, delete it String fileName =
   * Snapshot.getFileName(id); //System.out.println("deleteFile: " + fileName + "
   * id: " + Snapshot.formatId(id)); try { directory.deleteFile(fileName); //if
   * (LOG.isDebugEnabled()) // LOG.debug("deleteFile: " + fileName); } catch
   * (Exception exception) { LOG.error(exception.getMessage()); } } } }
   */
  
  /**
   * Returns the collection of live indexes (disk, ram, writeable memory).
   * @return the Indexes container
   */
  public Indexes getIndexes() {
    return indexes;
  }

  /**
   * Returns the snapshot manager for this TransactionSystem.
   * @return the Snapshots container
   */
  public Snapshots getSnapshots() {
    return snapshots;
  }

  /**
   * Replays transactions from the transaction log, starting at
   * startSnapshotId, and builds a single RamIndex containing them. Deletes
   * found in the replayed batches are collected and attached to the RamIndex.
   *
   * @param startSnapshotId snapshot id to start replaying from; null replays from the beginning
   * @return the loaded ram index snapshots, empty when there was nothing to index
   * @throws Exception
   * @throws IOException
   */
  private List<RamIndexSnapshot> runTransactionsNotInIndex(Long startSnapshotId) throws Exception, IOException {
    LOG.info("startSnapshotId: " + startSnapshotId);
    SlaveBatchIterator iterator = transactionLog.getSlaveBatchIterator(startSnapshotId);
    try {
      // BUG FIX: this early return used to happen before the try/finally,
      // leaking the iterator when there were no batches to replay
      if (!iterator.hasNext())
        return new ArrayList<RamIndexSnapshot>();
      long indexIdNum = ramIndexSequence.getAndIncrement();
      IndexID indexId = new IndexID(indexIdNum, "ram");
      RAMDirectory ramDirectory = new RAMDirectory();
      ExecutorService threadPool = getCommitThreadPool();
      IndexCreator indexCreator = new IndexCreator("runTransactionsNotInIndex", ramDirectory, Long.MAX_VALUE, 4, defaultAnalyzer,
          threadPool);
      BlockingQueue<IndexCreator.Add> addQueue = new ArrayBlockingQueue<IndexCreator.Add>(4000, true);
      // deletes are recorded and run against all of the snapshots at the end
      List<Deletes> deletesList = new ArrayList<Deletes>();
      indexCreator.start(addQueue);
      List<RAMDirectory> ramDirectories = new ArrayList<RAMDirectory>();
      int docCount = 0;
      while (iterator.hasNext()) {
        SlaveBatch slaveBatch = iterator.next(true, true);
        // NOTE(review): analyzer is read but never used in this loop — confirm
        // getAnalyzer() has no required side effect before removing the call
        Analyzer analyzer = slaveBatch.getAnalyzer();
        if (slaveBatch.hasDocuments()) {
          Documents documents = slaveBatch.getDocuments();
          for (Document document : documents) {
            addQueue.add(new IndexCreator.Add(document));
            docCount++;
          }
        } else if (slaveBatch.hasRAMDirectory()) {
          ramDirectories.add(slaveBatch.getRamDirectory());
        }
        if (slaveBatch.hasDeletes()) {
          deletesList.add(slaveBatch.getDeletes());
        }
      }
      LOG.info("docCount: " + docCount);
      // if zero means all the transactions were deletes
      if (docCount == 0) {
        indexCreator.close();
        return new ArrayList<RamIndexSnapshot>();
      }
      indexCreator.create(false);
      ramDirectories.add(ramDirectory);
      Long snapshotId = transactionLog.getMaxId();

      // merge every replayed RAMDirectory into one combined ram index
      RAMDirectory totalRAMDirectory = new RAMDirectory();
      IndexWriter indexWriter = new IndexWriter(totalRAMDirectory, false, getDefaultAnalyzer(), true);
      indexWriter.setMergeScheduler(new SerialMergeScheduler());
      indexWriter.setUseCompoundFile(false);
      indexWriter.addIndexes((RAMDirectory[]) ramDirectories.toArray(new RAMDirectory[0]));
      indexWriter.close();
      RamIndex ramIndex = new RamIndex(indexId, snapshotId, deletesList, totalRAMDirectory, this);
      indexes.add(ramIndex);
      RamIndexSnapshot indexSnapshot = (RamIndexSnapshot) ramIndex.getIndexSnapshot(snapshotId);
      assert indexSnapshot != null;
      List<RamIndexSnapshot> indexSnapshots = new ArrayList<RamIndexSnapshot>();
      indexSnapshots.add(indexSnapshot);
      return indexSnapshots;
    } finally {
      // iterator is always non-null here; the old null check was dead code
      iterator.close();
    }
  }
  
  /**
   * Loads the disk indexes found in the directoryMap. Index directory names
   * end with "_index" and start with the numeric index id. Indexes that fail
   * to load or are no longer referenced by the snapshot info are skipped
   * (deletion of stale directories is currently commented out).
   * @param snapshotInfo the snapshot info referencing the live indexes
   * @param indices collection the loaded DiskIndex objects are added to
   * @throws Exception
   * @throws IOException
   */
  private void loadDiskIndexes(SnapshotInfo snapshotInfo, Indexes indices) throws Exception, IOException {
    for (String name : directoryMap.list()) {
      try {
        if (name.endsWith("_index")) {
          String idString = StringUtils.split(name, "_")[0];
          Directory directory = directoryMap.get(name);
          // Long.valueOf instead of the deprecated Long(String) constructor
          Long indexIdNum = Long.valueOf(idString);
          IndexID indexId = new IndexID(indexIdNum, "disk");
          try {
            IndexInfo indexInfo = snapshotInfo.getIndexInfo(indexId);
            if (indexInfo != null) {
              Long snapshotId = snapshotInfo.getSnapshotId();
              DiskIndex diskIndex = new DiskIndex(indexId, directory, snapshotId, indexInfo, this);
              indices.add(diskIndex);
            } else {
              LOG.info("index no longer referenced deleting: " + name);
              // directoryMap.delete(name);
            }
          } catch (IndexException indexException) {
            LOG.error("index not ready, deleting: " + name, indexException);
            // directoryMap.delete(name);
          } catch (IOException ioException) {
            LOG.error("index not ready, deleting: " + name, ioException);
            // directoryMap.delete(name);
          }
        }
      } catch (Exception exception) {
        // any other failure (bad name, missing directory): skip this index
        LOG.error("", exception);
      }
    }
  }
  
  /**
   * Creates a new, empty MasterBatch bound to this TransactionSystem.
   * @return a new MasterBatch
   * @throws Exception
   */
  public MasterBatch createMasterBatch() throws Exception {
    return new MasterBatch(this);
  }
  
  /**
   * Runnable class that checks the indexes to see if they need to be merged.  If they
   * do it merges them in the same thread.
   *
   */
  public class MaybeMergeIndexes implements Runnable {
    public MaybeMergeIndexes() {
    }

    // Runs one merge pass over all index tiers. Any failure is logged rather
    // than propagated so the scheduled timer keeps running.
    public void run() {
      if (LOG.isDebugEnabled())
        LOG.debug("MaybeMergeIndexes");
      // serializes merge passes; MaybeMergesTimer checks this lock to avoid
      // submitting a pass while one is still running
      mergeIndexesLock.lock();
      try {
        docChangesSinceLastMerge = 0;
        // tier order matters: writeable memory index -> ram indexes -> disk indexes
        maybeMergeWriteable();
        maybeMergeRamIndexes();
        maybeMergeDiskIndexes();
        // record completion time so the timer can tell how long since the last pass
        maybeMergesTimestamp = System.currentTimeMillis();
      } catch (Throwable throwable) {
        LOG.error("", throwable);
      } finally {
        mergeIndexesLock.unlock();
      }
    }

    /**
     * Merges the current ram indexes into a new disk index when either their
     * combined document count exceeds config.maxDocsIndexes (when that limit
     * is enabled, i.e. &gt; 0) or their combined byte size exceeds
     * config.maxRamIndexesSize.
     *
     * @throws Throwable
     */
    private void maybeMergeRamIndexes() throws Throwable {
      Snapshot snapshot = snapshots.getLatestSnapshot();
      try {
        long totalSize = 0;
        int totalDocs = 0;
        List<RamIndexSnapshot> ramIndexSnapshots = snapshot.getRamIndexSnapshots();
        for (RamIndexSnapshot ramIndexSnapshot : ramIndexSnapshots) {
          totalSize += ((RamIndex) ramIndexSnapshot.getIndex()).getSize();
          totalDocs += ramIndexSnapshot.getIndexReader().maxDoc();
        }
        boolean docLimitHit = config.maxDocsIndexes > 0 && totalDocs > config.maxDocsIndexes;
        if (docLimitHit) {
          if (LOG.isDebugEnabled())
            LOG.debug("executeMerge because numDocs: " + totalDocs + " more than maxDocsIndexes: " + config.maxDocsIndexes);
          executeMerge(ramIndexSnapshots, snapshot);
        } else if (totalSize > config.maxRamIndexesSize) {
          // merging based on the combined size of the ram indexes
          executeMerge(ramIndexSnapshots, snapshot);
        }
      } finally {
        snapshot.decRef();
      }
    }

    // TODO: needs to limit the size of the resulting diskindex
    /**
     * Merges any disk indexes whose deleted-document ratio exceeds
     * config.mergeDiskDeletedPercent.
     */
    private void maybeMergeDiskIndexes() throws Throwable {
      Snapshot snapshot = snapshots.getLatestSnapshot();
      try {
        Long snapshotId = snapshot.getSnapshotId();
        List<IndexSnapshot> toMerge = new ArrayList<IndexSnapshot>();
        for (DiskIndex diskIndex : snapshot.getDiskIndexes()) {
          DiskIndexSnapshot diskSnapshot = (DiskIndexSnapshot) diskIndex.getIndexSnapshot(snapshotId);
          if (diskIndex.hasTooManyDeletedDocs(config.mergeDiskDeletedPercent))
            toMerge.add(diskSnapshot);
        }
        if (!toMerge.isEmpty())
          executeMerge(toMerge, snapshot);
      } finally {
        snapshot.decRef();
      }
    }

    /**
     * Converts the current WriteableMemoryIndex into a read-only RamIndex once
     * it has reached {@code config.memoryIndexMaxDocs} documents, installing a
     * fresh writeable index in its place via a minor snapshot.
     * 
     * @throws Exception if the conversion or snapshot creation fails
     */
    private void maybeMergeWriteable() throws Exception {
      // have to do everything in commit lock otherwise updates will
      // change the writeableIndexSnapshot. The negative case
      // however is fast
      commitLock.lock();
      try {
        Snapshot snapshot = snapshots.getLatestSnapshot();
        try {
          MemoryIndexSnapshot writeableIndexSnapshot = snapshot.getWriteableSnapshot();
          int numDocs = writeableIndexSnapshot.getIndexReader().numDocs();
          if (writeableIndexSnapshot.maxDoc() >= config.memoryIndexMaxDocs) {
            if (LOG.isInfoEnabled())
              LOG.info("merge writeable");
            long indexIdNum = ramIndexSequence.getAndIncrement();
            IndexID indexId = new IndexID(indexIdNum, "ram");
            // wrap the writeable snapshot as an immutable ram index
            RamIndex ramIndex = new RamIndex(indexId, writeableIndexSnapshot);
            indexes.add(ramIndex);
            IndexSnapshot ramIndexSnapshot = ramIndex.getLatestIndexSnapshot();
            // the conversion must not lose or gain any live documents
            assert ramIndexSnapshot.getIndexReader().numDocs() == numDocs;
            Snapshot currentSnapshot = snapshots.getLatestSnapshot();
            try {
              List<IndexID> removeIndexIds = new ArrayList<IndexID>();
              removeIndexIds.add(writeableIndexSnapshot.getIndex().getId());
              // create a new WriteableMemoryIndex for the new snapshot because
              // the one that was there has been converted to a RamIndex
              WriteableMemoryIndex newWriteableMemoryIndex = newWriteableMemoryIndex();
              MemoryIndexSnapshot newMemoryIndexSnapshot = newWriteableMemoryIndex.createIndexSnapshot(snapshot.getSnapshotId());
              // publish a minor snapshot that drops the old writeable index and
              // adds both the replacement writeable index and the new ram index
              Snapshot newSnapshot = currentSnapshot.createMinor(removeIndexIds, newMemoryIndexSnapshot, ramIndex.getLatestIndexSnapshot());
              snapshots.add(newSnapshot, true);
              if (LOG.isInfoEnabled())
                LOG.info("merge writeable completed");
            } finally {
              currentSnapshot.decRef();
            }
          }
        } finally {
          snapshot.decRef();
        }
      } finally {
        commitLock.unlock();
      }
    }

    /**
     * Merges the given index snapshots into a single new DiskIndex and
     * publishes a snapshot that references the merged index in place of the
     * originals. Incoming snapshots consisting solely of deleted documents
     * contribute nothing to a merge and are dropped; if every incoming
     * snapshot is fully deleted, a minor snapshot removing them is published
     * and no disk index is created.
     * 
     * Deletes committed between the start of the merge and the acquisition of
     * the commit lock are replayed against the new disk index from the
     * transaction log before the new snapshot is published.
     * 
     * @param incoming index snapshots to merge into a new disk index
     * @param snapshot the snapshot the incoming index snapshots belong to
     * @throws Throwable if the merge or snapshot publication fails
     */
    private void executeMerge(List<? extends IndexSnapshot> incoming, Snapshot snapshot) throws Throwable {
      List<IndexSnapshot> oldIndexSnapshots = new ArrayList<IndexSnapshot>(incoming);
      Set<IndexID> removeIndexIds = new HashSet<IndexID>();
      // drop snapshots that consist only of deletions - nothing to merge there
      Iterator<IndexSnapshot> iterator = oldIndexSnapshots.iterator();
      while (iterator.hasNext()) {
        IndexSnapshot indexSnapshot = iterator.next();
        if (indexSnapshot.deletedDoc() == indexSnapshot.maxDoc()) {
          removeIndexIds.add(indexSnapshot.getIndex().getId());
          iterator.remove();
        }
      }
      if (LOG.isDebugEnabled())
        LOG.debug("executeMerge removeIndexIds all deleted: " + removeIndexIds);
      // every incoming snapshot was fully deleted: publish a minor snapshot
      // that removes them and skip the merge entirely (checked via the
      // remainder list being empty, not by comparing sizes of the two lists)
      if (!removeIndexIds.isEmpty() && oldIndexSnapshots.isEmpty()) {
        commitLock.lock();
        try {
          Snapshot currentSnapshot = snapshots.getLatestSnapshot();
          try {
            Snapshot newSnapshot = currentSnapshot.createMinor(removeIndexIds);
            snapshots.add(newSnapshot, true);
          } finally {
            currentSnapshot.decRef();
          }
        } finally {
          commitLock.unlock();
        }
        return;
      }
      if (oldIndexSnapshots.isEmpty())
        return;
      Long snapshotId = snapshot.getSnapshotId();
      Long indexIdNum = diskIndexSequence.getAndIncrement();
      IndexID indexId = new IndexID(indexIdNum, "disk");
      Directory directory = directoryMap.create(indexIdNum + "_index");
      // initial creation happens outside of the commit lock because it is the
      // most time consuming step; deletes that occur while the DiskIndex is
      // being built are replayed inside the commit lock below, since deletes
      // have minimal performance impact
      DiskIndex newDiskIndex = new DiskIndex(indexId, directory, oldIndexSnapshots, TransactionSystem.this);
      // TODO: ram directory snapshots need to merge somehow with the new
      // snapshot
      indexes.add(newDiskIndex);
      commitLock.lock();
      try {
        Snapshot currentSnapshot = snapshots.getLatestSnapshot();
        try {
          List<SlaveBatch> deleteOnlySlaveBatches = new ArrayList<SlaveBatch>();
          Long latestSnapshotId = currentSnapshot.getSnapshotId();
          if (latestSnapshotId.longValue() > snapshotId.longValue()) {
            // commits happened while the merge ran; collect their deletes so
            // they can be applied to the new disk index
            SlaveBatchIterator batchIterator = transactionLog.getSlaveBatchIterator(snapshotId.longValue() + 1);
            while (batchIterator.hasNext()) {
              SlaveBatch slaveBatch = batchIterator.next(false, true);
              // a batch carrying deletes must also carry the doc ids to apply
              assert slaveBatch.deletes == null || slaveBatch.deletes.getDocIds() != null;
              deleteOnlySlaveBatches.add(slaveBatch);
            }
          }
          IndexSnapshot newIndexSnapshot = newDiskIndex.initialize(latestSnapshotId, deleteOnlySlaveBatches, TransactionSystem.this);
          StringBuilder mergedIds = new StringBuilder();
          for (Iterator<IndexSnapshot> it = oldIndexSnapshots.iterator(); it.hasNext();) {
            IndexID id = it.next().getIndex().getId();
            removeIndexIds.add(id);
            mergedIds.append(id.toString());
            if (it.hasNext()) {
              mergedIds.append(",");
            }
          }
          LOG.info(mergedIds.append(" indexes written to disk index: ").append(indexId.toString()).toString());
          if (LOG.isDebugEnabled())
            LOG.debug("newIndexSnapshot: " + newIndexSnapshot.getClass().getName());
          Snapshot newSnapshot = currentSnapshot.createMinor(removeIndexIds, newIndexSnapshot);
          if (LOG.isDebugEnabled())
            LOG.debug("snapshot: " + newSnapshot.getId() + " disk num: " + newSnapshot.getDiskIndexSnapshots().size());
          snapshots.add(newSnapshot, true);
        } finally {
          currentSnapshot.decRef();
        }
      } finally {
        commitLock.unlock();
      }
    }
  }

  /**
   * Allows large indexes to be created and committed without putting the
   * documents in the transaction log. Deletes from the LargeBatch are placed
   * in the transaction log however.
   * 
   * @param threads number of indexing threads to use
   * @param ramBufferSize IndexWriter RAM buffer size in MB
   * @param optimize whether the resulting index is optimized on commit
   * @return the newly registered LargeBatch
   * @throws Exception if the batch's directory or writer cannot be created
   */
  // TODO: not sure about recovery as eventually index will be merged, could be
  // ok
  public LargeBatch createLargeBatch(int threads, float ramBufferSize, boolean optimize) throws Exception {
    // grow the commit pool so the batch can run with the requested parallelism
    if (threads > commitThreadPool.getMaximumPoolSize()) {
      commitThreadPool.setMaximumPoolSize(threads);
    }
    Long newIndexNum = diskIndexSequence.getAndIncrement();
    IndexID newIndexId = new IndexID(newIndexNum, "disk");
    LargeBatch batch = new LargeBatch(newIndexId, threads, ramBufferSize, optimize);
    largeBatchLock.lock();
    try {
      largeBatches.add(newIndexId, batch);
    } finally {
      largeBatchLock.unlock();
    }
    return batch;
  }
  
  /**
   * A transaction that does not get recorded in the transaction log because it
   * is too large. Useful for bulk loading. Supports deletes as well. The goal
   * is to allow LargeBatches to have transactional semantics without the full
   * transactional overhead. Once the large batch is committed it is
   * immediately available for searching like a regular transaction.
   * 
   * LargeBatch uses the IndexCreator class underneath to handle the
   * multithreading.
   */
  public class LargeBatch {
    // id the resulting disk index will be registered under
    IndexID indexId;
    // bounded hand-off queue between producers and the indexing threads
    ArrayBlockingQueue<Add> documentQueue;
    IndexCreator indexCreator;
    // last time a document was added
    long timestamp = System.currentTimeMillis();
    // deletes accumulated for this batch; applied at commit time
    Deletes deletes = new Deletes();
    // whether the resulting index is optimized during commit
    boolean optimize;
    IndexWriter indexWriter;
    Directory directory;

    /**
     * Creates the backing directory and IndexWriter and starts the indexing
     * threads.
     * 
     * @param indexId id the resulting disk index will be registered under
     * @param threads number of indexing threads
     * @param ramBufferSize IndexWriter RAM buffer size in MB
     * @param optimize whether to optimize the index on commit
     * @throws Exception if the directory or writer cannot be created
     */
    public LargeBatch(IndexID indexId, int threads, float ramBufferSize, boolean optimize) throws Exception {
      this.indexId = indexId;
      this.optimize = optimize;
      directory = directoryMap.create(indexId.id + "_index");
      indexWriter = new IndexWriter(directory, false, getDefaultAnalyzer(), true);
      indexWriter.setUseCompoundFile(false);
      indexWriter.setMergeScheduler(new SerialMergeScheduler());
      // flush by RAM usage only, never by document count
      indexWriter.setMaxBufferedDocs(Integer.MAX_VALUE);
      indexWriter.setRAMBufferSizeMB(ramBufferSize);
      indexCreator = new IndexCreator(indexWriter, threads, commitThreadPool);
      documentQueue = new ArrayBlockingQueue<Add>(2000);
      indexCreator.start(documentQueue);
    }

    /**
     * Queues a document for indexing, blocking if the queue is full.
     */
    public void addDocument(Document document, Analyzer analyzer) {
      try {
        // use put() rather than add(): add() throws IllegalStateException on a
        // full queue, which would lose the document for callers that don't retry
        documentQueue.put(new IndexCreator.Add(document, analyzer));
      } catch (InterruptedException interruptedException) {
        Thread.currentThread().interrupt();
        throw new IllegalStateException("interrupted while queueing document", interruptedException);
      }
      timestamp = Timeout.TIMER_THREAD.getTime();
    }

    /** Queues a delete-by-term to be applied at commit time. */
    public void deleteDocuments(Term term) {
      deletes.addTerm(term);
    }

    /** Queues a delete-by-query to be applied at commit time. */
    public void deleteDocuments(Query query) {
      deletes.addQuery(query);
    }

    /**
     * Drains the document queue, applies the queued deletes, finishes the disk
     * index, and publishes a new snapshot containing it. The batch's deletes
     * are also applied to all existing index snapshots under the commit lock.
     * 
     * @return the result of the commit including the new snapshot
     * @throws Exception if indexing or snapshot creation fails
     */
    public CommitResult commit() throws Exception {
      // wait until the indexing threads have drained the queue
      while (!documentQueue.isEmpty()) {
        Thread.sleep(5);
      }
      if (deletes.hasDeleteByQueries()) {
        Query[] queries = deletes.getQueries().toArray(new Query[0]);
        indexWriter.deleteDocuments(queries);
      }
      if (deletes.hasTerms()) {
        Term[] terms = deletes.getTerms().toArray(new Term[0]);
        indexWriter.deleteDocuments(terms);
      }
      indexCreator.create(optimize);
      int numAdded = indexCreator.getNumAdded();
      // write to transaction log and create new snapshot,
      // also perform deletes on existing index snapshots
      DiskIndex diskIndex = new DiskIndex(indexId, directory, TransactionSystem.this);
      commitLock.lock();
      try {
        Snapshot currentSnapshot = snapshots.getLatestSnapshot();
        try {
          Long snapshotId = transactionLog.getNextId();
          LargeBatchTransaction transaction = new LargeBatchTransaction(snapshotId, currentSnapshot);
          List<IndexSnapshot> indexSnapshots = currentSnapshot.getIndexSnapshots();
          // apply this batch's deletes to every existing index
          List<IndexSnapshotCommitResult> deletesResults = new ArrayList<IndexSnapshotCommitResult>(indexSnapshots.size());
          for (IndexSnapshot indexSnapshot : indexSnapshots) {
            Index index = indexSnapshot.getIndex();
            IndexSnapshotCommitResult deletesResult = index.commitDeletes(deletes, transaction);
            deletesResults.add(deletesResult);
          }
          List<IndexSnapshot> newIndexSnapshots = new ArrayList<IndexSnapshot>(indexSnapshots);
          DiskIndexSnapshot newIndexSnapshot = (DiskIndexSnapshot) diskIndex.createNewSnapshot(snapshotId, true, null);
          newIndexSnapshots.add(newIndexSnapshot);
          Snapshot newSnapshot = new Snapshot(snapshotId, 0, currentSnapshot.getWriteableSnapshot(), newIndexSnapshots,
              TransactionSystem.this, System.currentTimeMillis());
          snapshots.add(newSnapshot, true);
          return new CommitResult(newSnapshot, null, deletesResults, numAdded, indexId);
        } finally {
          currentSnapshot.decRef();
        }
      } finally {
        commitLock.unlock();
      }
    }
  }

  /**
   * Commits a batch to the transaction log and publishes the resulting
   * snapshot. Master batches are assigned a new snapshot id and their
   * documents receive generated document id and snapshot id fields; slave
   * batches reuse the id assigned by the master. May trigger a background
   * merge once enough document changes have accumulated.
   * 
   * @param batch the batch to commit
   * @return CommitResult describing the new snapshot
   * @throws Exception if the transaction fails
   * @throws IOException if writing to the transaction log fails
   */
  CommitResult commitBatch(Batch batch) throws Exception, IOException {
    batch.close();
    commitLock.lock();
    try {
      Long snapshotId = null;
      List<Long> documentIds = null;
      if (batch instanceof SlaveBatch) {
        SlaveBatch slaveBatch = (SlaveBatch) batch;
        snapshotId = slaveBatch.getId();
      } else {
        MasterBatch masterBatch = (MasterBatch) batch;
        snapshotId = transactionLog.getNextId();
        if (batch.hasDocuments()) {
          Documents documents = batch.getDocuments();
          documentIds = new ArrayList<Long>(documents.size());
          for (Document document : documents) {
            Long documentId = documentSequence.getAndIncrement();
            documentIds.add(documentId);
            Util.setValue(Constants.DOCUMENTID, documentId, document);
            Util.setValue(Constants.SNAPSHOTID, snapshotId, document);
          }
          // large batches (or those with tokenstream/reader fields) are
          // pre-indexed into a RAMDirectory instead of the writeable memory index
          if (documents.size() >= config.memoryIndexMaxDocs || documents.hasFieldsWithTokenStreamOrReader()) {
            RAMDirectory ramDirectory = createRamDirectory(documents, batch.getAnalyzer());
            masterBatch.setRAMDirectory(ramDirectory);
          }
          // create here before the commitlock
          masterBatch.createDocData();
        }
      }
      if (LOG.isDebugEnabled())
        LOG.debug("documents: " + batch.getDocuments());
      Snapshot currentSnapshot = snapshots.getLatestSnapshot();
      try {
        MemoryIndexSnapshot writeableIndexSnapshot = currentSnapshot.getWriteableSnapshot();
        WriteableMemoryIndex writeableMemoryIndex = (WriteableMemoryIndex) writeableIndexSnapshot.getIndex();
        Transaction transaction = null;
        CommitResult commitResult = null;
        try {
          transaction = new SingleThreadTransaction(snapshotId, documentIds, currentSnapshot, batch, this);
          commitResult = transaction.call();
        } catch (Exception exception) {
          // preserve the cause both in the log and in the rethrown exception
          LOG.error("transaction failed", exception);
          throw new Exception("transaction failed", exception);
        }
        Snapshot newSnapshot = commitResult.getSnapshot();
        snapshots.add(newSnapshot, true);
        docChangesSinceLastMerge += commitResult.getNumDocChanges();
        int writeableMaxDoc = writeableMemoryIndex.getLatestIndexSnapshot().getIndexReader().maxDoc();
        if (docChangesSinceLastMerge > config.maybeMergeDocChanges || writeableMaxDoc >= config.memoryIndexMaxDocs) {
          // only merge if nothing is currently executing or pending
          if (mergeThreadPool.getActiveCount() == 0)
            merge(false);
        }
        return commitResult;
      } finally {
        currentSnapshot.decRef();
      }
    } finally {
      commitLock.unlock();
    }
  }

  // TODO: if documents is really small then just use one thread
  /**
   * Creates a RAMDirectory from the documents and analyzer given.
   * 
   * @param documents the documents to index
   * @param analyzer the analyzer used while indexing
   * @return an in-memory directory containing the indexed documents
   * @throws Exception if indexing fails or the thread is interrupted
   */
  RAMDirectory createRamDirectory(Documents documents, Analyzer analyzer) throws Exception {
    RAMDirectory ramDirectory = new RAMDirectory();
    ExecutorService threadPool = getCommitThreadPool();
    IndexCreator indexCreator = new IndexCreator("createRamDirectory", ramDirectory, Long.MAX_VALUE, 4, analyzer, threadPool);
    BlockingQueue<IndexCreator.Add> addQueue = new ArrayBlockingQueue<IndexCreator.Add>(1000);
    indexCreator.start(addQueue);
    for (Document document : documents) {
      // block when the queue is full rather than dropping the document:
      // offer() silently loses documents whenever the indexing threads fall
      // behind the producer
      addQueue.put(new IndexCreator.Add(document));
    }
    indexCreator.create(false);
    return ramDirectory;
  }

  /**
   * Allocates the next ram index id and registers a new writeable memory
   * index with the system.
   */
  WriteableMemoryIndex newWriteableMemoryIndex() throws Exception {
    IndexID memoryIndexId = new IndexID(ramIndexSequence.getAndIncrement(), "memory");
    WriteableMemoryIndex memoryIndex = new WriteableMemoryIndex(memoryIndexId, this);
    indexes.add(memoryIndex);
    return memoryIndex;
  }
}
