package org.apache.solr.ocean.core;

import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeSet;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.RAMDirectory;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.ocean.Documents;
import org.apache.solr.ocean.Loader;
import org.apache.solr.ocean.core.Batch.MasterBatch;
import org.apache.solr.ocean.core.Batch.SlaveBatch;
import org.apache.solr.ocean.core.DiskIndex.DiskIndexSnapshot;
import org.apache.solr.ocean.core.Index.IndexException;
import org.apache.solr.ocean.core.Index.IndexSnapshot;
import org.apache.solr.ocean.core.RamIndex.RamIndexSnapshot;
import org.apache.solr.ocean.core.Snapshot.SnapshotInfo;
import org.apache.solr.ocean.core.Snapshot.SnapshotInfo.IndexInfo;
import org.apache.solr.ocean.core.Transaction.CommitResult;
import org.apache.solr.ocean.core.TransactionSystem.Category.CategoryException;
import org.apache.solr.ocean.core.WriteableMemoryIndex.MemoryIndexSnapshot;
import org.apache.solr.ocean.replication.Replication;
import org.apache.solr.ocean.replication.RawLogFile.Load;
import org.apache.solr.ocean.replication.Replication.SlaveBatchIterator;
import org.apache.solr.ocean.util.ConcurrentTreeMap;
import org.apache.solr.ocean.util.LongSequence;
import org.apache.solr.ocean.util.Timeout;
import org.apache.solr.ocean.util.Util;
import org.apache.solr.schema.IndexSchema;

/**
 * Main class for search transaction system.
 *
 * <p>Owns the set of loaded {@link Category} instances and the shared thread
 * pool used for commit work. Categories are created lazily via
 * {@link #loadCategory}.
 */
// TODO: OceanCore needs to access category
// TODO: make sure everything works with jdk1.5
// TODO: figure out directory structure for solr integration
// TODO: load category when core is supposed to load it
public class TransactionSystem {
  public static Logger log = Logger.getLogger(TransactionSystem.class.getName());
  // Max docs in the writeable in-memory index before it is rolled into a RamIndex.
  public static final int MEMORY_INDEX_MAX_DOCS = 100;
  // Accumulated doc changes that trigger scheduling of a background merge pass.
  public static final int MAYBE_MERGE_DOC_CHANGES = 2000;
  // Combined RamIndex size (bytes) above which RAM indices are merged to disk.
  public static final int MAX_RAM_INDICES_SIZE = 1024 * 1024 * 30;
  private ConcurrentHashMap<String,Category> categoryMap = new ConcurrentHashMap<String,Category>();
  // Serializes category creation so each name is only ever loaded once.
  private ReentrantLock categoryLoadLock = new ReentrantLock();
  private File rootDirectory;
  // private File categoriesDirectory;
  // Shared pool for commit work across all categories.
  private ThreadPoolExecutor commitThreadPool;
  private Replication replication;
  // Unique number of this server; used to partition id sequences across servers.
  private int serverNumber;
  private Loader loader;
  private OceanConfig config;

  /**
   * Creates a transaction system rooted at the given directory.
   *
   * @param config system-wide configuration
   * @param serverNumber unique number of this server, used to partition id sequences
   * @param replication transaction-log replication service
   * @param rootDirectory root directory for all category data
   * @param loader loads schemas and other category resources
   */
  public TransactionSystem(OceanConfig config, int serverNumber, Replication replication, File rootDirectory, Loader loader)
      throws Exception, IOException, CategoryException {
    this.config = config;
    this.serverNumber = serverNumber;
    this.replication = replication;
    this.rootDirectory = rootDirectory;
    this.loader = loader;
    // Shared by all categories for commit work; see getCommitThreadPool().
    commitThreadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
  }

  /**
   * Returns the category with the given name, creating and registering it on
   * first use.
   *
   * @param name category name
   * @param solrConfig Solr configuration the category is built from
   * @return the existing or newly created category
   */
  public Category loadCategory(String name, SolrConfig solrConfig) throws Exception {
    // Fast path: skip the load lock when the category is already registered.
    Category category = categoryMap.get(name);
    if (category != null) {
      return category;
    }
    categoryLoadLock.lock();
    try {
      // Re-check under the lock: another thread may have loaded it meanwhile.
      category = categoryMap.get(name);
      if (category == null) {
        OceanCategoryConfig categoryConfig = new OceanCategoryConfig(config, solrConfig);
        category = new Category(name, categoryConfig, new File(solrConfig.getInstanceDir()), this);
        categoryMap.put(name, category);
      }
      return category;
    } finally {
      categoryLoadLock.unlock();
    }
  }

  /** Returns the system-wide configuration. */
  public OceanConfig getConfig() {
    return config;
  }

  /** Returns the loader used to resolve schemas and other resources. */
  public Loader getLoader() {
    return loader;
  }

  /** Returns a snapshot copy of all currently loaded categories. */
  public List<Category> getCategories() {
    return new ArrayList<Category>(categoryMap.values());
  }

  /** Returns the replication (transaction log) service. */
  public Replication getReplication() {
    return replication;
  }

  /** Returns the thread pool shared by all categories for commit work. */
  public ExecutorService getCommitThreadPool() {
    return commitThreadPool;
  }

  /**
   * public Category newCategory(String name) throws IOException,
   * CategoryException, Exception { if (categoryMap.containsKey(name)) { throw
   * new IllegalArgumentException("name: " + name + " already exists"); } File
   * categoryDirectory = new File(categoriesDirectory, name);
   * categoryDirectory.mkdirs(); Category.Config categoryConfig = new
   * Category.Config(null, getConfig()); Category category = new Category(name,
   * categoryConfig, categoryDirectory, this); return category; }
   */
  /** Returns the category with the given name, or null if not loaded. */
  public Category getCategory(String name) {
    return categoryMap.get(name);
  }

  /**
   * A named collection of indices plus the transaction machinery around them.
   *
   * <p>Indexes on disk are immutable, they can only be deleted from or merged
   * periodically. Merges occur in the background. There is always one active
   * WriteableMemoryIndex that new documents are written to.
   *
   * <p>A snapshot corresponds to a transaction. Each transaction creates a new
   * snapshot. Snapshot ids have both major and minor decimal version. The
   * major represents the transaction. The minor increments with index merges.
   */
  public static class Category {
    private File indicesDirectory;
    private String name;
    // Source of unique document ids (partitioned by server number).
    private LongSequence documentSequence;
    // Source of unique index ids within this category.
    private LongSequence indexSequence;
    private Indices indices = new Indices(this);
    // Serializes commits and snapshot publication.
    private ReentrantLock commitLock = new ReentrantLock();
    private Snapshots snapshots;
    private TransactionSystem transactionSystem;
    private Replication replication;
    // Serializes background merge passes (MaybeMergeIndices).
    private ReentrantLock mergeIndicesLock = new ReentrantLock();
    // Single-threaded executor so merges for this category never overlap.
    private ExecutorService mergeThreadPool;
    // Doc changes accumulated since the last merge pass was scheduled.
    private int docChangesSinceLastMerge = 0;
    private OceanCategoryConfig config;

    /**
     * Loads a category from disk, replaying any transactions from the
     * replication log that are newer than the newest on-disk index, and
     * publishes the initial snapshot.
     *
     * @param name category name
     * @param config category configuration
     * @param categoryDirectory directory holding this category's data
     * @param transactionSystem owning transaction system
     */
    public Category(String name, OceanCategoryConfig config, File categoryDirectory, TransactionSystem transactionSystem) throws Exception,
        ParseException, CategoryException, IOException {
      this.name = name;
      this.config = config;
      this.transactionSystem = transactionSystem;
      this.indicesDirectory = new File(categoryDirectory, "indices");
      this.indicesDirectory.mkdirs();
      // Single thread: merge passes for a category are serialized.
      mergeThreadPool = Executors.newSingleThreadExecutor();
      Long highestIndexId = getHighestID(indicesDirectory);
      indexSequence = new LongSequence(highestIndexId + 1, 1);
      snapshots = new Snapshots(this);
      replication = transactionSystem.getReplication();

      IndexSchema schema = null;
      Long snapshotId = null;
      BigDecimal id = null;
      List<IndexSnapshot> indexSnapshots = null;
      SnapshotInfo snapshotInfo = Snapshot.loadHighestSnapshotInfo(indicesDirectory);
      if (snapshotInfo != null) {
        // Resume from the newest persisted snapshot.
        id = snapshotInfo.getId();
        snapshotId = snapshotInfo.getSnapshotId();
        // equals(), not ==: boxed Longs above the autobox cache fail identity
        // comparison, so the original assert could never fire reliably.
        assert snapshotId.equals(replication.getHighestId());
        schema = getIndexSchema(snapshotInfo.getSchemaVersion());
        loadDiskIndices(snapshotId, indices, schema);
        indexSnapshots = new ArrayList<IndexSnapshot>();
        List<Long> snapshotIds = new LinkedList<Long>();
        for (IndexInfo indexInfo : snapshotInfo.getIndexInfos()) {
          if (indexInfo.getType().equals("disk")) {
            DiskIndex diskIndex = (DiskIndex) indices.get(indexInfo.getId());
            IndexSnapshot indexSnapshot = diskIndex.getIndexSnapshot(snapshotInfo.getSnapshotId());
            indexSnapshots.add(indexSnapshot);
            snapshotIds.add(indexSnapshot.getHighestSnapshotId());
          }
        }
        // Replay anything committed after the newest disk index was written.
        Long highestDiskIndexSnapshotID = Collections.max(snapshotIds);
        List<RamIndexSnapshot> ramIndexSnapshots = runTransactionsNotInIndex(highestDiskIndexSnapshotID);

        // TODO: verify all snapshots have same id
        indexSnapshots.addAll(ramIndexSnapshots);
        List<Long> documentIds = new ArrayList<Long>(indexSnapshots.size());
        for (IndexSnapshot indexSnapshot : indexSnapshots) {
          documentIds.add(indexSnapshot.getHighestDocumentId());
        }
        // Continue the document id sequence past the highest id seen so far.
        Long highestDocumentID = Collections.max(documentIds);
        int serverNumber = transactionSystem.serverNumber;
        Long documentSequenceId = Util.getNextServerSequence(highestDocumentID, serverNumber);
        documentSequence = new LongSequence(documentSequenceId, 100);
      } else {
        // Brand-new category: start at snapshot 1 with the latest schema.
        snapshotId = Long.valueOf(1);
        id = BigDecimal.valueOf(snapshotId);
        schema = getIndexSchema();
        documentSequence = new LongSequence(transactionSystem.serverNumber, 100);
      }
      WriteableMemoryIndex writeableMemoryIndex = newWriteableMemoryIndex(schema);
      MemoryIndexSnapshot writeableSnapshot = writeableMemoryIndex.setSnapshot(snapshotId, null, schema);
      Snapshot snapshot = new Snapshot(id, writeableSnapshot, indexSnapshots, this);
      snapshots.add(snapshot);
    }
    
    /** Returns the replication service of the owning transaction system. */
    public Replication getReplication() {
      return transactionSystem.getReplication();
    }

    /** Returns this category's configuration. */
    public OceanCategoryConfig getConfig() {
      return config;
    }

    /** Returns the registry of this category's live indices. */
    public Indices getIndices() {
      return indices;
    }

    /** Returns the snapshot registry of this category. */
    public Snapshots getSnapshots() {
      return snapshots;
    }

    /**
     * Live snapshots of a category, keyed by id (major.minor BigDecimal).
     *
     * <p>Only the newest snapshot is retained: {@link #add} persists the new
     * snapshot's info file and then removes every older snapshot along with
     * its info file.
     */
    public static class Snapshots {
      private ConcurrentTreeMap<BigDecimal,Snapshot> snapshotMap = new ConcurrentTreeMap<BigDecimal,Snapshot>();
      private Category category;

      public Snapshots(Category category) {
        this.category = category;
      }

      // public SnapshotInfo getHighestSnapshotInfo() {

      // }

      /**
       * Returns the snapshot with the highest minor version for the given
       * major snapshot id. NOTE(review): throws NoSuchElementException when no
       * snapshot exists for the id — callers are expected to check first.
       */
      public Snapshot get(long snapshotId) {
        Long head = snapshotId + 1;
        // All ids in [snapshotId, snapshotId + 1) share the same major version.
        SortedMap<BigDecimal,Snapshot> subMap = snapshotMap.subMap(new BigDecimal(snapshotId), new BigDecimal(head));
        return subMap.get(subMap.lastKey());
      }

      /** Returns true if a snapshot with this exact major.minor id exists. */
      public boolean contains(BigDecimal id) {
        return snapshotMap.containsKey(id);
      }

      /** Returns true if any snapshot (any minor) exists for the major id. */
      public boolean contains(Long snapshotId) {
        Long head = snapshotId + 1;
        SortedMap<BigDecimal,Snapshot> subMap = snapshotMap.subMap(new BigDecimal(snapshotId), new BigDecimal(head));
        return subMap.size() > 0;
      }

      /** Returns true if any live snapshot references the given index. */
      public boolean containsIndex(long indexid) {
        for (Snapshot snapshot : snapshotMap.values()) {
          if (snapshot.containsIndex(indexid))
            return true;
        }
        return false;
      }

      /** Resolves the on-disk snapshot-info file for a snapshot id. */
      private File getFile(BigDecimal id) {
        String fileName = Snapshot.getFileName(id);
        return new File(category.indicesDirectory, fileName);
      }

      /** Drops a snapshot from the map and deletes its info file. */
      private void remove(Snapshot snapshot) {
        snapshotMap.remove(snapshot.getId());
        File file = getFile(snapshot.getId());
        file.delete();
      }

      /** Returns the snapshot with the highest id. */
      public Snapshot getLatestSnapshot() {
        return snapshotMap.lastValue();
      }

      /**
       * Registers a new snapshot: writes its info file, inserts it, then
       * prunes all older snapshots (and their info files).
       */
      private void add(Snapshot snapshot) throws IOException {
        BigDecimal id = snapshot.getId();
        SnapshotInfo snapshotInfo = snapshot.getSnapshotInfo();
        File file = getFile(id);
        snapshotInfo.writeTo(file);
        snapshotMap.put(id, snapshot);
        // headMap(id) is strictly less than id: everything older goes away.
        // NOTE(review): removing while iterating relies on ConcurrentTreeMap
        // tolerating concurrent modification — confirm against its contract.
        SortedMap<BigDecimal,Snapshot> headMap = snapshotMap.headMap(id);
        for (Snapshot removeSnapshot : headMap.values()) {
          remove(removeSnapshot);
        }
      }
    }

    /**
     * Registry of a category's live indices, keyed by index id.
     *
     * <p>Thread-safe: entries are added from the commit path (under
     * commitLock) and from the background merge thread (under
     * mergeIndicesLock), which are different locks, so the backing map must
     * itself be concurrent.
     */
    public static class Indices {
      private Category category;
      // ConcurrentHashMap instead of HashMap: add()/get()/cleanup() are
      // reached from the commit thread and the merge thread without a shared
      // lock, which made the original HashMap a data race.
      private Map<Long,Index> indexMap = new ConcurrentHashMap<Long,Index>();

      public Indices(Category category) {
        this.category = category;
      }

      /** Returns the index with the given id, or null if unknown. */
      public Index get(long id) {
        return indexMap.get(id);
      }

      /** Registers an index under its id. */
      public void add(Index index) {
        indexMap.put(index.getId(), index);
      }

      /**
       * Deletes index directories that are no longer referenced by any live
       * snapshot and are not protected by a snapshot lock.
       */
      // TODO: cleanup of indices needs to iterate over the snapshots for refs
      // and delete
      public void cleanup() throws IOException {
        Snapshots snapshots = category.getSnapshots();
        for (File file : category.indicesDirectory.listFiles()) {
          if (file.isDirectory()) {
            Long indexid = Long.valueOf(file.getName());
            if (!snapshots.containsIndex(indexid)) {
              // TODO: if snapshot lock is too old then delete anyways
              if (!DiskIndex.hasSnapshotLock(file)) {
                FileUtils.deleteDirectory(file);
                indexMap.remove(indexid);
              }
            }
          }
        }
      }
    }

    /**
     * Replays transactions from the transaction log that are not already in
     * Lucene indices, rebuilding them as RAM indices.
     *
     * @param startSnapshotId snapshot id to replay from
     * @return loaded ram snapshots
     * @throws Exception
     * @throws CategoryException
     * @throws IOException
     */
    private List<RamIndexSnapshot> runTransactionsNotInIndex(Long startSnapshotId) throws Exception, CategoryException, IOException {
      log.info("startSnapshotId: " + startSnapshotId);
      long indexId = indexSequence.getAndIncrement();
      RAMDirectory ramDirectory = new RAMDirectory();
      ExecutorService threadPool = transactionSystem.getCommitThreadPool();
      IndexCreator indexCreator = new IndexCreator(ramDirectory, Long.MAX_VALUE, 4, threadPool);
      BlockingQueue<IndexCreator.Add> addQueue = new ArrayBlockingQueue<IndexCreator.Add>(4000, true);
      List<Deletes> deletesList = new ArrayList<Deletes>();
      indexCreator.start(getDefaultAnalyzer(), addQueue);
      IndexSchema schema = null;
      List<RAMDirectory> ramDirectories = new ArrayList<RAMDirectory>();
      SlaveBatchIterator iterator = replication.getSlaveBatchIterator(name, startSnapshotId);
      byte[] buffer = new byte[1024*64];
      while (iterator.hasNext()) {
        SlaveBatch slaveBatch = iterator.next(true, true, buffer);
        Analyzer analyzer = slaveBatch.getAnalyzer();
        schema = slaveBatch.getIndexSchema();
        if (slaveBatch.hasDocuments()) {
          // Loose documents are re-indexed through the shared IndexCreator.
          Documents documents = slaveBatch.getDocuments();
          for (Document document : documents) {
            addQueue.add(new IndexCreator.Add(document, schema));
          }
        } else if (slaveBatch.hasRAMDirectory()) {
          // Pre-built directories from the log are reused as-is.
          ramDirectories.add(slaveBatch.getRamDirectory());
        }
        if (slaveBatch.hasDeletes()) {
          deletesList.add(slaveBatch.getDeletes());
        }
      }
      indexCreator.create();
      ramDirectories.add(ramDirectory);
      Long snapshotId = replication.getHighestId();
      List<RamIndexSnapshot> indexSnapshots = new ArrayList<RamIndexSnapshot>(ramDirectories.size());
      // NOTE(review): every RamIndex here is built with the same indexId and
      // the full deletesList — confirm this is intended when multiple
      // directories were loaded from the log.
      for (RAMDirectory rd : ramDirectories) {
        RamIndex ramIndex = new RamIndex(indexId, snapshotId, deletesList, rd, Category.this, schema);
        indices.add(ramIndex);
        RamIndexSnapshot indexSnapshot = (RamIndexSnapshot) ramIndex.getIndexSnapshot(snapshotId);
        indexSnapshots.add(indexSnapshot);
      }
      // TODO: run maybe merge here
      return indexSnapshots;
    }

    /** Thrown when a category-level operation (e.g. a commit) fails. */
    public static class CategoryException extends Exception {
      // Serializable subclass: pin the serial form explicitly.
      private static final long serialVersionUID = 1L;

      public CategoryException(String message) {
        super(message);
      }

      public CategoryException(String message, Exception exception) {
        super(message, exception);
      }
    }

    /**
     * Loads every on-disk index found under the indices directory into the
     * given registry. Indices that fail to open are assumed partially written
     * and are deleted so they get rebuilt from the transaction log.
     *
     * @param snapshotId snapshot id the loaded indices are pinned to
     * @param indices registry to add the loaded indices to
     * @param schema schema the indices were written with
     */
    private void loadDiskIndices(Long snapshotId, Indices indices, IndexSchema schema) throws Exception, IOException {
      File[] files = indicesDirectory.listFiles();
      if (files == null) {
        // Directory missing or unreadable; previously this was an NPE.
        return;
      }
      for (File file : files) {
        if (file.isDirectory()) {
          Long indexID = Long.valueOf(file.getName());
          try {
            DiskIndex diskIndex = new DiskIndex(indexID, file, this, snapshotId, schema);
            indices.add(diskIndex);
          } catch (IndexException indexException) {
            log.severe("index not ready, deleting: " + file.getAbsolutePath());
            FileUtils.deleteDirectory(file);
          }
        }
      }
    }

    /**
     * Returns the highest numeric subdirectory name under the given directory,
     * or 0 when there are none.
     *
     * <p>Fixes the original implementation, which called TreeSet.last() on a
     * possibly-empty set: last() throws NoSuchElementException rather than
     * returning null, so the null check was dead code and a fresh (empty)
     * indices directory crashed category construction.
     */
    private long getHighestID(File directory) {
      long highest = 0;
      File[] files = directory.listFiles();
      if (files == null) {
        // Directory missing or unreadable; treat as empty.
        return 0;
      }
      for (File file : files) {
        if (file.isDirectory()) {
          long id = Long.parseLong(file.getName());
          if (id > highest) {
            highest = id;
          }
        }
      }
      return highest;
    }

    /** Returns the category name. */
    public String getName() {
      return name;
    }

    /**
     * Returns the index schema at a specific version, loaded via the system
     * loader with a 1 second timeout.
     *
     * @param schemaVersion schema version to load
     */
    public IndexSchema getIndexSchema(float schemaVersion) throws Exception {
      return transactionSystem.getLoader().getIndexSchema(name, schemaVersion, new Timeout(1000));
    }

    /**
     * Returns the latest schema, loaded via the system loader with a 1 second
     * timeout.
     *
     * @return latest schema
     * @throws Exception
     */
    public IndexSchema getIndexSchema() throws Exception {
      return transactionSystem.getLoader().getIndexSchema(name, new Timeout(1000));
    }

    /** Returns the default analyzer of the latest schema. */
    public Analyzer getDefaultAnalyzer() throws Exception {
      return transactionSystem.getLoader().getIndexSchema(name, new Timeout(1000)).getAnalyzer();
    }

    /** Returns the owning transaction system. */
    public TransactionSystem getTransactionSystem() {
      return transactionSystem;
    }

    /** Creates a new master batch bound to the latest schema. */
    public MasterBatch createMasterBatch() throws Exception {
      IndexSchema indexSchema = getIndexSchema();
      return new MasterBatch(this, indexSchema);
    }

    /**
     * Background task that performs one merge pass over the category's
     * indices; submitted to the single-threaded mergeThreadPool from
     * commitBatch.
     */
    public class MaybeMergeIndices implements Runnable {
      public MaybeMergeIndices() {
      }

      /**
       * Runs one merge pass: rolls the writeable index if full, merges RAM
       * indices to disk, then merges deletion-heavy disk indices. All passes
       * for the category are serialized by mergeIndicesLock.
       */
      public void run() {
        mergeIndicesLock.lock();
        try {
          Snapshot snapshot = snapshots.getLatestSnapshot();
          maybeMergeWriteable(snapshot);
          maybeMergeRamIndices(snapshot);
          maybeMergeDiskIndices(snapshot);
        } catch (Throwable throwable) {
          // Merging is best-effort; never let a failure kill the merge thread.
          log.log(Level.SEVERE, "", throwable);
        } finally {
          mergeIndicesLock.unlock();
        }
      }

      /**
       * Merges all of the snapshot's RAM indices to disk once their combined
       * size exceeds MAX_RAM_INDICES_SIZE.
       */
      private void maybeMergeRamIndices(Snapshot snapshot) throws Exception {
        List<RamIndexSnapshot> ramSnapshots = snapshot.getRamIndexSnapshots();
        long totalBytes = 0;
        for (RamIndexSnapshot ramSnapshot : ramSnapshots) {
          totalBytes += ((RamIndex) ramSnapshot.getIndex()).getSize();
        }
        if (totalBytes > MAX_RAM_INDICES_SIZE) {
          executeMerge(ramSnapshots, snapshot);
        }
      }

      /**
       * Merges disk indices that have accumulated too many deleted documents
       * (and are not pinned by a snapshot lock) into a single new disk index.
       *
       * @param snapshot the snapshot to select merge candidates from
       */
      private void maybeMergeDiskIndices(Snapshot snapshot) throws Exception {
        Long snapshotId = snapshot.getSnapshotId();
        List<IndexSnapshot> indexSnapshotsToMerge = new ArrayList<IndexSnapshot>();
        for (DiskIndex diskIndex : snapshot.getDiskIndices()) {
          DiskIndexSnapshot indexSnapshot = (DiskIndexSnapshot) diskIndex.getIndexSnapshot(snapshotId);
          // TODO: create config attribute for percentage of deleted docs
          if (diskIndex.hasTooManyDeletedDocs(0.5) && !diskIndex.hasSnapshotLock()) {
            indexSnapshotsToMerge.add(indexSnapshot);
          }
        }
        // Guard against the empty case: the original called executeMerge
        // unconditionally, creating an empty disk index and publishing a new
        // snapshot on every merge pass even when there was nothing to merge.
        if (!indexSnapshotsToMerge.isEmpty()) {
          executeMerge(indexSnapshotsToMerge, snapshot);
        }
      }

      /**
       * Converts the current WriteableMemoryIndex into a RamIndex once it
       * reaches MEMORY_INDEX_MAX_DOCS documents, installing a fresh writeable
       * index and publishing a minor snapshot.
       *
       * @param snapshot latest snapshot at the time the merge pass started
       * @throws Exception
       */
      private void maybeMergeWriteable(Snapshot snapshot) throws Exception {
        MemoryIndexSnapshot writeableIndexSnapshot = snapshot.getWriteableSnapshot();
        int maxDoc = writeableIndexSnapshot.getIndexReader().maxDoc();
        if (maxDoc >= MEMORY_INDEX_MAX_DOCS) {
          // commitLock blocks concurrent commits while the writeable index is
          // swapped out and the new snapshot is published.
          commitLock.lock();
          try {
            long indexId = indexSequence.getAndIncrement();
            // Freeze the current writeable index into an (immutable) RamIndex.
            RamIndex ramIndex = new RamIndex(indexId, writeableIndexSnapshot);
            indices.add(ramIndex);
            Snapshot currentSnapshot = snapshots.getLatestSnapshot();
            List<Long> removeIndexIds = new ArrayList<Long>();
            removeIndexIds.add(writeableIndexSnapshot.getIndex().getId());
            // Replace it with an empty writeable index on the same snapshot id.
            WriteableMemoryIndex newWriteableMemoryIndex = newWriteableMemoryIndex(snapshot.getIndexSchema());
            MemoryIndexSnapshot newMemoryIndexSnapshot = newWriteableMemoryIndex.setSnapshot(snapshot.getSnapshotId(), null, snapshot
                .getIndexSchema());
            Snapshot newSnapshot = currentSnapshot.createMinor(removeIndexIds, newMemoryIndexSnapshot, ramIndex.getLatestIndexSnapshot());
            snapshots.add(newSnapshot);
          } finally {
            commitLock.unlock();
          }
        }
      }

      /**
       * Merges the given index snapshots into a brand-new disk index and
       * publishes a minor snapshot that replaces the merged indices.
       *
       * <p>If commits landed after the merge pass observed its snapshot, the
       * delete-only portions of those newer batches are replayed against the
       * merged index so no deletes are lost.
       *
       * @param indexSnapshots snapshots of the indices being merged away
       * @param snapshot the snapshot the merge candidates were selected from
       */
      private void executeMerge(List<? extends IndexSnapshot> indexSnapshots, Snapshot snapshot) throws Exception {
        Long snapshotId = snapshot.getSnapshotId();
        Long indexID = indexSequence.getAndIncrement();
        File diskIndexFileDirectory = new File(indicesDirectory, indexID.toString());
        diskIndexFileDirectory.mkdirs();
        IndexSchema schema = snapshot.getIndexSchema();
        // The expensive merge happens outside commitLock; only the catch-up
        // and snapshot publication below are done under the lock.
        DiskIndex newDiskIndex = new DiskIndex(indexID, diskIndexFileDirectory, indexSnapshots, Category.this, schema);
        indices.add(newDiskIndex);
        commitLock.lock();
        try {
          List<SlaveBatch> deleteOnlySlaveBatches = new ArrayList<SlaveBatch>();
          Snapshot currentSnapshot = snapshots.getLatestSnapshot();
          Long latestSnapshotId = currentSnapshot.getSnapshotId();
          if (!snapshotId.equals(latestSnapshotId)) {
            // Commits happened during the merge: collect their deletes from
            // the transaction log to apply to the new index.
            SlaveBatchIterator iterator = replication.getSlaveBatchIterator(name, snapshotId);
            byte[] buffer = new byte[1024*16];
            while (iterator.hasNext()) {
              SlaveBatch slaveBatch = iterator.next(false, true, buffer);
              deleteOnlySlaveBatches.add(slaveBatch);
            }
          }
          IndexSnapshot newIndexSnapshot = newDiskIndex.initialize(latestSnapshotId, deleteOnlySlaveBatches, Category.this, snapshot.getIndexSchema());
          List<Long> removeIndexIds = new ArrayList<Long>();
          for (IndexSnapshot indexSnapshot : indexSnapshots) {
            Index index = indexSnapshot.getIndex();
            removeIndexIds.add(index.getId());
          }
          Snapshot newSnapshot = currentSnapshot.createMinor(removeIndexIds, newIndexSnapshot);
          snapshots.add(newSnapshot);
        } finally {
          commitLock.unlock();
        }
      }
    }

    /**
     * Commits a batch: assigns document/snapshot ids, runs the transaction
     * against the transaction log and the writeable index, and publishes a new
     * snapshot. May schedule a background merge when enough changes have
     * accumulated.
     *
     * @param batch master or slave batch to commit
     * @return result of the commit (including the number of doc changes)
     * @throws CategoryException if the underlying transaction fails
     */
    CommitResult commitBatch(Batch batch) throws Exception, IOException, CategoryException {
      batch.close();
      commitLock.lock();
      try {
        Long snapshotId = null;
        IndexSchema schema = batch.getIndexSchema();
        if (batch instanceof SlaveBatch) {
          // Slave batches arrive with an id already assigned by the master.
          snapshotId = ((SlaveBatch) batch).getId();
        } else {
          snapshotId = replication.getNextId();
          if (batch.hasDocuments()) {
            Documents documents = batch.getDocuments();
            for (Document document : documents) {
              Long documentId = documentSequence.getAndIncrement();
              Util.setId(documentId, document, schema);
              Util.setSnapshotId(snapshotId, document, schema);
              Util.setSchemaVersion(schema.getVersion(), document, schema);
            }
            // Large batches are pre-indexed into a RAMDirectory so they bypass
            // the writeable memory index.
            if (documents.size() >= MEMORY_INDEX_MAX_DOCS) {
              RAMDirectory ramDirectory = createRamDirectory(batch.getIndexSchema(), documents);
              batch.setRAMDirectory(ramDirectory);
            }
          }
        }
        ExecutorService threadPool = transactionSystem.getCommitThreadPool();
        Snapshot currentSnapshot = snapshots.getLatestSnapshot();
        MemoryIndexSnapshot writeableIndexSnapshot = currentSnapshot.getWriteableSnapshot();
        WriteableMemoryIndex writeableMemoryIndex = (WriteableMemoryIndex) writeableIndexSnapshot.getIndex();
        List<Index> nonWriteableIndices = currentSnapshot.getDeleteOnlyIndices();
        Transaction transaction = null;
        CommitResult commitResult = null;
        try {
          Replication replication = transactionSystem.getReplication();
          Long previousId = replication.getPreviousId(snapshotId);
          transaction = new Transaction(snapshotId, previousId, batch, writeableMemoryIndex, nonWriteableIndices, threadPool, this, schema);
          commitResult = transaction.getCommitResult();
        } catch (Exception exception) {
          // Pass the throwable to the logger; the original dropped it, losing
          // the stack trace from the log record.
          log.log(Level.SEVERE, "transaction failed", exception);
          throw new CategoryException("transaction failed", exception);
        }
        // Build the new snapshot from the surviving indices plus any indices
        // the transaction created.
        List<IndexSnapshot> indexSnapshots = new ArrayList<IndexSnapshot>(nonWriteableIndices.size() + 1);
        for (Index index : nonWriteableIndices) {
          indexSnapshots.add(index.getIndexSnapshot(snapshotId));
        }
        for (IndexSnapshot newIndexSnapshot : transaction.getNewIndexSnapshots()) {
          indices.add(newIndexSnapshot.getIndex());
          indexSnapshots.add(newIndexSnapshot);
        }
        indexSnapshots.add(writeableMemoryIndex.getIndexSnapshot(snapshotId));
        Snapshot newSnapshot = new Snapshot(snapshotId, 0, this, writeableIndexSnapshot, indexSnapshots, schema);
        snapshots.add(newSnapshot);
        docChangesSinceLastMerge += commitResult.getNumDocChanges();
        int writeableMaxDoc = writeableMemoryIndex.getLatestIndexSnapshot().getIndexReader().maxDoc();
        if (docChangesSinceLastMerge > MAYBE_MERGE_DOC_CHANGES || writeableMaxDoc >= MEMORY_INDEX_MAX_DOCS) {
          mergeThreadPool.submit(new MaybeMergeIndices());
          docChangesSinceLastMerge = 0;
        }
        // TODO: reset document sequence

        return commitResult;
      } finally {
        commitLock.unlock();
      }
    }
    
    /**
     * Builds an in-memory Lucene directory containing the given documents,
     * indexed with the schema's analyzer.
     */
    RAMDirectory createRamDirectory(IndexSchema indexSchema, Documents documents) throws Exception {
      ExecutorService pool = transactionSystem.getCommitThreadPool();
      RAMDirectory directory = new RAMDirectory();
      IndexCreator creator = new IndexCreator(directory, Long.MAX_VALUE, 4, pool);
      BlockingQueue<IndexCreator.Add> queue = new ArrayBlockingQueue<IndexCreator.Add>(1000, true);
      creator.start(indexSchema.getAnalyzer(), queue);
      for (Document doc : documents) {
        queue.add(new IndexCreator.Add(doc, indexSchema));
      }
      creator.create();
      return directory;
    }

    /** Returns the next unique index id for this category. */
    long getNextIndexId() {
      return indexSequence.getAndIncrement();
    }

    /**
     * Creates a fresh writeable in-memory index, registers it with the
     * indices map, and returns it.
     *
     * @param indexSchema schema the new index will use
     */
    private WriteableMemoryIndex newWriteableMemoryIndex(IndexSchema indexSchema) throws Exception {
      Long indexID = indexSequence.getAndIncrement();
      // NOTE(review): the original also fetched the default analyzer here
      // (a schema lookup with a 1s timeout) but never used it; that dead
      // lookup has been removed.
      WriteableMemoryIndex writeableMemoryIndex = new WriteableMemoryIndex(indexID, indexSchema, this);
      indices.add(writeableMemoryIndex);
      return writeableMemoryIndex;
    }
  }
}
