package org.apache.ocean.main;

import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeSet;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.RAMDirectory;
import org.apache.ocean.main.Batch.MasterBatch;
import org.apache.ocean.main.Batch.SlaveBatch;
import org.apache.ocean.main.DiskIndex.DiskIndexSnapshot;
import org.apache.ocean.main.Index.IndexException;
import org.apache.ocean.main.Index.IndexSnapshot;
import org.apache.ocean.main.IndexDocuments.IndexDocument;
import org.apache.ocean.main.RamIndex.RamIndexSnapshot;
import org.apache.ocean.main.Snapshot.SnapshotInfo;
import org.apache.ocean.main.Snapshot.SnapshotInfo.IndexInfo;
import org.apache.ocean.main.Transaction.CommitResult;
import org.apache.ocean.main.TransactionSystem.Category.CategoryException;
import org.apache.ocean.main.WriteableMemoryIndex.MemoryIndexSnapshot;
import org.apache.ocean.main.replication.Replication;
import org.apache.ocean.main.replication.RawLogFile.Load;
import org.apache.ocean.main.replication.Replication.ReplicationException;
import org.apache.ocean.main.search.cache.CacheKey;
import org.apache.ocean.main.util.LongSequence;
import org.apache.ocean.main.util.Timeout;
import org.apache.ocean.main.util.Util;
import org.apache.ocean.util.ConcurrentTreeMap;
import org.jdom.Element;

/**
 * Main class for search transaction system
 * 
 */
// TODO: implement name
// TODO: add LuceneQueryOptimizer from solr
// TODO: use only serial mergescheduler in indexwriters
// TODO: create batch command that reindexes existing documents probably using new schema
// TODO: aggregation w/field collapsing using fieldcollapsequeue
// TODO: finish facet keys
// TODO: look into integrating checksum input streams
// TODO: diskindex close
// TODO: have name server maintain leases with node and merge servers
// TODO: turn off use of compound file format
// TODO: field caches need to use primitive types
// TODO: writeablememoryindex needs to dynamically update field cache using binary search
public class TransactionSystem {
	public static Logger log = Logger.getLogger(TransactionSystem.class.getName());
	public static final int MEMORY_INDEX_MAX_DOCS = 100;
	public static final int MAYBE_MERGE_DOC_CHANGES = 2000;
	public static final int MAX_RAM_INDICES_SIZE = 1024 * 1024 * 30;
	private ConcurrentHashMap<String, Category> categoryMap = new ConcurrentHashMap<String, Category>();
	private File rootDirectory;
	private File categoriesDirectory;
	private ThreadPoolExecutor commitThreadPool;
	private Replication replication;
	private int serverNumber;
	private IndexSchema.Loader schemaLoader;
	private Config config;

	/**
	 * Creates the transaction system and loads every existing category found in
	 * the "categories" subdirectory of rootDirectory.
	 *
	 * @param config        system-wide configuration (cache sizing)
	 * @param serverNumber  this server's number, used to partition document id sequences
	 * @param replication   replication log used by all categories
	 * @param rootDirectory root of on-disk state; "categories" is created beneath it
	 * @param schemaLoader  loader for index schemas, exposed via getIndexSchemaLoader()
	 */
	public TransactionSystem(Config config, int serverNumber, Replication replication, File rootDirectory, IndexSchema.Loader schemaLoader) throws Exception, IOException, CategoryException {
		this.config = config;
		this.serverNumber = serverNumber;
		this.replication = replication;
		this.rootDirectory = rootDirectory;
		// FIX: schemaLoader was previously dropped, leaving the field null and
		// making getIndexSchemaLoader() (used by Category schema lookups) return null.
		this.schemaLoader = schemaLoader;
		this.categoriesDirectory = new File(rootDirectory, "categories");
		this.categoriesDirectory.mkdirs();
		commitThreadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
		File[] categoryDirectories = categoriesDirectory.listFiles();
		if (categoryDirectories == null) {
			// listFiles() returns null on I/O error or if the path is not a directory
			throw new IOException("could not list categories directory: " + categoriesDirectory.getAbsolutePath());
		}
		for (File categoryDirectory : categoryDirectories) {
			// stray regular files in the categories directory are not categories
			if (!categoryDirectory.isDirectory()) {
				continue;
			}
			String name = categoryDirectory.getName();
			Category.Config categoryConfig = new Category.Config(null, getConfig());
			Category category = new Category(name, categoryConfig, categoryDirectory, this);
			categoryMap.put(name, category);
		}
	}
  
	/**
	 * System-wide cache sizing configuration. Values are nullable: null means
	 * "no system-wide override", and Category.Config falls back to the
	 * caller-supplied default in that case.
	 */
	public static class Config {
		private Integer docSetCacheMax;
		private Integer facetCacheMax;

		/** @return maximum doc-set cache size, or null when unset */
		public Integer getDocSetCacheMax() {
			return docSetCacheMax;
		}

		/** @return maximum facet cache size, or null when unset */
		public Integer getFacetCacheMax() {
			return facetCacheMax;
		}
	}
	
	/** @return the system-wide configuration supplied at construction */
	public Config getConfig() {
		return config;
	}

	/** @return the schema loader used to resolve category index schemas */
	public IndexSchema.Loader getIndexSchemaLoader() {
		return schemaLoader;
	}

	/** @return a snapshot copy of the currently registered categories */
	public List<Category> getCategories() {
		return new ArrayList<Category>(categoryMap.values());
	}

	/** @return the replication log shared by all categories */
	public Replication getReplication() {
		return replication;
	}

	/** @return the shared fixed-size pool used for commit and index-creation work */
	public ExecutorService getCommitThreadPool() {
		return commitThreadPool;
	}

	/**
	 * Creates a new category with the given name, creates its directory on disk,
	 * and registers it so getCategory/getCategories can see it.
	 *
	 * @param name unique category name (also the on-disk directory name)
	 * @return the newly created category
	 * @throws IllegalArgumentException if a category with this name already exists
	 */
	public Category newCategory(String name) throws IOException, CategoryException, Exception {
		if (categoryMap.containsKey(name)) {
			throw new IllegalArgumentException("name: " + name + " already exists");
		}
		File categoryDirectory = new File(categoriesDirectory, name);
		categoryDirectory.mkdirs();
		Category.Config categoryConfig = new Category.Config(null, getConfig());
		Category category = new Category(name, categoryConfig, categoryDirectory, this);
		// FIX: previously the new category was never put into categoryMap, so
		// getCategory() could not find it and the duplicate-name check above was
		// ineffective. putIfAbsent also closes the check-then-act race window.
		Category existing = categoryMap.putIfAbsent(name, category);
		if (existing != null) {
			throw new IllegalArgumentException("name: " + name + " already exists");
		}
		return category;
	}

	/** @return the category registered under name, or null if none exists */
	public Category getCategory(String name) {
		return categoryMap.get(name);
	}

	public static class Category {
		private File indicesDirectory;
		private String name;
		private LongSequence documentSequence;
		private LongSequence indexSequence;
		private Indices indices = new Indices(this);
		private ConcurrentTreeMap<Long, LargeBatch> largeBatchMap = new ConcurrentTreeMap<Long, LargeBatch>();
		private ReentrantLock commitLock = new ReentrantLock();
		private Snapshots snapshots;
		private TransactionSystem transactionSystem;
		private Replication replication;
		private ReentrantLock mergeIndicesLock = new ReentrantLock();
		private ExecutorService mergeThreadPool;
		private int docChangesSinceLastMerge = 0;
		private Category.Config config;

		/**
		 * Loads or initializes a category. When a snapshot exists on disk, the disk
		 * indices it references are reloaded, replication batches newer than the
		 * highest disk-index snapshot id are replayed into RAM indices, and the
		 * document id sequence is resumed past the highest document id observed.
		 * Otherwise the category starts empty at snapshot id 1. In both cases a
		 * fresh writeable memory index is created and an initial Snapshot covering
		 * all loaded indices is registered.
		 */
		public Category(String name, Category.Config config, File categoryDirectory, TransactionSystem transactionSystem) throws Exception, ParseException, CategoryException, IOException {
			this.name = name;
			this.config = config;
			this.transactionSystem = transactionSystem;
			this.indicesDirectory = new File(categoryDirectory, "indices");
			this.indicesDirectory.mkdirs();
			mergeThreadPool = Executors.newSingleThreadExecutor();
			// index ids continue after the highest id already on disk
			Long highestIndexId = getHighestID(indicesDirectory);
			indexSequence = new LongSequence(highestIndexId + 1, 1);
			snapshots = new Snapshots(this);
			replication = transactionSystem.getReplication();
			
			IndexSchema.Category schemaCategory = null;
			Long snapshotId = null;
			BigDecimal id = null;
			List<IndexSnapshot> indexSnapshots = null;
			SnapshotInfo snapshotInfo = Snapshot.loadHighestSnapshotInfo(indicesDirectory);
			if (snapshotInfo != null) {
				id = snapshotInfo.getId();
				snapshotId = snapshotInfo.getSnapshotId();
				// FIX: '==' on boxed Longs compares references, so the original assertion
				// was meaningless (and wrongly failed for values outside the Long cache).
				assert snapshotId.equals(replication.getHighestId()) : "snapshotId " + snapshotId + " != highest replication id";
				schemaCategory = getCategoryIndexSchema(snapshotInfo.getSchemaVersion());
				loadDiskIndices(snapshotId, indices, schemaCategory);
				indexSnapshots = new ArrayList<IndexSnapshot>();
				List<Long> snapshotIds = new LinkedList<Long>();
				for (IndexInfo indexInfo : snapshotInfo.getIndexInfos()) {
					if (indexInfo.getType().equals("disk")) {
						DiskIndex diskIndex = (DiskIndex) indices.get(indexInfo.getId());
						IndexSnapshot indexSnapshot = diskIndex.getIndexSnapshot(snapshotInfo.getSnapshotId());
						indexSnapshots.add(indexSnapshot);
						snapshotIds.add(indexSnapshot.getHighestSnapshotId());
					}
				}
				// replay everything committed after the newest disk index snapshot
				Long highestDiskIndexSnapshotID = Collections.max(snapshotIds);
				List<RamIndexSnapshot> ramIndexSnapshots = runTransactionsNotInIndex(highestDiskIndexSnapshotID);
				
				// TODO: verify all snapshots have same id
				indexSnapshots.addAll(ramIndexSnapshots);
				List<Long> documentIds = new ArrayList<Long>(indexSnapshots.size());
				for (IndexSnapshot indexSnapshot : indexSnapshots) {
					documentIds.add(indexSnapshot.getHighestDocumentId());
				}
				// resume the document sequence past the highest id seen, partitioned
				// by server number so servers do not collide
				Long highestDocumentID = Collections.max(documentIds);
				int serverNumber = transactionSystem.serverNumber;
				Long documentSequenceId = Util.getNextServerSequence(highestDocumentID, serverNumber);
				documentSequence = new LongSequence(documentSequenceId, 100);
			} else {
				// fresh category: no on-disk state, start at snapshot id 1
				snapshotId = new Long(1);
				id = new BigDecimal(snapshotId.toString());
				schemaCategory = getCategoryIndexSchema();
				documentSequence = new LongSequence(transactionSystem.serverNumber, 100);
			}			
			WriteableMemoryIndex writeableMemoryIndex = newWriteableMemoryIndex();
			MemoryIndexSnapshot writeableSnapshot = writeableMemoryIndex.setSnapshot(snapshotId, null, schemaCategory);
			Snapshot snapshot = new Snapshot(id, writeableSnapshot, indexSnapshots, this);
			snapshots.add(snapshot);
		}
		
		/**
		 * Per-category cache configuration. Lookup precedence for each value:
		 * category-level setting, then the system-wide TransactionSystem.Config
		 * setting, then the caller-supplied default.
		 */
		public static class Config {
			private Integer docSetCacheMax;
			private Integer facetCacheMax;
			private TransactionSystem.Config transactionSystemConfig;

			// NOTE(review): the element parameter is currently unused — presumably
			// intended to carry per-category overrides from XML; confirm before
			// relying on it.
			public Config(Element element, TransactionSystem.Config transactionSystemConfig) {
				this.transactionSystemConfig = transactionSystemConfig;
			}

			/** Facet cache size: category value, else system value, else defaultValue. */
			public int getFacetCacheMax(int defaultValue) {
				if (facetCacheMax != null) {
					return facetCacheMax;
				}
				Integer systemValue = transactionSystemConfig.getFacetCacheMax();
				return systemValue != null ? systemValue : defaultValue;
			}

			/** Doc-set cache size: category value, else system value, else defaultValue. */
			public int getDocSetCacheMax(int defaultValue) {
				if (docSetCacheMax != null) {
					return docSetCacheMax;
				}
				Integer systemValue = transactionSystemConfig.getDocSetCacheMax();
				return systemValue != null ? systemValue : defaultValue;
			}
		}
		
		/** @return this category's configuration */
		public Config getConfig() {
			return config;
		}

		/** @return the registry of live indices for this category */
		public Indices getIndices() {
			return indices;
		}

		/** @return the registry of snapshots for this category */
		public Snapshots getSnapshots() {
			return snapshots;
		}

		/**
		 * Ordered registry of Snapshot objects keyed by their BigDecimal id, each
		 * backed by a SnapshotInfo file in the category's indices directory.
		 * Adding a snapshot persists it and prunes every older snapshot (both the
		 * map entry and its file).
		 */
		public static class Snapshots {
			private ConcurrentTreeMap<BigDecimal, Snapshot> snapshotMap = new ConcurrentTreeMap<BigDecimal, Snapshot>();
			private Category category;

			public Snapshots(Category category) {
				this.category = category;
			}

			// public SnapshotInfo getHighestSnapshotInfo() {

			// }

			// Returns the highest snapshot whose id lies in [snapshotId, snapshotId + 1).
			// NOTE(review): subMap.lastKey() throws NoSuchElementException when no such
			// snapshot exists — callers presumably guarantee presence; confirm.
			public Snapshot get(long snapshotId) {
				Long head = snapshotId + 1;
				SortedMap<BigDecimal, Snapshot> subMap = snapshotMap.subMap(new BigDecimal(snapshotId), new BigDecimal(head));
				return subMap.get(subMap.lastKey());
			}

			// Exact-id membership test.
			public boolean contains(BigDecimal id) {
				return snapshotMap.containsKey(id);
			}

			// True when any snapshot id lies in [snapshotId, snapshotId + 1).
			public boolean contains(Long snapshotId) {
				Long head = snapshotId + 1;
				SortedMap<BigDecimal, Snapshot> subMap = snapshotMap.subMap(new BigDecimal(snapshotId), new BigDecimal(head));
				return subMap.size() > 0;
			}

			// True when any registered snapshot still references the given index id;
			// used by Indices.cleanup to decide whether an index directory is live.
			public boolean containsIndex(long indexid) {
				for (Snapshot snapshot : snapshotMap.values()) {
					if (snapshot.containsIndex(indexid))
						return true;
				}
				return false;
			}

			// File that persists the SnapshotInfo for the given snapshot id.
			private File getFile(BigDecimal id) {
				String fileName = Snapshot.getFileName(id);
				return new File(category.indicesDirectory, fileName);
			}

			// Drops the snapshot from the map and deletes its backing file.
			// NOTE(review): File.delete() result is ignored — a failed delete is silent.
			private void remove(Snapshot snapshot) {
				snapshotMap.remove(snapshot.getId());
				File file = getFile(snapshot.getId());
				file.delete();
			}

			/** @return the snapshot with the highest id (the current one) */
			public Snapshot getLatestSnapshot() {
				return snapshotMap.lastValue();
			}

			// Persists and registers the snapshot, then removes every snapshot with a
			// smaller id (headMap — presumably exclusive of id, per SortedMap semantics).
			private void add(Snapshot snapshot) throws IOException {
				BigDecimal id = snapshot.getId();
				SnapshotInfo snapshotInfo = snapshot.getSnapshotInfo();
				File file = getFile(id);
				snapshotInfo.writeTo(file);
				snapshotMap.put(id, snapshot);
				SortedMap<BigDecimal, Snapshot> headMap = snapshotMap.headMap(id);
				for (Snapshot removeSnapshot : headMap.values()) {
					remove(removeSnapshot);
				}
			}
		}

		/**
		 * Registry of live Index instances for a category, keyed by index id.
		 * NOTE(review): backed by a plain HashMap and mutated from both the commit
		 * and merge paths — confirm all access is serialized (e.g. under commitLock
		 * / mergeIndicesLock).
		 */
		public static class Indices {
			private Category category;
			private Map<Long, Index> indexMap = new HashMap<Long, Index>();

			public Indices(Category category) {
				this.category = category;
			}

			/** @return the index with the given id, or null if not registered */
			public Index get(long id) {
				return indexMap.get(id);
			}

			/** Registers (or replaces) an index under its own id. */
			public void add(Index index) {
				indexMap.put(index.getId(), index);
			}
      // TODO: cleanup of indices needs to iterate over the snapshots for refs and delete
			// Deletes index directories no longer referenced by any snapshot and not
			// holding a snapshot lock. Index directories are named by numeric id; a
			// non-numeric directory name would throw NumberFormatException here.
			public void cleanup() throws IOException {
				Snapshots snapshots = category.getSnapshots();
				for (File file : category.indicesDirectory.listFiles()) {
					if (file.isDirectory()) {
						Long indexid = new Long(file.getName());
						if (!snapshots.containsIndex(indexid)) {
							// TODO: if snapshot lock is too old then delete anyways
							if (!DiskIndex.hasSnapshotLock(file)) {
								FileUtils.deleteDirectory(file);
								indexMap.remove(indexid);
							}
						}
					}
				}
			}
		}

		/**
		 * Rebuilds in-memory index state from replication batches whose id is >=
		 * startSnapshotId (i.e. transactions committed after the newest disk-index
		 * snapshot). Document batches are fed through an IndexCreator into a fresh
		 * RAMDirectory; batches that already carry a RAMDirectory are collected
		 * as-is; deletes from all batches are gathered and applied to every
		 * resulting RamIndex.
		 *
		 * @param startSnapshotId first replication id to replay
		 * @return one RamIndexSnapshot per RAMDirectory produced
		 */
		private List<RamIndexSnapshot> runTransactionsNotInIndex(Long startSnapshotId) throws Exception, CategoryException, IOException {
			log.info("startSnapshotId: " + startSnapshotId);
			Long lookupSnapshotId = startSnapshotId;
			long indexId = indexSequence.getAndIncrement();
			RAMDirectory ramDirectory = new RAMDirectory();
			ExecutorService threadPool = transactionSystem.getCommitThreadPool();
			IndexCreator indexCreator = new IndexCreator(ramDirectory, Long.MAX_VALUE, 4, threadPool);
			BlockingQueue<IndexCreator.Add> addQueue = new ArrayBlockingQueue<IndexCreator.Add>(4000, true);
			List<Deletes> deletesList = new ArrayList<Deletes>();
			indexCreator.start(getDefaultAnalyzer(), addQueue);
			IndexSchema.Category schemaCategory = null;
			List<RAMDirectory> ramDirectories = new ArrayList<RAMDirectory>();
			// walk the replication log forward from startSnapshotId until exhausted
			for (;;) {
				Batch.SlaveBatch slaveBatch = replication.getBatchGreaterOrEqualTo(name, lookupSnapshotId);
				if (slaveBatch == null)
					break;
				Analyzer analyzer = slaveBatch.getAnalyzer();
				schemaCategory = slaveBatch.getIndexSchemaCategory();
				if (slaveBatch.hasDocuments()) {
					IndexDocuments documents = slaveBatch.getDocuments();
					for (IndexDocument indexDocument : documents) {
						// NOTE(review): add() throws IllegalStateException if the bounded
						// queue fills faster than IndexCreator drains it — confirm put()
						// is not needed here.
						addQueue.add(new IndexCreator.Add(indexDocument, schemaCategory));
					}
				} else if (slaveBatch.hasRAMDirectory()) {
					ramDirectories.add(slaveBatch.getRamDirectory());
				}
				if (slaveBatch.hasDeletes()) {
					deletesList.add(slaveBatch.getDeletes());
				}
				lookupSnapshotId = slaveBatch.getId() + 1;
			}
			indexCreator.create();
			ramDirectories.add(ramDirectory);
			Long snapshotId = replication.getHighestId();
			List<RamIndexSnapshot> indexSnapshots = new ArrayList<RamIndexSnapshot>(ramDirectories.size());
			// NOTE(review): every RamIndex below is created with the same indexId —
			// confirm ids need not be unique per index here.
			for (RAMDirectory rd : ramDirectories) {
				RamIndex ramIndex = new RamIndex(indexId, snapshotId, deletesList, rd, Category.this, schemaCategory);
				indices.add(ramIndex);
				RamIndexSnapshot indexSnapshot = (RamIndexSnapshot)ramIndex.getIndexSnapshot(snapshotId);
				indexSnapshots.add(indexSnapshot);
			}
			return indexSnapshots;
		}

		/** Exception for category-level failures (load, replay, commit). */
		public static class CategoryException extends Exception {
			// FIX: serializable class was missing an explicit serialVersionUID,
			// making serialized form dependent on compiler-generated ids.
			private static final long serialVersionUID = 1L;

			public CategoryException(String message) {
				super(message);
			}

			public CategoryException(String message, Exception exception) {
				super(message, exception);
			}
		}

		/**
		 * Loads every on-disk index found under the indices directory into indices.
		 * Index directories are named by numeric index id. An index that fails to
		 * open (e.g. a partially written merge) is deleted rather than loaded.
		 *
		 * @param snapshotId     snapshot id the loaded indices are opened at
		 * @param indices        registry to add the loaded DiskIndex instances to
		 * @param schemaCategory schema used to open each index
		 */
		private void loadDiskIndices(Long snapshotId, Indices indices, IndexSchema.Category schemaCategory) throws Exception, IOException {
			File[] files = indicesDirectory.listFiles();
			if (files == null) {
				// listFiles() returns null on I/O error or if the path is not a directory
				throw new IOException("could not list indices directory: " + indicesDirectory.getAbsolutePath());
			}
			for (File file : files) {
				if (file.isDirectory()) {
					Long indexID = new Long(file.getName());
					try {
						DiskIndex diskIndex = new DiskIndex(indexID, file, this, snapshotId, schemaCategory);
						indices.add(diskIndex);
					} catch (IndexException indexException) {
						// FIX: pass the exception to the logger so the cause of the
						// failed load is not lost before the directory is deleted.
						log.log(Level.SEVERE, "index not ready, deleting: " + file.getAbsolutePath(), indexException);
						FileUtils.deleteDirectory(file);
					}
				}
			}
		}

		/**
		 * Returns the highest numeric id among the subdirectory names of directory,
		 * or 0 when there are none. Index subdirectories are named by their id.
		 */
		private long getHighestID(File directory) {
			TreeSet<Long> sortedSet = new TreeSet<Long>();
			File[] files = directory.listFiles();
			if (files != null) {
				for (File file : files) {
					if (file.isDirectory()) {
						sortedSet.add(new Long(file.getName()));
					}
				}
			}
			// FIX: TreeSet.last() throws NoSuchElementException on an empty set (it
			// never returns null), so the previous null check was dead code and a
			// brand-new category with an empty indices directory crashed here.
			if (sortedSet.isEmpty()) {
				return 0;
			}
			return sortedSet.last().longValue();
		}

		/** @return this category's name (also its directory name) */
		public String getName() {
			return name;
		}

		/** Current category schema, fetched via the system schema loader (1s timeout). */
		public IndexSchema.Category getCategoryIndexSchema() throws Exception {
			IndexSchema indexSchema = transactionSystem.getIndexSchemaLoader().getIndexSchema(new Timeout(1000));
			return indexSchema.getCategory(name);
		}

		/** Category schema for a specific schema version (1s timeout). */
		public IndexSchema.Category getCategoryIndexSchema(long schemaVersion) throws Exception {
			IndexSchema indexSchema = transactionSystem.getIndexSchemaLoader().getIndexSchema(schemaVersion, new Timeout(1000));
			return indexSchema.getCategory(name);
		}

		/** Analyzer configured for this category in the current schema (1s timeout). */
		public Analyzer getDefaultAnalyzer() throws Exception {
			IndexSchema indexSchema = transactionSystem.getIndexSchemaLoader().getIndexSchema(new Timeout(1000));
			return indexSchema.getCategory(name).getAnalyzer();
		}

		/** @return the owning transaction system */
		public TransactionSystem getTransactionSystem() {
			return transactionSystem;
		}

		/** Creates a master batch bound to the schema at the given version. */
		public MasterBatch createMasterBatch(long schemaVersion) throws Exception {
			IndexSchema.Category schemaCategory = getCategoryIndexSchema(schemaVersion);
			return new MasterBatch(this, schemaCategory);
		}

		/**
		 * Creates and registers a large batch backed by its own directory under the
		 * indices directory (named by a freshly allocated index id).
		 * NOTE(review): the 10.0 constant is presumably a RAM buffer size in MB for
		 * the underlying index writer — confirm.
		 */
		public LargeBatch createLargeBatch() throws Exception {
			long id = indexSequence.getAndIncrement();
			File directoryFile = new File(indicesDirectory, Long.toString(id));
			LargeBatch largeBatch = new LargeBatch(id, directoryFile, getDefaultAnalyzer(), 10.0, this);
			largeBatchMap.put(id, largeBatch);
			return largeBatch;
		}

		/** @return the large batch registered under id, or null if none */
		public LargeBatch getLargeBatch(long id) {
			return largeBatchMap.get(id);
		}

		/**
		 * Background task (run on the single-threaded mergeThreadPool) that merges
		 * the latest snapshot's indices along three escalation paths: writeable
		 * memory index -> RamIndex (by doc count), RamIndices -> DiskIndex (by
		 * total RAM size), and DiskIndices -> DiskIndex (by deleted-doc ratio).
		 * All merging is serialized by mergeIndicesLock; snapshot swaps are
		 * additionally serialized by commitLock.
		 */
		public class MaybeMergeIndices implements Runnable {
			public MaybeMergeIndices() {
			}

			public void run() {
				mergeIndicesLock.lock();
				try {
					Snapshot snapshot = snapshots.getLatestSnapshot();
					maybeMergeWriteable(snapshot);
					maybeMergeRamIndices(snapshot);
					maybeMergeDiskIndices(snapshot);
				} catch (Throwable throwable) {
					// a merge failure must not kill the single merge thread
					log.log(Level.SEVERE, "", throwable);
				} finally {
					mergeIndicesLock.unlock();
				}
			}

			// Merges all RAM indices into one disk index once their combined size
			// exceeds MAX_RAM_INDICES_SIZE.
			private void maybeMergeRamIndices(Snapshot snapshot) throws Exception {
				long size = 0;
				List<RamIndexSnapshot> ramIndexSnapshots = snapshot.getRamIndexSnapshots();
				for (RamIndexSnapshot ramIndexSnapshot : ramIndexSnapshots) {
					RamIndex ramIndex = (RamIndex) ramIndexSnapshot.getIndex();
					size += ramIndex.getSize();
				}
				if (size > MAX_RAM_INDICES_SIZE) {
					executeMerge(ramIndexSnapshots, snapshot);
				}
			}

			// Re-merges disk indices whose deleted-doc ratio exceeds 50%, skipping any
			// that hold a snapshot lock.
			// NOTE(review): executeMerge is invoked even when the candidate list is
			// empty, which appears to create an empty disk index each pass — confirm
			// this is intended.
			private void maybeMergeDiskIndices(Snapshot snapshot) throws Exception {
				Long snapshotId = snapshot.getSnapshotId();
				List<IndexSnapshot> indexSnapshotsToMerge = new ArrayList<IndexSnapshot>();
				for (DiskIndex diskIndex : snapshot.getDiskIndices()) {
					DiskIndexSnapshot indexSnapshot = (DiskIndexSnapshot) diskIndex.getIndexSnapshot(snapshotId);
					if (diskIndex.hasTooManyDeletedDocs(0.5) && !diskIndex.hasSnapshotLock()) {
						indexSnapshotsToMerge.add(indexSnapshot);
					}
				}
				executeMerge(indexSnapshotsToMerge, snapshot);
			}

			/**
			 * Converts the current writeable memory index to a RamIndex once it holds
			 * MEMORY_INDEX_MAX_DOCS or more documents, then publishes a minor snapshot
			 * that swaps in a fresh empty writeable memory index. Runs under
			 * commitLock so it cannot race a concurrent commit's snapshot swap.
			 *
			 * @param snapshot
			 * @throws Exception
			 */
			private void maybeMergeWriteable(Snapshot snapshot) throws Exception {
				MemoryIndexSnapshot writeableIndexSnapshot = snapshot.getWriteableSnapshot();
				int maxDoc = writeableIndexSnapshot.getIndexReader().maxDoc();
				if (maxDoc >= MEMORY_INDEX_MAX_DOCS) {
					commitLock.lock();
					try {
						long indexId = indexSequence.getAndIncrement();
						RamIndex ramIndex = new RamIndex(indexId, writeableIndexSnapshot);
						indices.add(ramIndex);
						Snapshot currentSnapshot = snapshots.getLatestSnapshot();
						List<Long> removeIndexIds = new ArrayList<Long>();
						removeIndexIds.add(writeableIndexSnapshot.getIndex().getId());
						WriteableMemoryIndex newWriteableMemoryIndex = newWriteableMemoryIndex();
						MemoryIndexSnapshot newMemoryIndexSnapshot = newWriteableMemoryIndex.setSnapshot(snapshot.getSnapshotId(), null, snapshot.getIndexSchemaCategory());
						Snapshot newSnapshot = currentSnapshot.createMinor(removeIndexIds, newMemoryIndexSnapshot, ramIndex.getLatestIndexSnapshot());
						snapshots.add(newSnapshot);
					} finally {
						commitLock.unlock();
					}
				}
			}

			// Builds a new DiskIndex from the given index snapshots, replays any
			// delete-only batches committed between the merge snapshot and the latest
			// snapshot, and publishes a minor snapshot that swaps the merged indices
			// for the new one. The snapshot swap runs under commitLock.
			private void executeMerge(List<? extends IndexSnapshot> indexSnapshots, Snapshot snapshot) throws Exception {
				Long snapshotID = snapshot.getSnapshotId();
				Long indexID = indexSequence.getAndIncrement();
				File diskIndexFileDirectory = new File(indicesDirectory, indexID.toString());
				diskIndexFileDirectory.mkdirs();
				IndexSchema.Category schemaCategory = snapshot.getIndexSchemaCategory();
				DiskIndex newDiskIndex = new DiskIndex(indexID, diskIndexFileDirectory, indexSnapshots, Category.this, schemaCategory);
				indices.add(newDiskIndex);
				commitLock.lock();
				try {
					List<SlaveBatch> deleteOnlySlaveBatches = null;
					Snapshot currentSnapshot = snapshots.getLatestSnapshot();
					Long latestSnapshotID = currentSnapshot.getSnapshotId();
					// commits may have landed since the merge started; fetch their deletes
					if (!snapshotID.equals(latestSnapshotID)) {
						EnumSet<Load> load = EnumSet.of(Load.DELETES);
						deleteOnlySlaveBatches = replication.getBatchesGreaterOrEqualTo(load, name, snapshotID);
					}
					IndexSnapshot newIndexSnapshot = newDiskIndex.initialize(latestSnapshotID, deleteOnlySlaveBatches, schemaCategory);
					List<Long> removeIndexIds = new ArrayList<Long>();
					for (IndexSnapshot indexSnapshot : indexSnapshots) {
						Index index = indexSnapshot.getIndex();
						removeIndexIds.add(index.getId());
					}
					Snapshot newSnapshot = currentSnapshot.createMinor(removeIndexIds, newIndexSnapshot);
					snapshots.add(newSnapshot);
				} finally {
					commitLock.unlock();
				}
			}
		}

		// NOTE(review): placeholder — the disk-index registration is commented out,
		// so this currently does nothing with the batch beyond reading its directory;
		// confirm intended behavior before removing.
		void saveLargeBatchMaster(LargeBatch largeBatch) throws ReplicationException, IndexException, IOException, Exception {
			File directoryFile = largeBatch.getDirectoryFile();
			//DiskIndex diskIndex = new DiskIndex(largeBatch.getID(), directoryFile, getDefaultAnalyzer(), this);
			//indices.add(diskIndex);
		}

		/**
		 * Commits a batch under commitLock and publishes a new Snapshot for the
		 * resulting commit point. Master batches are assigned a fresh replication
		 * id and per-document ids/versions; slave batches keep their
		 * master-assigned id. After the transaction runs, a background merge is
		 * scheduled once enough document changes have accumulated or the writeable
		 * memory index is full.
		 *
		 * @param batch the closed batch to commit (closed again here defensively)
		 * @return the transaction's commit result
		 * @throws CategoryException if the transaction fails
		 */
		CommitResult commitBatch(Batch batch) throws Exception, IOException, CategoryException {
			batch.close();
			commitLock.lock();
			try {
				Long snapshotId = null;
				IndexSchema.Category schemaCategory = batch.getIndexSchemaCategory();
				if (batch instanceof SlaveBatch) {
					// slave batches replay with the id the master already assigned
					snapshotId = ((SlaveBatch) batch).getId();
				} else {
					snapshotId = replication.getNextId();
					if (batch.hasDocuments()) {
						IndexDocuments documents = batch.getDocuments();
						for (IndexDocument document : documents) {
							Long documentId = documentSequence.getAndIncrement();
							document.setID(documentId);
							document.setSnapshotID(snapshotId);
							document.setSchemaVersion(schemaCategory.getIndexSchema().getVersion());
						}
						// large document sets are pre-indexed into a RAMDirectory so they
						// do not flood the writeable memory index
						if (documents.size() >= MEMORY_INDEX_MAX_DOCS) {
							RAMDirectory ramDirectory = createRamDirectory(batch.getIndexSchemaCategory(), documents);
							batch.setRAMDirectory(ramDirectory);
						}
					}
				}
				ExecutorService threadPool = transactionSystem.getCommitThreadPool();
				Snapshot currentSnapshot = snapshots.getLatestSnapshot();
				MemoryIndexSnapshot writeableIndexSnapshot = currentSnapshot.getWriteableSnapshot();
				WriteableMemoryIndex writeableMemoryIndex = (WriteableMemoryIndex) writeableIndexSnapshot.getIndex();
				List<Index> nonWriteableIndices = currentSnapshot.getDeleteOnlyIndices();
				Transaction transaction = null;
				CommitResult commitResult = null;
				try {
					Replication replication = transactionSystem.getReplication();
					Long previousId = replication.getPreviousId(snapshotId);
					transaction = new Transaction(snapshotId, previousId, batch, writeableMemoryIndex, nonWriteableIndices, threadPool, this, schemaCategory);
					commitResult = transaction.getCommitResult();
				} catch (Exception exception) {
					// FIX: pass the exception to the logger — previously only the bare
					// message was logged and the stack trace was lost from the log.
					log.log(Level.SEVERE, "transaction failed", exception);
					throw new CategoryException("transaction failed", exception);
				}
				// assemble the new commit point: snapshots of all delete-only indices,
				// any indices newly created by the transaction, and the writeable index
				List<IndexSnapshot> indexSnapshots = new ArrayList<IndexSnapshot>(nonWriteableIndices.size() + 1);
				for (Index index : nonWriteableIndices) {
					indexSnapshots.add(index.getIndexSnapshot(snapshotId));
				}
				for (IndexSnapshot newIndexSnapshot : transaction.getNewIndexSnapshots()) {
					indices.add(newIndexSnapshot.getIndex());
					indexSnapshots.add(newIndexSnapshot);
				}
				indexSnapshots.add(writeableMemoryIndex.getIndexSnapshot(snapshotId));
				Snapshot newSnapshot = new Snapshot(snapshotId, 0, writeableIndexSnapshot, indexSnapshots, this, schemaCategory);
				snapshots.add(newSnapshot);
				docChangesSinceLastMerge += commitResult.getNumDocChanges();
				int writeableMaxDoc = writeableMemoryIndex.getLatestIndexSnapshot().getIndexReader().maxDoc();
				// schedule a background merge once enough changes accumulated or the
				// writeable memory index is full
				if (docChangesSinceLastMerge > MAYBE_MERGE_DOC_CHANGES || writeableMaxDoc >= MEMORY_INDEX_MAX_DOCS) {
					mergeThreadPool.submit(new MaybeMergeIndices());
					docChangesSinceLastMerge = 0;
				}
				// TODO: reset document sequence

				return commitResult;
			} finally {
				commitLock.unlock();
			}
		}

		/**
		 * Indexes the given documents into a new RAMDirectory using the category's
		 * analyzer and a 4-way IndexCreator fed from a bounded queue.
		 * NOTE(review): addQueue.add throws IllegalStateException if the 1000-entry
		 * queue fills faster than IndexCreator drains it — confirm put() is not
		 * needed here.
		 */
		RAMDirectory createRamDirectory(IndexSchema.Category schemaCategory, IndexDocuments documents) throws Exception {
			RAMDirectory ramDirectory = new RAMDirectory();
			ExecutorService threadPool = transactionSystem.getCommitThreadPool();
			IndexCreator indexCreator = new IndexCreator(ramDirectory, Long.MAX_VALUE, 4, threadPool);
			BlockingQueue<IndexCreator.Add> addQueue = new ArrayBlockingQueue<IndexCreator.Add>(1000, true);
			indexCreator.start(schemaCategory.getAnalyzer(), addQueue);
			for (IndexDocument indexDocument : documents) {
				addQueue.add(new IndexCreator.Add(indexDocument, schemaCategory));
			}
			indexCreator.create();
			return ramDirectory;
		}

		/** Allocates and returns the next index id from this category's sequence. */
		long getNextIndexId() {
			return indexSequence.getAndIncrement();
		}

		/**
		 * Creates a writeable memory index with a freshly allocated index id and
		 * the category's default analyzer, and registers it in indices.
		 */
		private WriteableMemoryIndex newWriteableMemoryIndex() throws Exception {
			Long indexID = indexSequence.getAndIncrement();
			Analyzer defaultAnalyzer = getDefaultAnalyzer();
			WriteableMemoryIndex writeableMemoryIndex = new WriteableMemoryIndex(indexID, defaultAnalyzer, this);
			indices.add(writeableMemoryIndex);
			return writeableMemoryIndex;
		}
	}
}
