package org.apache.ocean;

import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.ocean.cache.Cache;
import org.apache.ocean.database.JDBCDatabase;
import org.apache.ocean.server.FieldType;
import org.apache.ocean.util.OceanConcurrentHashMap;

/**
 * All update master methods write to the transaction log.
 * 
 * @author jason rutherglen
 */
// TODO: lowest transactionid index needs to look at ram if disk is null
// TODO: delete by query would look up all id versions of a query and then
// delete by id_version field terms
// TODO: highest index transaction id needs to be saved as new documents are
// added
// TODO: get terms for document in index
// TODO: create batchmanager
// TODO: create filter merger only for on disk index
// TODO: transaction log reading needs to handle commit record, rollback record
// TODO: open transaction batch needs to timeout
// TODO: update transaction record needs to keep record of previous transaction id
// TODO: slave needs collision resolver where master wins
// TODO: large batches need to be planned out, where data is organized by type in the shard, and swapped in and out
// TODO: create searchindexshard of type batch
public class SearchIndexShard {
	// Maximum time to wait for the Lucene write lock, in milliseconds.
	public static final long WRITE_LOCK_TIMEOUT = 10 * 1000;
	// Size threshold (MB) at which an in-ram index is flushed to disk.
	public static final CDouble MAX_RAM_INDEX_MB = new CDouble(5.0);
	// Size threshold (MB) above which isSizeOverLimit() reports true.
	public static final CDouble MAX_SHARD_SIZE = new CDouble(64.0);
	// Name of the on-disk sequence that hands out document ids.
	public static final String DOCUMENT_SEQUENCE = "document";
	// Name of the on-disk sequence that hands out transaction ids.
	public static final String TRANSACTION_SEQUENCE = "transaction";
	public static Logger log = Logger.getLogger(SearchIndexShard.class.getName());
	// Shard type/status (e.g. ClusterConstants.READ_WRITE or READ).
	private AtomicReference<String> type = new AtomicReference<String>();
	// Root directory of this shard; its name doubles as the shard id.
	private File directory;
	private Date dateCreated;
	private AtomicBoolean isClosed = new AtomicBoolean(false);
	private AtomicBoolean overThreshold = new AtomicBoolean(false);
	private FSDirectory fsDirectory;
	private LockFactory lockFactory;
	// Global registry of open shards keyed by directory name; the constructor
	// refuses to open a shard whose name is already registered.
	public static Map<String, SearchIndexShard> searchDirectoryMap = new OceanConcurrentHashMap<String, SearchIndexShard>();
	// Transaction log backing store (SQL database under directory/database).
	private SQLTransactionDatabase transactionDatabase;
	private CSequence transactionIDSequence;
	private CSequence documentIDSequence;
	private RamIndexManager ramIndexManager;
	// Modifier for the on-disk Lucene index.
	private ClusterIndexModifier clusterIndexModifier;
	private Cache cache;
	private File databaseDirectory;
	private CurrentIndexSchema currentIndexSchema;
	// Callback interface to the owning service (schema, cell id, corruption reports).
	private ShardInterface shardInterface;
	private String id;
	private RestartableTimer transactionLogReaderTimer;
	// Serializes reload() calls.
	private ReentrantLock reloadLock = new ReentrantLock();
	// Set true on every write; reload() short-circuits when false.
	private boolean transactionsSinceLastReload = true;
	// Cached highest transaction id present in the index; see
	// getHighestTransactionIDIndex/setHighestTransactionIDIndex.
	private TransactionID highestTransactionIDIndex;

	/**
	 * Opens (or creates) a shard rooted at the given directory, recovering from
	 * an interrupted previous creation, wiring up the transaction database,
	 * sequences, ram and disk indices, and starting the transaction log reader
	 * timer.  Registers the shard in the global searchDirectoryMap and throws
	 * if a shard with the same directory name is already open.
	 */
	public SearchIndexShard(File directory, ShardInterface shardInterface) throws Exception {
		this.directory = directory;
		directory.mkdirs();
		// The directory name is the shard id.
		id = directory.getName();
		this.shardInterface = shardInterface;
		currentIndexSchema = shardInterface.getCurrentIndexSchema();
		fsDirectory = getFSDirectory();
		databaseDirectory = new File(directory, "database");
		// "create.complete" marks a construction that finished; a database
		// directory without it means the previous create was interrupted, so
		// wipe the shard directory and start fresh.
		File createCompleteFile = new File(directory, "create.complete");
		if (databaseDirectory.exists() && !createCompleteFile.exists()) {
			FileUtils.cleanDirectory(directory);
		}
		setType(ClusterConstants.READ_WRITE);

		File sequenceDirectory = new File(directory, "sequences");
		sequenceDirectory.mkdirs();
		transactionIDSequence = new CSequence(TRANSACTION_SEQUENCE, sequenceDirectory);
		documentIDSequence = new CSequence(DOCUMENT_SEQUENCE, sequenceDirectory);

		lockFactory = new NativeFSLockFactory(directory);
		dateCreated = new Date(directory.lastModified());
		ramIndexManager = new RamIndexManager(MAX_RAM_INDEX_MB, new StandardAnalyzer());
		ClusterUtil.testWriteableDirectory(directory);
		// Refuse to double-open a shard with the same name.
		if (searchDirectoryMap.containsKey(directory.getName())) {
			String msg = "shard already exists: " + directory.getAbsolutePath();
			SearchService.seriousLog.log(Level.INFO, msg);
			throw new RuntimeException(msg);
		}
		searchDirectoryMap.put(directory.getName(), this);
		// Clear a stale Lucene write lock left by a crashed process; failure
		// here is logged but deliberately non-fatal.
		try {
			IndexReader.unlock(fsDirectory);
		} catch (Throwable ex) {
			log.log(Level.SEVERE, "", ex);
		}
		transactionDatabase = new SQLTransactionDatabase(databaseDirectory);
		LogByteSizeMergePolicy mergePolicy = new LogByteSizeMergePolicy();
		CurrentIndexSchema currentIndexSchema = shardInterface.getCurrentIndexSchema();
		// Analyzer analyzer =
		// currentIndexSchema.getIndexSchema(TimeoutState.FOREVER()).getAnalyzer();
		clusterIndexModifier = new ClusterIndexModifier(new CDouble(2.0), mergePolicy, fsDirectory, new StandardAnalyzer());
		FileUtils.touch(createCompleteFile);
		cache = new Cache(currentIndexSchema);
		// Replay any transactions not yet in the index, then flush finished ram
		// indices to disk before starting the background log reader.
		reload(CLong.MAX_VALUE);
		syncRamIndex(true);
		transactionLogReaderTimer = new RestartableTimer("transactionLogReaderTimer", new TransactionLogReaderTask(), 3 * 1000, 2 * 60 * 1000);
	}
	
	/**
	 * Resolves an index file for replication: tries the on-disk index first
	 * when the id matches, otherwise falls back to the ram indices.
	 */
	public IndexInput getIndexFileInput(String indexID, String snapshotName, String fileName) throws Exception {
		if (clusterIndexModifier.getID().equals(indexID)) {
			IndexInput diskInput = clusterIndexModifier.getIndexFileInput(snapshotName, fileName);
			if (diskInput != null) {
				return diskInput;
			}
		}
		return ramIndexManager.getIndexFileInput(indexID, snapshotName, fileName);
	}

	/**
	 * Builds a ShardInfo snapshot describing this shard: id, status, highest
	 * database transaction id, the disk index, and one entry per ram index.
	 */
	public ShardInfo createShardInfo() throws Exception {
		ShardInfo info = new ShardInfo();
		info.id = getID();
		info.status = getType();
		info.highestTransactionIDDatabase = getHighestTransactionIDDatabase();

		// Describe the on-disk index.
		ShardInfo.Index disk = new ShardInfo.Index();
		disk.segmentsVersion = clusterIndexModifier.getSegmentsVersion();
		disk.type = ShardInfo.Index.DISK;
		disk.snapshot = clusterIndexModifier.getLatestSnapshot();
		info.add(disk);

		// Append an entry for each ram index, when any exist.
		Map<String, ShardInfo.Index> ramIndices = ramIndexManager.createShardInfoIndexes();
		if (ramIndices != null) {
			for (ShardInfo.Index ramIndex : ramIndices.values()) {
				info.add(ramIndex);
			}
		}
		return info;
	}

	/** Callbacks through which the shard talks to its owning service. */
	public interface ShardInterface {
		/** Invoked after a new transaction record has been written. */
		void newTransactionRecord(TransactionRecord transactionRecord) throws Exception;

		/** Supplies the currently active index schema. */
		CurrentIndexSchema getCurrentIndexSchema() throws Exception;

		/** Identifier of the cell this shard belongs to. */
		String getCellID() throws Exception;

		/** Reports index corruption detected during an operation. */
		void reportCorruptIndex(SearchIndexShard searchIndexShard, CorruptIndexException corruptIndexException);

		/** Notifies the service that the shard has been closed. */
		void shardClosed(SearchIndexShard searchIndexShard);
	}

	/** Field/document cache shared by the shard's searchers. */
	public Cache getCache() {
		return cache;
	}

	/**
	 * Replays transactions from the database into the index, up to
	 * numTransactions, if anything changed since the last reload.  Serialized
	 * by reloadLock.
	 *
	 * @param numTransactions maximum number of transactions to replay
	 * @return the number of transactions executed
	 * @throws ClusterException if the database's highest transaction id is
	 *         behind the index's (should be impossible)
	 */
	// TODO: needs to keep track of when to reload based on if updates have been
	// made since last one
	public CLong reload(CLong numTransactions) throws Exception {
		reloadLock.lock();
		try {
			// Fast path: nothing written since the last reload.
			if (!transactionsSinceLastReload)
				return new CLong(0);
			IndexSchema indexSchema = currentIndexSchema.getIndexSchema();
			TransactionID highestTransactionIDIndex = getHighestTransactionIDIndex(indexSchema);
			TransactionID highestTransactionIDDatabase = getHighestTransactionIDDatabase();
			log.info("reload highestTransactionIDIndex: " + highestTransactionIDIndex + " highestTransactionIDDatabase: " + highestTransactionIDDatabase);
			// No transactions in the database at all: nothing to replay.
			if (highestTransactionIDDatabase == null)
				return new CLong(0);
			if (highestTransactionIDIndex == null || highestTransactionIDDatabase.compareTo(highestTransactionIDIndex) > 0) {
				// The index is behind the database: replay the gap.
				return doReload(numTransactions, highestTransactionIDIndex);
			} else if (highestTransactionIDDatabase.equals(highestTransactionIDIndex)) {
				// Index already up to date; fall through and return 0.
			} else if (highestTransactionIDDatabase.compareTo(highestTransactionIDIndex) < 0) {
				// The index claims a transaction the database never recorded.
				throw new ClusterException("highestTransactionIDDatabase: " + highestTransactionIDDatabase + " highestTransactionIDIndex: " + highestTransactionIDIndex);
			}
			return new CLong(0);
		} finally {
			// NOTE(review): the flag is cleared even when doReload throws or
			// stops early at the numTransactions cap — remaining transactions
			// would then be skipped until the next write sets it again.
			// TODO confirm this is intended.
			transactionsSinceLastReload = false;
			reloadLock.unlock();
		}
	}

	/**
	 * Replays transaction records newer than highestTransactionIDIndex into
	 * the index, then reloads the ram and disk searchers.  Caller must hold
	 * reloadLock.
	 *
	 * @param numTransactions cap on how many records to replay
	 * @param highestTransactionIDIndex highest id already indexed, or null
	 *        when the index is empty
	 * @return number of transactions replayed
	 */
	private CLong doReload(CLong numTransactions, TransactionID highestTransactionIDIndex) throws Exception {
		// Start from the record after the highest one already in the index,
		// or from transaction 1 when the index has none.
		TransactionID transactionID = null;
		if (highestTransactionIDIndex == null)
			transactionID = new TransactionID(1);
		else {
			transactionID = (TransactionID) highestTransactionIDIndex.clone();
			transactionID.increment();
		}
		JDBCDatabase.Results results = transactionDatabase.findTransactionRecordResults(transactionID);
		CLong transactionCount = new CLong(0);
		// NOTE(review): results is never explicitly closed, and when the
		// numTransactions cap stops the loop early the cursor is abandoned
		// mid-iteration — verify Results does not hold DB resources open.
		while (results.hasNext() && transactionCount.value < numTransactions.value) {
			TransactionRecord transactionRecord = (TransactionRecord) results.get();
			runTransactionRecord(transactionRecord, new TimeoutState(5 * 1000));
			transactionCount.value++;
		}
		// Make the replayed transactions visible to searchers.
		ramIndexManager.reload();
		clusterIndexModifier.reload();
		return transactionCount;
	}

	/** Marks the shard read only; deleteDocuments rejects writes in this state. */
	public void setReadableOnly() {
		type.set(ClusterConstants.READ);
	}

	

	/**
	 * public List<IndexReader> getMultiReaders() throws Exception { List<IndexReader>
	 * multiReaders = new ArrayList<IndexReader>(6); IndexSearcher indexSearcher =
	 * openSearcher(false); multiReaders.add(indexSearcher.getIndexReader()); for
	 * (IndexSearcher ramIndexSearcher : ramIndexManager.createIndexSearchers()) {
	 * multiReaders.add(ramIndexSearcher.getIndexReader()); } return multiReaders; }
	 */
	/**
	 * public NodeShard getNodeShard() throws Exception { NodeShard nodeShard =
	 * new NodeShard(); nodeShard.highestTransactionLogID =
	 * getHighestTransactionIDDatabase(); nodeShard.highestTransactionID =
	 * getHighestTransactionIDIndex(); nodeShard.indexSize = new
	 * CLong((long)getIndexSizeMB().value*1024*1024); nodeShard.shardID = getID();
	 * nodeShard.nodeID = searchService.getID(); nodeShard.status = type.get();
	 * nodeShard.cellID = searchService.getCellID(); return nodeShard; }
	 */

	/** Total document count across the ram indices only. */
	public CLong getNumRamDocuments() throws Exception {
		long count = 0;
		for (MultiIndexSearcher searcher : ramIndexManager.createMultiIndexSearchers()) {
			count += searcher.getIndexReader().numDocs();
		}
		return new CLong(count);
	}

	/** Total document count across all indices, disk and ram. */
	public CLong getNumDocuments() throws Exception {
		long count = 0;
		for (MultiIndexSearcher searcher : createMultiIndexSearchers()) {
			count += searcher.getIndexReader().numDocs();
		}
		return new CLong(count);
	}

	/** Looks up a document by its id/version pair. */
	public Document getDocument(IDVersion idVersion, CFieldSelector fieldSelector, Set<String> fieldCacheFields) throws Exception {
		Term idVersionTerm = ClusterUtil.toIDVersionTerm(idVersion.id, idVersion.version);
		return getDocument(idVersionTerm, fieldSelector, fieldCacheFields);
	}

	/**
	 * Returns the first document matching the term across the disk and ram
	 * searchers, or null when no index contains it.
	 */
	public Document getDocument(Term term, CFieldSelector fieldSelector, Set<String> fieldCacheFields) throws Exception {
		for (MultiIndexSearcher searcher : createMultiIndexSearchers()) {
			IndexReader reader = searcher.getIndexReader();
			TermDocs termDocs = reader.termDocs(term);
			try {
				if (termDocs.next()) {
					return ClusterUtil.getDocument(termDocs.doc(), reader, fieldSelector, fieldCacheFields, cache);
				}
			} finally {
				if (termDocs != null)
					termDocs.close();
			}
		}
		return null;
	}

	/**
	 * All field names known to the on-disk index.
	 *
	 * @return a mutable set of field names (ram indices are not consulted)
	 */
	public Set<String> getFieldNames() throws Exception {
		// Old Lucene returns a raw Collection; iterate as wildcard instead of
		// a raw type and cast per element.
		Collection<?> collection = clusterIndexModifier.getIndexReader().getFieldNames(IndexReader.FieldOption.ALL);
		Set<String> fieldNames = new HashSet<String>(collection.size());
		for (Object object : collection) {
			fieldNames.add((String) object);
		}
		return fieldNames;
	}

	/** The SQL transaction log backing this shard. */
	public SQLTransactionDatabase getTransactionDatabase() {
		return transactionDatabase;
	}

	/**
	 * Lowest transaction id present in any index, found by taking the first
	 * TRANSACTION_ID_FIELD term of the disk index and each ram index and
	 * sorting the candidates.
	 *
	 * @return the lowest transaction id, or null when no index contains one
	 */
	public TransactionID getLowestTransactionIDIndex() throws Exception {
		// NOTE(review): the original fetched clusterIndexModifier.getIndexReader()
		// into a local that was never used; removed as dead code. TODO confirm
		// getIndexReader() has no required lazy-open side effect here.
		List<String> ids = new ArrayList<String>();
		Term startTerm = new Term(ClusterConstants.TRANSACTION_ID_FIELD, "");
		Term diskTerm = clusterIndexModifier.getTerm(startTerm);
		if (diskTerm != null)
			ids.add(diskTerm.text());
		for (Term ramTerm : ramIndexManager.getTermPerIndex(startTerm)) {
			ids.add(ramTerm.text());
		}
		if (ids.isEmpty()) {
			return null;
		}
		Collections.sort(ids);
		String lowestTransactionIDString = ids.get(0);
		// Convert the indexed form back to a readable id via the schema.
		IndexSchema indexSchema = currentIndexSchema.getIndexSchema();
		FieldType fieldType = indexSchema.getField(ClusterConstants.TRANSACTION_ID_FIELD).getFieldType();
		return TransactionID.parse(fieldType.indexedToReadable(lowestTransactionIDString));
	}

	/** Lazily computes and caches the highest transaction id in the index. */
	public TransactionID getHighestTransactionIDIndex(IndexSchema indexSchema) throws Exception {
		if (highestTransactionIDIndex != null) {
			return highestTransactionIDIndex;
		}
		highestTransactionIDIndex = getHighestTransactionIDIndexInternal(indexSchema);
		return highestTransactionIDIndex;
	}

	/**
	 * Scans the term enums of all searchers for the maximum
	 * TRANSACTION_ID_FIELD term.  TODO: maybe reload all searchers before
	 * running this.
	 *
	 * @return the highest indexed transaction id, or null if none found
	 */
	private TransactionID getHighestTransactionIDIndexInternal(IndexSchema indexSchema) throws Exception {
		String maxText = ClusterUtil.getMaxTerm(ClusterConstants.TRANSACTION_ID_FIELD, createMultiIndexSearchers());
		if (maxText == null) {
			return null;
		}
		String readable = ClusterUtil.indexedToReadable(ClusterConstants.TRANSACTION_ID_FIELD, maxText, indexSchema);
		return TransactionID.parse(readable);
	}

	/**
	 * To be called only by replication, otherwise concurrency issues will arise.
	 * Go through each indexreader, get the maxdoc transaction id from the field
	 * cache, return the highest.
	 * 
	 * public TransactionID getHighestTransactionIDIndex() throws Exception {
	 * TransactionID highestTransactionID = null; List<MultiIndexSearcher>
	 * indexSearchers = createMultiIndexSearchers(); for (IndexSearcher
	 * indexSearcher : indexSearchers) { IndexReader indexReader =
	 * indexSearcher.getIndexReader(); CLong[] longArray =
	 * cache.getLongArray(ClusterConstants.TRANSACTION_ID_FIELD, indexReader); int
	 * maxDoc = indexReader.maxDoc(); if (maxDoc == 0) continue; CLong value =
	 * longArray[maxDoc - 1]; if (value == null) continue; TransactionID
	 * transactionID = new TransactionID(value); if (highestTransactionID != null) {
	 * if (transactionID.compareTo(highestTransactionID) > 0) {
	 * highestTransactionID = transactionID; } } else { highestTransactionID =
	 * transactionID; } } if (highestTransactionID == null) { highestTransactionID =
	 * new TransactionID(0); } return highestTransactionID; }
	 */
	/**
	 * Advances the cached highest indexed transaction id, keeping the larger
	 * of the current and given values.
	 *
	 * @param newTransactionID candidate id, must not be null
	 * @throws IllegalArgumentException when newTransactionID is null
	 */
	public void setHighestTransactionIDIndex(TransactionID newTransactionID) throws Exception {
		if (newTransactionID == null) {
			throw new IllegalArgumentException("newTransactionID null");
		}
		if (highestTransactionIDIndex == null || newTransactionID.compareTo(highestTransactionIDIndex) > 0) {
			highestTransactionIDIndex = newTransactionID;
		}
	}

	/** Highest cell-level transaction id recorded in the transaction database. */
	public TransactionID getHighestCellTransactionIDDatabase() throws Exception {
		return transactionDatabase.findCellTransactionIDHighest();
	}

	/** Highest shard-local transaction id recorded in the transaction database. */
	public TransactionID getHighestTransactionIDDatabase() throws Exception {
		return transactionDatabase.findTransactionIDHighest();
	}

	/** Shard name, i.e. the name of its root directory. */
	public String getName() {
		return directory.getName();
	}

	// protected void errorIfNotMaster(TimeoutState timeoutState) throws Exception
	// {
	// if (searchService.isSlave(timeoutState)) throw new Exception("node is
	// slave");
	// }

	// protected void errorIfNotSlave(TimeoutState timeoutState) throws Exception
	// {
	// if (searchService.isMaster(timeoutState)) throw new Exception("node is
	// master");
	// }

	/**
	 * Highest (lexicographic) value of the given field across all searchers,
	 * or null when no searcher has a value for it.
	 */
	public String getMaxFieldValue(String field) throws Exception {
		List<MultiIndexSearcher> searchers = createMultiIndexSearchers();
		if (searchers.isEmpty()) {
			return null;
		}
		List<String> candidates = new ArrayList<String>(searchers.size());
		for (MultiIndexSearcher searcher : searchers) {
			String candidate = searcher.getMaxFieldValue(field);
			if (candidate != null) {
				candidates.add(candidate);
			}
		}
		return candidates.isEmpty() ? null : Collections.max(candidates);
	}

	// TODO: test setting sequences
	public void resetDocumentIDSequence() throws Exception {
		String indexedMax = getMaxFieldValue(ClusterConstants.ID_FIELD);
		if (StringUtils.isNotBlank(indexedMax)) {
			CLong maxID = ClusterUtil.getLong(indexedMax, currentIndexSchema.getIndexSchema());
			if (maxID != null) {
				log.info("documentIDSequence set:" + maxID);
				CLong cloned = (CLong)maxID.clone();
				cloned.value++;
				documentIDSequence.set(new BigInteger(cloned.toString()));
			}
		}
	}

	/**
	 * Resets the transaction id sequence to one past the highest transaction
	 * id in the database.  No-op when the database has no transactions.
	 */
	public void resetTransactionIDSequence() throws Exception {
		TransactionID highestTransactionID = getHighestTransactionIDDatabase();
		if (highestTransactionID != null) {
			// Clone before incrementing so the fetched maximum is untouched.
			TransactionID cloned = (TransactionID)highestTransactionID.clone();
			cloned.increment();
			log.info("transactionIDSequence.set: " + cloned);
			transactionIDSequence.set(cloned.value); 
		}
	}

	/** Next value from the document id sequence. */
	public CLong getNextDocumentIDSequence() throws Exception {
		long next = documentIDSequence.next().longValue();
		return new CLong(next);
	}

	/** Next value from the transaction id sequence. */
	public TransactionID getNextTransactionIDSequence() throws Exception {
		return new TransactionID(transactionIDSequence.next());
	}
	
	/** Current value of the transaction id sequence without advancing it. */
	public TransactionID getCurrentTransactionIDSequence() throws Exception {
		return new TransactionID(transactionIDSequence.current());
	}

	// TODO: not yet implemented — currently a no-op (see "create batchmanager"
	// in the class-level TODOs).
	public void addDocumentsBatchMaster(String batchID, List<Document> documents, TimeoutState timeoutState) throws Exception {

	}

	/**
	 * Adds a new document as the master: allocates document and transaction
	 * ids, stamps system fields onto the document, and writes an ADD record to
	 * the transaction log.  The index itself is updated later by the
	 * transaction log reader (see notifyNewTransactions / reload).
	 *
	 * @param cellTransactionID cell-level transaction id to record
	 * @param document the user document; mutated in place with system fields
	 * @param analyzer currently unused here
	 * @return an UpdateResponse with SUCCESS status, the stamped document, and
	 *         the call duration
	 */
	public UpdateResponse addDocumentMaster(TransactionID cellTransactionID, Document document, Analyzer analyzer, TimeoutState timeoutState) throws Exception {
		// errorIfNotMaster(timeoutState);
		String cellID = shardInterface.getCellID();
		long startTime = System.currentTimeMillis();
		IndexSchema indexSchema = currentIndexSchema.getIndexSchema();
		CLong documentID = getNextDocumentIDSequence();
		TransactionID transactionID = getNextTransactionIDSequence();
		// New documents always start at version 1.
		CLong version = new CLong(1);
		Date dateCreated = new Date();
		Date dateModified = dateCreated;

		// Stamp all system fields onto the document before logging it.
		ClusterUtil.setCellTransactionID(cellTransactionID, document, indexSchema);
		ClusterUtil.setTransactionID(transactionID, document, indexSchema);
		ClusterUtil.setID(documentID, document, indexSchema);
		ClusterUtil.setVersion(version, document, indexSchema);
		ClusterUtil.setIDVersion(documentID, version, document, indexSchema);
		ClusterUtil.setDateCreated(dateCreated, document, indexSchema);
		ClusterUtil.setDateModified(dateModified, document, indexSchema);
		ClusterUtil.setCellID(cellID, document, indexSchema);
		ClusterUtil.setShardID(getID(), document, indexSchema);
		ClusterUtil.setGUID(documentID, version, getID(), document, indexSchema);
		ClusterUtil.setSchemaVersion(indexSchema.version, document, indexSchema);

		// Persist the ADD record to the transaction log.
		TransactionRecord transactionRecord = new TransactionRecord();
		transactionRecord.type = TransactionRecord.ADD;
		transactionRecord.documentID = documentID;
		transactionRecord.status = TransactionRecord.SUCCESS;
		transactionRecord.thisStatus = TransactionRecord.SUCCESS;
		transactionRecord.version = version;
		transactionRecord.id = transactionID;
		transactionRecord.cellTransactionID = cellTransactionID;
		transactionRecord.shardID = getID();
		transactionRecord.schemaVersion = getSchemaVersion();
		transactionRecord.setDocument(document);
		transactionRecord = transactionDatabase.insertTransactionRecord(transactionRecord);
		// Let the next reload() pick up this transaction.
		transactionsSinceLastReload = true;
		// addDocumentTransaction(transactionRecord, timeoutState);

		notifyNewTransactions();

		UpdateResponse updateResponse = new UpdateResponse();
		updateResponse.status = UpdateResponse.SUCCESS;
		updateResponse.oceanDocument = ClusterUtil.toOceanDocument(document, indexSchema);
		updateResponse.duration = new CLong(System.currentTimeMillis() - startTime);
		return updateResponse;
	}

	/**
	 * private Document throwVersionExceptionReturnDocument(CLong id, CLong
	 * version) throws VersionException, Exception { //Term term =
	 * ClusterUtil.toIDVersionTerm(id, version); //Set<String> fieldsToLoad = new
	 * HashSet<String>(); fieldsToLoad.add(ClusterConstants.DATE_CREATED_FIELD);
	 * CFieldSelector fieldSelector = new CFieldSelector(fieldsToLoad); //Document
	 * document = getDocument(term, fieldSelector,
	 * SearchService.defaultDocumentFieldCaches);
	 * 
	 * //if (document == null) throw new VersionException("versions don't match",
	 * new IDVersion(id, version, getID())); return document; }
	 * 
	 * private void throwVersionException(CLong id, CLong version) throws
	 * Exception { Term term = ClusterUtil.toIDVersionTerm(id, version); if
	 * (!termExists(term)) throw new VersionException("versions don't match", new
	 * IDVersion(id, version, getID())); }
	 */

	/**
	 * Fetches the latest version info for the document and throws when the
	 * caller's version does not match it.
	 *
	 * @return the existing record, or null when the document is unknown
	 * @throws VersionException when the versions differ
	 */
	private TransactionRecord throwVersionExceptionReturnTransactionRecord(CLong documentID, CLong version) throws VersionException, Exception {
		TransactionRecord existing = transactionDatabase.findVersionInfoByDocumentID(documentID);
		if (existing == null || existing.version.equals(version)) {
			return existing;
		}
		throw new VersionException("versions don't match. given: " + version + " existing: " + existing.version, new IDVersion(documentID, version, getID()));
	}

	/**
	 * Throws when the document exists and the caller's version differs from
	 * the stored maximum; no-op for unknown documents.
	 */
	private void throwVersionException(CLong documentID, CLong version) throws VersionException, Exception {
		CLong existing = transactionDatabase.findMaxVersionByDocumentID(documentID);
		if (existing == null) {
			return;
		}
		if (!version.equals(existing)) {
			throw new VersionException("versions don't match. given: " + version + " existing: " + existing, new IDVersion(documentID, version, getID()));
		}
	}

	/**
	 * Updates an existing document as the master: verifies the caller's
	 * version against the stored one, bumps the version, stamps system fields,
	 * and writes an UPDATE record to the transaction log.
	 *
	 * @param cellTransactionID cell-level transaction id to record
	 * @param document the user document; must carry id and current version,
	 *        mutated in place with system fields
	 * @return an UpdateResponse with SUCCESS status, the stamped document, and
	 *         the call duration
	 * @throws Exception wrapping any failure (including version mismatch),
	 *         with the original cause preserved
	 */
	public UpdateResponse updateDocumentMaster(TransactionID cellTransactionID, Document document, Analyzer analyzer, TimeoutState timeoutState) throws Exception {
		IndexSchema indexSchema = currentIndexSchema.getIndexSchema();
		long startTime = System.currentTimeMillis();
		try {
			ClusterUtil.validateDocumentUpdate(document, indexSchema);
			CLong documentID = ClusterUtil.getID(document, indexSchema);
			CLong version = ClusterUtil.getVersion(document, indexSchema);
			// Throws when the given version is stale.
			TransactionRecord existingTransactionRecord = throwVersionExceptionReturnTransactionRecord(documentID, version);
			TransactionID transactionID = getNextTransactionIDSequence();
			Date newDateModified = new Date();
			// Preserve the original creation date; only dateModified advances.
			Date dateCreated = existingTransactionRecord.dateCreated;
			CLong newVersion = new CLong(existingTransactionRecord.version.value + 1);

			// Stamp all system fields onto the document before logging it.
			ClusterUtil.setVersion(newVersion, document, indexSchema);
			ClusterUtil.setTransactionID(transactionID, document, indexSchema);
			ClusterUtil.setCellTransactionID(cellTransactionID, document, indexSchema);
			ClusterUtil.setID(documentID, document, indexSchema);
			ClusterUtil.setIDVersion(documentID, newVersion, document, indexSchema);
			ClusterUtil.setDateModified(newDateModified, document, indexSchema);
			ClusterUtil.setDateCreated(dateCreated, document, indexSchema);
			ClusterUtil.setCellID(shardInterface.getCellID(), document, indexSchema);
			ClusterUtil.setGUID(documentID, newVersion, getID(), document, indexSchema);
			ClusterUtil.setSchemaVersion(indexSchema.version, document, indexSchema);

			// Persist the UPDATE record to the transaction log.
			TransactionRecord transactionRecord = new TransactionRecord();
			transactionRecord.cellTransactionID = cellTransactionID;
			transactionRecord.type = TransactionRecord.UPDATE;
			transactionRecord.status = TransactionRecord.SUCCESS;
			transactionRecord.id = transactionID;
			transactionRecord.documentID = documentID;
			transactionRecord.version = newVersion;
			transactionRecord.shardID = getID();
			transactionRecord.schemaVersion = getSchemaVersion();
			transactionRecord.setDocument(document);
			transactionRecord = transactionDatabase.insertTransactionRecord(transactionRecord);
			transactionsSinceLastReload = true;
			// updateDocumentTransaction(transactionRecord, timeoutState);

			notifyNewTransactions();

			long duration = System.currentTimeMillis() - startTime;
			UpdateResponse updateResponse = new UpdateResponse();
			// FIX: status was never set on the success path; set SUCCESS for
			// consistency with addDocumentMaster and deleteDocumentMaster.
			updateResponse.status = UpdateResponse.SUCCESS;
			updateResponse.duration = new CLong(duration);
			updateResponse.oceanDocument = ClusterUtil.toOceanDocument(document, indexSchema);
			return updateResponse;
		} catch (Throwable throwable) {
			// FIX: preserve the cause instead of discarding it; the original
			// also built an UpdateResponse here that was never used.
			throw new Exception(ExceptionUtils.getFullStackTrace(throwable), throwable);
		}
	}

	/** Version of the currently active index schema. */
	private CLong getSchemaVersion() throws Exception {
		return currentIndexSchema.getIndexSchema().version;
	}

	/**
	 * Deletes a document as the master: verifies the caller's version, then
	 * writes a DELETE record to the transaction log.  On failure the record's
	 * status is flipped to FAILURE in the database and an ERROR response is
	 * returned (this method does not rethrow).
	 *
	 * @param cellTransactionID cell-level transaction id to record
	 * @param idVersion document id and expected current version
	 * @return SUCCESS response, or an ERROR response carrying the stack trace
	 */
	public UpdateResponse deleteDocumentMaster(TransactionID cellTransactionID, IDVersion idVersion, TimeoutState timeoutState) throws Exception,
			VersionException {
		long startTime = System.currentTimeMillis();
		TransactionRecord transactionRecord = new TransactionRecord();
		try {
			// Throws when the given version is stale.
			throwVersionException(idVersion.id, idVersion.version);
			TransactionID transactionID = getNextTransactionIDSequence();
			transactionRecord.id = transactionID;
			transactionRecord.cellTransactionID = cellTransactionID;
			transactionRecord.type = TransactionRecord.DELETE;
			transactionRecord.documentID = idVersion.id;
			transactionRecord.status = TransactionRecord.SUCCESS;
			transactionRecord.version = idVersion.version;
			transactionRecord.shardID = getID();
			transactionRecord.schemaVersion = getSchemaVersion();
			transactionRecord = transactionDatabase.insertTransactionRecord(transactionRecord);
			transactionsSinceLastReload = true;
			// runTransactionRecord(transactionRecord, timeoutState);
			UpdateResponse updateResponse = new UpdateResponse();
			updateResponse.status = UpdateResponse.SUCCESS;
			updateResponse.duration = new CLong(System.currentTimeMillis() - startTime);
			return updateResponse;
		} catch (Throwable throwable) {
			// Best-effort: mark the record FAILURE in the database.
			// NOTE(review): transactionRecord is allocated before the try, so
			// the null check below can never fail; also the record may not
			// have been inserted yet when the failure happened before insert —
			// verify updateTransactionRecordStatus tolerates that.
			try {
				if (transactionRecord != null) {
					transactionRecord.setStackTrace(throwable);
					transactionRecord.status = TransactionRecord.FAILURE;
					transactionDatabase.updateTransactionRecordStatus(transactionRecord);
				}
			} catch (Throwable transactionRecordThrowable) {
				log.log(Level.SEVERE, "", transactionRecordThrowable);
			}
			log.log(Level.SEVERE, "", throwable);
			UpdateResponse updateResponse = new UpdateResponse();
			updateResponse.status = UpdateResponse.ERROR;
			updateResponse.duration = new CLong(System.currentTimeMillis() - startTime);
			updateResponse.errorMessage = ExceptionUtils.getFullStackTrace(throwable);
			return updateResponse;
			// throw new Exception("", throwable);
		}
	}

	/**
	 * Number of entries directly inside the shard directory.
	 *
	 * @return the entry count, or 0 when the directory cannot be listed
	 */
	public int getFileCount() {
		// File.listFiles() returns null on I/O error or when the path is not a
		// directory; the original dereferenced it unconditionally (NPE risk).
		File[] files = directory.listFiles();
		return files == null ? 0 : files.length;
	}

	/** Guard used by write paths: fails fast once the shard is closed. */
	private void throwErrorIfClosed() throws ClusterException {
		if (!isClosed.get()) {
			return;
		}
		throw new ClusterException("searchdirectory " + getName() + " is closed");
	}

	/** True when the combined ram+disk index size exceeds MAX_SHARD_SIZE. */
	public boolean isSizeOverLimit() throws Exception {
		double sizeMB = getIndexSizeMB().value;
		return sizeMB > MAX_SHARD_SIZE.value;
	}

	/** Combined size of the ram indices and the on-disk index, in megabytes. */
	public CDouble getIndexSizeMB() throws Exception {
		CDouble ramMB = ramIndexManager.getSizeMB();
		double diskMB = ((double) ClusterUtil.getSize(fsDirectory)) / 1024 / 1024;
		return new CDouble(diskMB + ramMB.value);
	}

	/** Total on-disk size of the shard directory, in bytes. */
	public CLong getSize() {
		return new CLong(FileUtils.sizeOfDirectory(directory));
	}

	/** Human-readable summary: shard id and current type. */
	@Override
	public String toString() {
		return "SearchIndexShard: " + getID() + " type: " + getType();
	}

	/**
	 * Null-safe comparison of this shard's type with the given value.
	 *
	 * @param other type constant to compare against (may be null)
	 */
	public boolean typeEquals(String other) {
		// The original wrapped this in a try with an empty finally block,
		// which had no effect; removed.
		return StringUtils.equals(this.type.get(), other);
	}

	/**
	 * Sets the shard type (e.g. ClusterConstants.READ_WRITE or READ).
	 */
	public void setType(String type) {
		// The original wrapped this in a try with an empty finally block,
		// which had no effect; removed.
		this.type.set(type);
	}

	/** Root directory of this shard. */
	public File getDirectoryFile() {
		return directory;
	}

	/**
	 * Lazily opens and caches the Lucene FSDirectory for the shard root,
	 * creating the native lock factory on first use.
	 */
	public FSDirectory getFSDirectory() throws IOException {
		if (fsDirectory == null) {
			if (lockFactory == null) {
				lockFactory = new NativeFSLockFactory(directory);
			}
			fsDirectory = FSDirectory.getDirectory(directory, lockFactory);
		}
		return fsDirectory;
	}

	/** Document count summed over all searchers, disk and ram. */
	public CLong numDocs() throws Exception {
		long count = 0;
		for (IndexSearcher searcher : createMultiIndexSearchers()) {
			count += searcher.getIndexReader().numDocs();
		}
		return new CLong(count);
	}

	/**
	 * Deletes all documents matching the term from both the ram and disk
	 * indices.  Rejects the call when the shard is closed or read only, and
	 * reports (then rethrows) index corruption.
	 *
	 * @return total number of documents deleted
	 */
	public CLong deleteDocuments(Term term, boolean ifDeletedReload) throws Exception {
		throwErrorIfClosed();
		if (getType().equals(ClusterConstants.READ)) {
			throw new ClusterException(toString() + " is read only");
		}
		try {
			long deleted = ramIndexManager.deleteDocuments(term, true, ifDeletedReload).value;
			deleted += clusterIndexModifier.deleteDocuments(term, true, ifDeletedReload, true).value;
			return new CLong(deleted);
		} catch (CorruptIndexException corruptIndexException) {
			shardInterface.reportCorruptIndex(this, corruptIndexException);
			throw corruptIndexException;
		}
	}

	/** All searchers for this shard: the disk searcher first, then the ram ones. */
	public List<MultiIndexSearcher> createMultiIndexSearchers() throws Exception {
		List<MultiIndexSearcher> ramSearchers = ramIndexManager.createMultiIndexSearchers();
		List<MultiIndexSearcher> all = new ArrayList<MultiIndexSearcher>(ramSearchers.size() + 1);
		all.add(clusterIndexModifier.createMultiIndexSearcher());
		all.addAll(ramSearchers);
		return all;
	}

	/** Per-segment searchers for the disk index followed by the ram indices. */
	public List<SegmentIndexSearcher> createSegmentIndexSearchers() throws Exception {
		List<SegmentIndexSearcher> searchers = clusterIndexModifier.createSegmentIndexSearchers();
		List<SegmentIndexSearcher> ramSearchers = ramIndexManager.createSegmentIndexSearchers();
		searchers.addAll(ramSearchers);
		return searchers;
	}

	/** Convenience overload: syncs with no cap on the number of ram indices. */
	public boolean syncRamIndex(boolean force) throws Exception {
		return syncRamIndex(force, Integer.MAX_VALUE);
	}

	/**
	 * Moves finished ram indices into the on-disk index: optimizes each,
	 * records its deletes, adds the directories to the disk index with
	 * transaction semantics, then reloads the disk index and drops the merged
	 * ram indices just before the new reader is set.
	 *
	 * @param force flush the current ram index first so it becomes "finished"
	 * @param max maximum number of finished ram indices to sync
	 * @return true when at least one ram index was synced
	 */
	public boolean syncRamIndex(boolean force, int max) throws Exception {
		if (force)
			ramIndexManager.flush();
		final List<RamIndex> ramIndices = ramIndexManager.getFinishedIndicesNoLock(max);
		if (ramIndices.size() == 0)
			return false;
		try {
			// First pass: optimize each ram index and collect its directory.
			// NOTE(review): the loop variable is typed ClusterIndexModifier,
			// which only compiles if RamIndex extends it — confirm.
			List<Directory> directories = new ArrayList<Directory>(ramIndices.size());
			for (ClusterIndexModifier ramIndex : ramIndices) {
				ramIndex.optimize();
				directories.add(ramIndex.getDirectory());
			}
			// Second pass: snapshot deletes after all optimizes completed.
			for (RamIndex ramIndex : ramIndices) {
				ramIndex.recordDeletes();
			}
			TransactionSemanticsHandler transactionSemanticsHandler = new TransactionSemanticsHandler(ramIndices);
			clusterIndexModifier.addIndexesNoOptimize(directories.toArray(new Directory[0]), transactionSemanticsHandler);
		} catch (CorruptIndexException corruptIndexException) {
			shardInterface.reportCorruptIndex(SearchIndexShard.this, corruptIndexException);
			throw corruptIndexException;
		}
		// Remove the merged ram indices right before the new disk reader goes
		// live, so documents are never visible twice or missing.
		clusterIndexModifier.reload(new ReloadCall() {
			public void beforeNewReaderSet() throws Exception {
				ramIndexManager.removeRAMIndexFinished(ramIndices);
			}
		});
		return true;
	}

	/**
	 * Supplies the delete terms queued on each ram index to the disk index
	 * writer so that the directory merge and the deletes are applied with
	 * transaction semantics.
	 */
	private class TransactionSemanticsHandler implements IndexWriter.TransactionSemantics {
		// Immutable reference to the ram indexes participating in the sync.
		private final List<RamIndex> ramIndices;

		public TransactionSemanticsHandler(List<RamIndex> ramIndices) {
			this.ramIndices = ramIndices;
		}

		/**
		 * Stops further deletes on each ram index and collects all queued
		 * delete terms across them.
		 * 
		 * @return the accumulated delete terms
		 */
		public List<Term> getDeleteTerms() {
			List<Term> terms = new ArrayList<Term>();
			for (RamIndex ramIndex : ramIndices) {
				ramIndex.stopDeletes();
				terms.addAll(ramIndex.getQueuedDeletedTerms());
			}
			return terms;
		}
	}

	/**
	 * Checks whether any index in this shard (disk or ram) contains at least
	 * one document with the given term.
	 * 
	 * @param term the term to look up
	 * @return true if the term's document frequency is positive in any index
	 */
	public boolean termExists(Term term) throws Exception {
		for (MultiIndexSearcher searcher : createMultiIndexSearchers()) {
			if (searcher.getIndexReader().docFreq(term) > 0) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Fetches transaction records from the transaction database starting at
	 * the given transaction id.
	 * 
	 * @param id the transaction id to start from
	 * @param rows the maximum number of records to return
	 * @return the matching transaction records
	 * @throws Exception
	 */
	public List<TransactionRecord> getTransactionRecords(TransactionID id, CLong rows) throws Exception {
		return transactionDatabase.findTransactionRecords(id, rows);
	}

	/**
	 * Persists a batch of transaction records to the transaction database and
	 * optionally triggers the transaction log reader immediately.
	 * 
	 * @param transactionRecords the records to insert
	 * @param notifyNewTransactions when true the transaction log reader timer
	 *        is run right after the insert
	 * @throws Exception
	 */
	public void saveTransactionRecords(List<TransactionRecord> transactionRecords, boolean notifyNewTransactions) throws Exception {
		transactionDatabase.insertTransactionRecords(transactionRecords);
		// Mark that new transactions exist since the last reload.
		transactionsSinceLastReload = true;
		if (notifyNewTransactions)
			notifyNewTransactions();
	}

	/**
	 * Triggers the transaction log reader timer to run immediately. Any
	 * failure is logged rather than propagated, since notification is
	 * best-effort.
	 */
	public void notifyNewTransactions() {
		try {
			transactionLogReaderTimer.runNow();
		} catch (Throwable throwable) {
			log.log(Level.SEVERE, "", throwable);
		}
	}

	/**
	 * Timer task that drives the transaction log reader: each run reloads up
	 * to 100 transactions. Failures are logged and swallowed so the timer
	 * keeps running.
	 */
	public class TransactionLogReaderTask implements RestartableTimer.TimeoutRunnable {
		public void run(TimeoutState timeoutState) {
			try {
				reload(new CLong(100));
			} catch (Throwable throwable) {
				log.log(Level.SEVERE, "", throwable);
			}
		}
	}

	/**
	 * Persists a single transaction record and optionally triggers the
	 * transaction log reader immediately.
	 * 
	 * @param transactionRecord the record to insert
	 * @param notifyNewTransactions when true the log reader timer runs now
	 * @throws Exception
	 */
	public void saveTransactionRecord(TransactionRecord transactionRecord, boolean notifyNewTransactions) throws Exception {
		transactionDatabase.insertTransactionRecord(transactionRecord);
		// Mark that new transactions exist since the last reload.
		transactionsSinceLastReload = true;
		if (notifyNewTransactions)
			notifyNewTransactions();
	}

	/**
	 * Dispatches a transaction record to the handler matching its type (ADD,
	 * UPDATE or DELETE). Records of any other type are silently ignored.
	 * 
	 * @param transactionRecord the record to apply to this shard
	 * @param timeoutState passed through to the type-specific handler
	 * @throws Exception
	 */
	public void runTransactionRecord(TransactionRecord transactionRecord, TimeoutState timeoutState) throws Exception {
		log.info("runTransactionRecord id: " + transactionRecord.id);
		// StringUtils.equals is null-safe on both arguments, so a null type
		// simply matches no branch.
		if (StringUtils.equals(TransactionRecord.ADD, transactionRecord.type)) {
			addDocumentTransaction(transactionRecord, timeoutState);
		} else if (StringUtils.equals(TransactionRecord.UPDATE, transactionRecord.type)) {
			updateDocumentTransaction(transactionRecord, timeoutState);
		} else if (StringUtils.equals(TransactionRecord.DELETE, transactionRecord.type)) {
			deleteDocumentTransaction(transactionRecord, timeoutState);
		}
	}

	/**
	 * Guard that rejects mutating operations on a read-only shard.
	 * 
	 * @throws ClusterException if this shard's type is ClusterConstants.READ
	 */
	private void throwIfReadOnly() throws ClusterException {
		boolean readOnly = getType().equals(ClusterConstants.READ);
		if (readOnly) {
			throw new ClusterException(toString() + " is read only");
		}
	}

	/**
	 * Applies an UPDATE transaction record: deletes the prior version of the
	 * document and indexes the new version.
	 * 
	 * @param transactionRecord the update record carrying the document, its id
	 *        and its new version
	 * @param timeoutState currently unused; kept for signature parity with the
	 *        other *Transaction methods
	 * @throws ClusterException if the shard is read only or the delete count
	 *         is negative
	 */
	private void updateDocumentTransaction(TransactionRecord transactionRecord, TimeoutState timeoutState) throws Exception {
		// errorIfNotSlave(timeoutState);
		throwErrorIfClosed();
		throwIfReadOnly();
		Document document = transactionRecord.getDocument();
		CLong id = transactionRecord.documentID;
		CLong newVersion = transactionRecord.version;
		// Assumes the document's previous version is exactly newVersion - 1.
		CLong currentVersion = new CLong(newVersion.value - 1);

		// NOTE(review): only a negative delete count is treated as an error;
		// zero deletions (prior version absent) passes silently — confirm intended.
		CLong numDeleted = deleteDocuments(ClusterUtil.toIDVersionTerm(id, currentVersion), true);
		if (numDeleted.value < 0)
			throw new ClusterException("num deleted " + numDeleted.value);
		addDocumentIndex(document);
	}

	/**
	 * Adds a batch of documents to the ram index using the analyzer from the
	 * current index schema.
	 * 
	 * @param documents the documents to index
	 * @param reload when true the ram index manager is reloaded afterwards
	 */
	private void addDocumentsIndex(List<Document> documents, boolean reload) throws Exception {
		Analyzer analyzer = shardInterface.getCurrentIndexSchema().getIndexSchema().getAnalyzer();
		ramIndexManager.addDocuments(documents, analyzer);
		if (reload) {
			ramIndexManager.reload();
		}
	}

	/**
	 * Adds a single document to the ram index, records its transaction id as
	 * the highest indexed so far, and reloads the ram index manager.
	 * 
	 * @param document the document to index
	 */
	private void addDocumentIndex(Document document) throws Exception {
		IndexSchema schema = shardInterface.getCurrentIndexSchema().getIndexSchema();
		ramIndexManager.addDocument(document, schema.getAnalyzer());
		TransactionID transactionID = ClusterUtil.getTransactionID(document, schema);
		setHighestTransactionIDIndex(transactionID);
		ramIndexManager.reload();
	}

	/**
	 * private boolean deleteDocumentIndex(CLong id, CLong version, boolean
	 * ifDeletedReload) throws Exception { Term term =
	 * ClusterUtil.toIDVersionTerm(id, version); CLong ramNumDeleted =
	 * ramIndexManager.deleteDocuments(term, ifDeletedReload); if
	 * (ramNumDeleted.value > 0) return true; else { CLong numDeleted =
	 * clusterIndexModifier.deleteDocuments(term, ifDeletedReload); if
	 * (numDeleted.value > 0) return true; } return false; }
	 */
	/**
	 * Applies a DELETE transaction record: verifies the id/version, then
	 * deletes the matching document from the ram and disk indexes. On any
	 * failure the record's status is set to FAILURE in the transaction
	 * database (best effort) and a ClusterException wrapping the original
	 * cause is thrown.
	 * 
	 * @param transactionRecord the delete record to apply (must be non-null)
	 * @param timeoutState currently unused; kept for signature parity with the
	 *        other *Transaction methods
	 * @throws VersionException declared for callers, though any
	 *         VersionException raised here is caught and wrapped in a
	 *         ClusterException
	 */
	private void deleteDocumentTransaction(TransactionRecord transactionRecord, TimeoutState timeoutState) throws Exception, VersionException {
		// errorIfNotSlave(timeoutState);
		throwErrorIfClosed();
		IDVersion idVersion = transactionRecord.getIDVersion();
		try {
			throwVersionException(idVersion.id, idVersion.version);
			deleteDocuments(ClusterUtil.toIDVersionTerm(idVersion.id, idVersion.version), true);
		} catch (Throwable throwable) {
			// transactionRecord is known non-null here: getIDVersion() above
			// would already have thrown outside this try/catch otherwise, so
			// the old null check was dead code.
			try {
				transactionRecord.setStackTrace(throwable);
				transactionRecord.status = TransactionRecord.FAILURE;
				transactionDatabase.updateTransactionRecordStatus(transactionRecord);
			} catch (Throwable transactionRecordThrowable) {
				log.log(Level.SEVERE, "", transactionRecordThrowable);
			}
			log.log(Level.SEVERE, "", throwable);
			throw new ClusterException("", throwable);
		}
	}

	/**
	 * Applies an ADD transaction record by indexing its document into the ram
	 * index.
	 * 
	 * @param transactionRecord the add record carrying the document
	 * @param timeoutState currently unused; kept for signature parity with the
	 *        other *Transaction methods
	 */
	private void addDocumentTransaction(TransactionRecord transactionRecord, TimeoutState timeoutState) throws Exception {
		// errorIfNotSlave(timeoutState);
		throwErrorIfClosed();
		throwIfReadOnly();
		addDocumentIndex(transactionRecord.getDocument());
	}

	// public void reportCorruptIndex(CorruptIndexException corruptIndexException)
	// {
	// searchService.reportCorruptIndex(this, corruptIndexException);
	// }

	/**
	 * @return true if this shard has been closed
	 */
	public boolean isClosed() {
		return isClosed.get();
	}

	/**
	 * Closes this shard: the ram index manager, the transaction database and
	 * the disk index modifier, then notifies the shard interface. Each
	 * component is closed best-effort (failures are logged, not propagated,
	 * matching the existing handling of clusterIndexModifier) so that one
	 * failing close does not leak the remaining resources or leave the shard
	 * marked open.
	 * 
	 * @throws IOException if the shard is already closed
	 */
	public void close() throws Exception {
		if (isClosed.get()) {
			throw new IOException("directory already closed");
		}
		try {
			ramIndexManager.close();
		} catch (Throwable throwable) {
			log.log(Level.SEVERE, "", throwable);
		}
		isClosed.set(true);
		try {
			transactionDatabase.close();
		} catch (Throwable throwable) {
			log.log(Level.SEVERE, "", throwable);
		}
		// indexShardManager.removeShard(this);
		shardInterface.shardClosed(this);
		if (clusterIndexModifier != null) {
			try {
				clusterIndexModifier.close();
			} catch (Throwable throwable) {
				log.log(Level.SEVERE, "", throwable);
			}
		}
		log.log(Level.INFO, "closed searchindexshard " + toString());
	}

	/**
	 * @return the date this shard was created
	 */
	public Date getDateCreated() {
		return dateCreated;
	}

	/**
	 * @return this shard's current type (for example ClusterConstants.READ)
	 */
	public String getType() {
		return type.get();
	}

	/**
	 * @return this shard's identifier, which is its name
	 */
	public String getID() {
		return getName();
	}
}
