package org.apache.lucene.ocean.log;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.lang.SerializationUtils;
import org.apache.lucene.ocean.Deletes;
import org.apache.lucene.ocean.Documents;
import org.apache.lucene.ocean.LogDirectory;
import org.apache.lucene.ocean.Batch.MasterBatch;
import org.apache.lucene.ocean.Batch.SlaveBatch;
import org.apache.lucene.ocean.log.LogFile.Record;
import org.apache.lucene.ocean.log.LogFileManager.RecordIterator;
import org.apache.lucene.ocean.log.RawLogFile.StreamData;
import org.apache.lucene.ocean.util.Bytes;
import org.apache.lucene.ocean.util.Constants;
import org.apache.lucene.ocean.util.LongSequence;
import org.apache.lucene.ocean.util.RAMDirectorySerializer;
import org.apache.lucene.ocean.util.Util;
import org.apache.lucene.store.RAMDirectory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Serializes transactions known internally as batches to an underlying log
 * file. Provides an iterator over the batches.
 * 
 */
public class TransactionLog {
  final static Logger LOG = LoggerFactory.getLogger(TransactionLog.class);
  // private ByteBufferPool byteBufferPool = new ByteBufferPool(50 * 1024, 5,
  // 5); // not used right now

  /** Manages the rolling set of log files backing this transaction log. */
  LogFileManager logFileManager;
  /** Serializes concurrent {@link #writeMasterBatch} calls. */
  private final ReentrantLock writeLock = new ReentrantLock();
  private final LogDirectory logDirectory;
  /** Produces monotonically increasing snapshot ids, seeded past any id already on disk. */
  private final LongSequence snapshotIdSequence;

  /**
   * Creates a transaction log over the given directory, resuming the snapshot
   * id sequence from the highest id found in the existing log files.
   *
   * @param maxFileSize the maximum file size of each log file before a new one is created
   * @param logDirectory the LogDirectory to use
   * @throws IOException if the existing log files cannot be opened or scanned
   */
  public TransactionLog(long maxFileSize, LogDirectory logDirectory) throws IOException {
    this.logDirectory = logDirectory;
    // Second argument is a time interval in milliseconds (30 seconds).
    // NOTE(review): assumed to be a flush/roll interval — confirm in LogFileManager.
    logFileManager = new LogFileManager(maxFileSize, 1000 * 30, logDirectory);
    Long maxId = logFileManager.getMaxId();
    // Start just past the highest persisted id; an empty log starts at 1.
    // (Replaces the deprecated `new Long(0)` boxing constructor.)
    long firstId = (maxId == null) ? 1L : maxId.longValue() + 1L;
    snapshotIdSequence = new LongSequence(firstId, 1);
  }

  /**
   * Returns the LogFileManager for this TransactionLog.
   *
   * @return the underlying log file manager
   */
  public LogFileManager getLogFileManager() {
    return logFileManager;
  }

  /**
   * Any log file with a maxSnapshotId less than minIndexSnapshotId can be
   * deleted. This is because it means the index would no longer recover
   * anything from the transaction log.
   *
   * @param minIndexSnapshotId the minimum snapshot id the index still needs
   * @throws IOException if deletion of a log file fails
   */
  public void deleteOldLogFiles(Long minIndexSnapshotId) throws IOException {
    logFileManager.deleteOldLogFiles(minIndexSnapshotId);
  }

  /**
   * Closes the underlying LogFileManager.
   *
   * @throws IOException if closing the log files fails
   */
  public void close() throws IOException {
    logFileManager.close();
  }

  /**
   * Deserializes an object of the given record type from the input stream.
   *
   * @param type record type tag, either {@code Constants.RAM_DIRECTORY_TYPE}
   *        or {@code Constants.DOCUMENTS_TYPE}
   * @param input stream positioned at the start of the serialized object
   * @return a {@link RAMDirectory} or a {@link Documents}, depending on {@code type}
   * @throws IOException if reading from the stream fails
   * @throws RuntimeException if {@code type} is not a known record type
   */
  public Object deserialize(int type, InputStream input) throws IOException {
    if (type == Constants.RAM_DIRECTORY_TYPE) {
      return RAMDirectorySerializer.deserialize(input);
    } else if (type == Constants.DOCUMENTS_TYPE) {
      // SECURITY: Java native deserialization. Acceptable only because the
      // log files are produced by this process/cluster; never feed this
      // method untrusted data.
      return SerializationUtils.deserialize(input);
    } else {
      // Include the offending tag so a corrupt record is diagnosable.
      throw new RuntimeException("unknown object type: " + type);
    }
  }

  /**
   * Serializes a RAMDirectory or a Documents object into a {@link Bytes} buffer.
   *
   * @param object the object to serialize; must be a RAMDirectory or Documents
   * @return a Bytes buffer containing the serialized form
   * @throws IOException if serialization fails
   * @throws RuntimeException if {@code object} is of an unsupported type
   */
  public static Bytes serialize(Serializable object) throws IOException {
    if (object instanceof RAMDirectory) {
      RAMDirectory ramDirectory = (RAMDirectory) object;
      // Size the buffer from the directory's contents (plus headroom for
      // serialization framing) so small directories don't allocate 8 KB and
      // large ones don't grow the buffer repeatedly.
      int size = (int) Util.getSize(ramDirectory);
      int bufferSize = 8 * 1024;
      if (bufferSize > size) {
        bufferSize = size + 1024;
      }
      Bytes bytes = new Bytes(bufferSize);
      RAMDirectorySerializer.serialize(ramDirectory, bytes.getOutputStream());
      return bytes;
    } else if (object instanceof Documents) {
      Bytes bytes = new Bytes(8 * 1024);
      SerializationUtils.serialize(object, bytes.getOutputStream());
      return bytes;
    } else {
      throw new RuntimeException("unknown object type: "
          + (object == null ? "null" : object.getClass().getName()));
    }
  }

  /**
   * Writes a MasterBatch to a log file under the given snapshot id. The
   * record data is built outside the lock; only the actual log write is
   * serialized by {@link #writeLock}.
   *
   * @param id the snapshot id to record the batch under
   * @param masterBatch the batch to write
   * @throws Exception if building the record data or writing it fails
   */
  public void writeMasterBatch(final Long id, MasterBatch masterBatch) throws Exception {
    RecordData recordData = masterBatch.getRecordData();
    writeLock.lock();
    try {
      logFileManager.writeRecord(id, recordData);
    } finally {
      writeLock.unlock();
    }
  }

  /**
   * Gets the total number of records in all of the log files.
   *
   * @return the total record count
   */
  public int getNumRecords() {
    return logFileManager.getNumRecords();
  }

  /**
   * Gets the next snapshot id from the sequence.
   *
   * @return the next id (each call increments the sequence)
   */
  public Long getNextId() {
    return snapshotIdSequence.getAndIncrement();
  }

  /**
   * Gets the minimum id across all of the log files.
   *
   * @return the minimum id
   */
  public Long getMinId() {
    return logFileManager.getMinId();
  }

  /**
   * Gets the maximum id across all of the log files.
   *
   * @return the maximum id
   */
  public Long getMaxId() {
    return logFileManager.getMaxId();
  }

  /**
   * Gets the id that precedes the given id in the log.
   *
   * @param id the reference id
   * @return the previous id
   */
  public Long getPreviousId(long id) {
    return logFileManager.getPreviousId(id);
  }

  /**
   * Gets a SlaveBatchIterator starting from the given snapshotId.
   *
   * @param snapshotId the snapshot id to start iterating from
   * @return an iterator over the batches recorded at or after snapshotId
   * @throws Exception if the underlying record iterator cannot be opened
   */
  public SlaveBatchIterator getSlaveBatchIterator(Long snapshotId) throws Exception {
    return new SlaveBatchIterator(snapshotId);
  }

  /**
   * Iterates over the transactions in the transaction log, returning
   * {@link SlaveBatch} records. Callers must {@link #close()} the iterator
   * to release the underlying record iterator.
   */
  public class SlaveBatchIterator {
    RecordIterator recordIterator;

    public SlaveBatchIterator(Long snapshotId) throws IOException {
      recordIterator = logFileManager.getRecordIterator(snapshotId);
    }

    public boolean hasNext() throws IOException {
      return recordIterator.hasNext();
    }

    /**
     * Returns the next SlaveBatch.
     *
     * @param loadDocuments whether to load and deserialize the documents
     *        portion of the record
     * @param loadOther whether to load and deserialize the "other" portion
     *        (the deletes)
     * @return the next batch; contains a RAMDirectory if the record held one,
     *         otherwise a Documents object (either may be null if not loaded)
     * @throws Exception if reading or deserializing the record fails
     */
    public SlaveBatch next(boolean loadDocuments, boolean loadOther) throws Exception {
      Record record = recordIterator.next();
      RecordHeader recordHeader = record.getRecordHeader();
      Documents documents = null;
      RAMDirectory ramDirectory = null;
      Deletes deletes = null;
      if (loadDocuments) {
        StreamData docData = record.getStreamRecord().getDocuments();
        if (docData != null) {
          // NOTE(review): the streams obtained from StreamData are not closed
          // here; presumably their lifetime is owned by the RecordIterator —
          // confirm against StreamData/RecordIterator before adding close().
          InputStream docInput = docData.getInputStream();
          Object object = deserialize(recordHeader.docType, docInput);
          if (object instanceof RAMDirectory) {
            ramDirectory = (RAMDirectory) object;
          } else {
            documents = (Documents) object;
          }
        }
      }
      if (loadOther) {
        StreamData otherData = record.getStreamRecord().getOther();
        if (otherData != null) {
          InputStream otherInput = otherData.getInputStream();
          // The "other" stream always carries the serialized Deletes.
          deletes = (Deletes) SerializationUtils.deserialize(otherInput);
        }
      }
      if (ramDirectory != null) {
        return new SlaveBatch(record.getId(), ramDirectory, deletes);
      } else {
        return new SlaveBatch(record.getId(), documents, deletes);
      }
    }

    /**
     * Closes the underlying RecordIterator.
     *
     * @throws IOException if closing fails
     */
    public void close() throws IOException {
      recordIterator.close();
    }
  }
}
