/**
 * Copyright (c) 2011, www.quartzsource.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.quartzsource.core

import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.{ Map => MMap }

/**
 * Storage back-end for Quartz.
 *
 * This provides efficient delta storage with O(1) retrieve and append
 * and O(changes) merge between branches.
 */
object Revlog {
  //revlog header flags
  val REVLOGV0 = 0
  val REVLOGNG = 1
  val REVLOGNGINLINEDATA = (1 << 16)
  val REVLOGGENERALDELTA = (1 << 17)
  val REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
  val REVLOG_DEFAULT_FORMAT = REVLOGNG
  val REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
  val REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

  //revlog index flags
  val REVIDX_KNOWN_FLAGS = 0

  //max size of revlog with inline data
  private val maxInline = 131072
  //upper bound for the read-ahead chunk cache kept by class Revlog
  private val chunkSize = 1048576

  /** Extract the data-file offset from a packed offset/type quadword. */
  def getOffset(q: Long): Int = {
    (q >> 16).toInt
  }

  /** Extract the entry-type flags (low 16 bits) from a packed offset/type quadword. */
  def getType(q: Long): Int = {
    (q & 0xFFFF).toInt
  }

  /** Pack an offset and entry-type flags into the single Long stored in the index. */
  def offsetType(offset: Int, etype: Int): Long = {
    offset.toLong << 16 | etype
  }

  // pre-hashed null node, reused whenever a revision has at most one real parent
  val nullhash = new Util.Sha1(NodeId.NULLID.id)

  /**
   * generate a hash from the given text and its parent hashes
   *
   * This hash combines both the current file contents and its history
   * in a manner that makes it easy to distinguish nodes with the same
   * content in the revision graph.
   */
  def hash(text: Binary, p1: NodeId, p2: NodeId): Binary = {
    //As of now, if one of the parent node is null, p2 is null
    val s: Util.Sha1 = if (p2 == NodeId.NULLID) {
      val s = nullhash.copy
      s.update(p1.id)
      s
    } else {
      //none of the parent nodes are nullid; sort them so the hash
      //does not depend on parent order
      val (first, second) = if (p1 < p2)
        (p1, p2)
      else
        (p2, p1)
      val hash = new Util.Sha1(first.id)
      hash.update(second.id)
      hash
    }
    s.update(text)
    s.digest
  }

  /**
   * generate a possibly-compressed representation of text
   *
   * @return a (header, data) pair; header is "u" when the text is stored
   *         uncompressed and needs an explicit marker, "" when the data is
   *         self-identifying (empty, compressed, or starting with '\0')
   */
  def compress(text: String): (String, Binary) = {
    require(text != null)
    if (text.isEmpty()) return ("", Binary())
    val l = text.size
    if (l > 1000000) {
      //TODO reduce resources for Zip compression for big data
      println("Big data. Size: " + l)
    }
    //texts shorter than 44 bytes never gain from zlib compression
    val bin: Option[Binary] = if (l < 44) None else {
      throw new UnsupportedOperationException("Zip is not yet implemented.")
    }
    if (bin.isEmpty || bin.get.size > l) {
      //store uncompressed; a leading '\0' already marks the chunk type,
      //otherwise prefix with "u"
      (if (text.head == '\0') "" else "u", stringToBytes(text))
    } else {
      ("", bin.get)
    }
  }

  /**
   * decompress the given input
   *
   * The first byte selects the encoding: '\0' means stored raw, 'x' means
   * zlib (deflate) compressed, 'u' is an explicit uncompressed marker that
   * is stripped.
   */
  def decompress(bin: Binary): Binary = {
    require(bin != null)
    //TODO here list is better than buffer - only 'head' and 'tail' are used
    bin.headOption match {
      case None => Binary()
      case Some('\0') => bin
      case Some('x') => {
        val decompressor = new java.util.zip.Inflater()
        try {
          decompressor.setInput(bin.toArray, 0, bin.size)
          //Create an expandable byte array to hold the decompressed data
          val bos = new java.io.ByteArrayOutputStream(bin.size * 10)

          // Decompress the data
          val buf = new Array[Byte](1024)
          while (!decompressor.finished) {
            val count = decompressor.inflate(buf)
            if (count == 0) {
              throw new QuartzException(
                "needsInput() or needsDictionary() should be called");
            }
            bos.write(buf, 0, count)
          }
          // Get the decompressed data
          Binary(bos.toByteArray())
        } finally {
          //release the Inflater's native zlib memory promptly instead of
          //waiting for finalization (was never called before: native leak)
          decompressor.end()
        }
      }
      case Some('u') => bin.tail
      case Some(unknown) => throw new RevlogException(translate("unknown compression type %r", unknown))
    }
  }
}

class RevlogIO {
  /** Size in bytes of a single on-disk index record (NG format). */
  def size = IndexFormatNG_SIZE

  /**
   * Parse a raw index into its entries plus an initial chunk cache.
   *
   * For inline revlogs each index record is immediately followed by its
   * compressed data chunk, so records are found by hopping over the data;
   * the whole buffer then seeds the chunk cache. For separate-data revlogs
   * the records are densely packed and the cache starts empty.
   *
   * @throws ValueException if the buffer does not divide cleanly into records
   */
  def parseIndex(data: Binary, inline: Boolean): (List[IndexFormatNG], (Long, Binary)) = {
    val entries = ArrayBuffer[IndexFormatNG]()
    val limit = data.size - IndexFormatNG_SIZE
    var pos = 0

    val cache: (Long, Binary) =
      if (inline) {
        var done = false
        while (pos <= limit && !done) {
          val entry = unpack(data.slice(pos, pos + IndexFormatNG_SIZE))
          entries += entry
          // a negative compressed length terminates the walk
          if (entry._2 < 0) done = true
          else pos += entry._2 + IndexFormatNG_SIZE
        }
        (0L, data)
      } else {
        while (pos <= limit) {
          entries += unpack(data.slice(pos, pos + IndexFormatNG_SIZE))
          pos += IndexFormatNG_SIZE
        }
        (0L, Binary())
      }

    if (pos != data.size) {
      throw new ValueException("corrupt index file")
    }

    // on disk the first entry's offset field doubles as version/flags;
    // normalize it back to offset 0 while keeping the type bits
    entries.headOption.foreach { first =>
      entries(0) = first.copy(_1 = Revlog.offsetType(0, Revlog.getType(first._1)))
    }

    // add the magic null revision at -1
    entries += ((0, 0, 0, -1, -1, -1, -1, NodeId.NULLID))

    (entries.toList, cache)
  }

  /**
   * <pre>
   * # index ng:
   * # 6 bytes offset
   * # 2 bytes flags
   * # 4 bytes compressed length
   * # 4 bytes uncompressed length
   * # 4 bytes: base rev
   * # 4 bytes link rev
   * # 4 bytes parent 1 rev
   * # 4 bytes parent 2 rev
   * # 32 bytes: nodeid (in fact only 20 bytes, 12 bytes are reserved)
   * indexformatng = &gt;Qiiiiii20s12x
   * </pre>
   */
  def unpack(data: Binary): IndexFormatNG = {
    require(data.size == IndexFormatNG_SIZE)
    val node = new NodeId(data.slice(32, 52))
    (readLong(data),
      readInt(data, 8), readInt(data, 12), readInt(data, 16),
      readInt(data, 20), readInt(data, 24), readInt(data, 28),
      node)
  }

  /** Serialize one index entry into its fixed-size on-disk record. */
  def pack(entry: IndexFormatNG): Binary = {
    //TODO it is more efficient to re-write the method without DataOutputStream
    val buffer = new java.io.ByteArrayOutputStream(IndexFormatNG_SIZE)
    val out = new java.io.DataOutputStream(buffer)
    out.writeLong(entry._1)
    List(entry._2, entry._3, entry._4, entry._5, entry._6, entry._7).foreach(out.writeInt)
    out.write(entry._8.id.toArray)
    (1 to 12).foreach(_ => out.write(0)) //12 reserved bytes, zero-filled
    out.close()
    Binary(buffer.toByteArray)
  }

  private def readInt(data: Binary, start: Int): Int = Parsers.readInt(data, start)

  private def readLong(data: Binary): Long = Parsers.readLong(data)

  /**
   * Pack an entry for writing; for the very first record the leading
   * 4 bytes of the (always-zero) offset carry the revlog version/flags.
   */
  def packentry(entry: IndexFormatNG, version: Int, rev: Int): Binary = {
    val packed = pack(entry)
    if (rev != 0) packed
    else {
      val header = new java.io.ByteArrayOutputStream(4)
      val out = new java.io.DataOutputStream(header)
      out.writeInt(version)
      out.close()
      header.toByteArray() ++ packed.drop(4)
    }
  }
}

/**
 * Closed set of revlog format capabilities that can be requested through
 * Opener options. Sealed so pattern matches are exhaustiveness-checked.
 */
sealed abstract class SupportedFormats
case object Revlogv1 extends SupportedFormats
case object GeneralDelta extends SupportedFormats

/**
 * the underlying revision storage object
 *
 * A revlog consists of two parts, an index and the revision data.
 *
 * The index is a file with a fixed record size containing
 * information on each revision, including its nodeid (hash), the
 * nodeids of its parents, the position and offset of its data within
 * the data file, and the revision it's based on. Finally, each entry
 * contains a linkrev entry that can serve as a pointer to external
 * data.
 *
 * The revision data itself is a linear collection of data chunks.
 * Each chunk represents a revision and is usually represented as a
 * delta against the previous chunk. To bound lookup time, runs of
 * deltas are limited to about 2 times the length of the original
 * version data. This makes retrieval of a version proportional to
 * its size, or O(1) relative to the number of revisions.
 *
 * Both pieces of the revlog are written to in an append-only
 * fashion, which means we never need to rewrite a file to insert or
 * remove data, and can use some simple techniques to avoid the need
 * for locking while reading.
 *
 */
class Revlog(val opener: Opener, val indexFile: String) extends Iterable[Int] {
  require(indexFile.endsWith(".i"))
  //revision data sits next to the index: "foo.i" -> "foo.d"
  val dataFile = indexFile.dropRight(2) + ".d"
  //memoized node -> revision lookups, pre-seeded with the null node
  val nodeCache: MMap[NodeId, Int] = MMap(NodeId.NULLID -> NodeId.NULLREV)
  //where the backwards scan in rev() resumes after a previous miss
  var nodePos: Option[Int] = None
  //most recently reconstructed revision: (node, rev, uncompressed text)
  var cache: Option[(NodeId, Int, Binary)] = None

  var v = Revlog.REVLOG_DEFAULT_VERSION
  //NOTE(review): requesting Revlogv1 without GeneralDelta resets v to 0
  //(the unsupported old format) instead of keeping REVLOG_DEFAULT_VERSION -
  //confirm this option handling is intended
  if (opener.options(Revlogv1) && opener.options(GeneralDelta))
    v |= Revlog.REVLOGGENERALDELTA
  else
    v = 0

  var initEmpty = true
  //raw index bytes; a missing index file simply means a new, empty revlog
  val i = try {
    val f = opener.openToRead(indexFile)
    val input: Binary = Binary.read(f)
    f.close
    if (input.size > 0) {
      //the first 4 bytes of entry 0 hold the on-disk version and flags
      v = Parsers.readInt(input, 0)
      initEmpty = false
    }
    input
  } catch {
    case e: java.io.FileNotFoundException => Binary() //index does not exist
  }
  var version = v
  var inline = (v & Revlog.REVLOGNGINLINEDATA) != 0
  var generalDelta = v & Revlog.REVLOGGENERALDELTA
  val flags = v & ~0xFFFF
  val fmt = v & 0xFFFF
  if (fmt == Revlog.REVLOGV0 && flags != 0) {
    throw new RevlogException(translate("index %s unknown flags %#04x for format v0", indexFile, flags >> 16))
  } else if ((fmt == Revlog.REVLOGNG) && (flags & ~Revlog.REVLOGNG_FLAGS) != 0) {
    throw new RevlogException(translate("index %s unknown flags %#04x for revlogng", indexFile, flags >> 16))
  } else if (fmt > Revlog.REVLOGNG) {
    throw new RevlogException(translate("index %s unknown format %d", indexFile, fmt))
  }
  val io = new RevlogIO()
  if (version == Revlog.REVLOGV0) {
    //TODO [ID=100] Old revlog format is not supported
    throw new RevlogException(translate("Old revlog format is not supported"))
  }
  val (index, ccache) = try {
    val d = io.parseIndex(i, inline)
    //TODO [ID=100] Old revlog format is not supported
    d
  } catch {
    //catch only Exception (was a catch-all): fatal errors such as
    //OutOfMemoryError must not be misreported as index corruption
    case _: Exception => throw new RevlogException(translate("index %s is corrupted", indexFile))
  }
  var chunkCache = ccache
  //end constructor

  /** node of the newest real revision (the null sentinel occupies the last slot) */
  def tip = node(index.size - 2)

  /** number of real revisions (the index carries one extra null entry) */
  override def size = index.size - 1

  //NOTE(review): `0 to size` also yields rev == size, which addresses the
  //null sentinel entry; Mercurial iterates 0 until len - verify callers
  //really rely on the extra element before changing this
  def iterator = {
    val list = 0 to size
    list.iterator
  }

  //forces one full backwards scan so every node ends up in nodeCache
  lazy val nodemap: MMap[NodeId, Int] = {
    rev(node(0))
    nodeCache
  }

  /**
   * Map a node id to its revision number, scanning the index backwards
   * from the last unexplored position and memoizing every node seen.
   *
   * @throws LookupException if the node is not present in this revlog
   */
  def rev(node: NodeId): Int = {
    nodeCache.get(node) match {
      case Some(value) => value
      case None => {
        //resume where the previous scan stopped (removed an unused local
        //that shadowed the raw-index field `i` here)
        val p = nodePos match {
          case None => index.size - 2
          case Some(v) => v
        }
        for (r <- p until -1 by -1) {
          val v = index(r)._8
          nodeCache(v) = r
          if (v == node) {
            nodePos = Some(r - 1)
            return r
          }
        }
        throw LookupException(node, indexFile, translate("no node"))
      }
    }
  }

  /**
   * node id of the given revision; rev -1 resolves to the trailing "magic
   * null revision" entry, mirroring Python's negative indexing (a plain
   * index(-1) would throw IndexOutOfBoundsException)
   */
  def node(rev: Int) = index(if (rev == -1) index.size - 1 else rev)._8

  /** link revision of the given revision (record bytes 20-23, field _5; _4 is the base rev) */
  def linkRev(rev: Int) = index(rev)._5

  /** parent node ids of the given node; null parents (rev -1) map to NULLID */
  def parents(node: NodeId): (NodeId, NodeId) = {
    val d: IndexFormatNG = index(rev(node))
    //go through node() so a -1 parent rev resolves to the null entry
    //instead of throwing IndexOutOfBoundsException for root revisions
    (this.node(d._6), this.node(d._7))
  }

  /** parent revisions (p1, p2) of the given revision */
  def parentRevs(rev: Int): (Int, Int) = {
    val record = index(rev)
    (record._6, record._7)
  }

  /** byte offset of this revision's chunk within the data */
  def start(rev: Int): Int = {
    val offset = index(rev)._1 >> 16
    offset.toInt
  }

  /** end offset (exclusive) of this revision's chunk */
  def end(rev: Int): Int = {
    start(rev) + length(rev)
  }

  /** compressed length of this revision's chunk */
  def length(rev: Int): Int = {
    index(rev)._2
  }

  /** follow base pointers until a revision that is its own base (a full snapshot) */
  def chainBase(revision: Int): Int = {
    var base = index(revision)._4
    var rev = revision
    while (base != rev) {
      rev = base
      base = index(rev)._4
    }
    base
  }

  /** per-revision flags stored in the low 16 bits of the offset field */
  def flags(rev: Int): Int = {
    val flags = index(rev)._1 & 0xFFFF
    flags.toInt
  }

  /**
   * return the length of the uncompressed text for a given revision
   */
  def rawSize(rev: Int): Int = {
    val l = index(rev)._3
    //a negative stored size means "unknown": rebuild the text to measure it
    if (l >= 0) l else {
      val t = revision(node(rev))
      t.size
    }
  }

  def size(rev: Int) = rawSize(rev)

  /** merge freshly read data into the chunk cache when it extends it contiguously */
  private def addChunk(offset: Long, data: Binary) {
    val (o, d) = chunkCache
    //try to add to existing cache
    chunkCache = if ((o + d.size == offset) && (d.size + data.size < Revlog.chunkSize)) {
      (o, d ++ data)
    } else {
      (offset, data)
    }
  }

  /**
   * Read `length` bytes at file position `offset` (reading ahead up to 64K
   * to warm the chunk cache) and return exactly the requested chunk.
   */
  private def loadChunk(offset: Long, length: Int): Binary = {
    //inline revlogs interleave the data inside the index file itself
    val df = if (inline) opener.openToRead(indexFile) else opener.openToRead(dataFile)
    try {
      val readAhead = math.max(65536, length)
      //position the stream at the chunk start; the previous code skipped
      //readAhead bytes, which put every read at the wrong file position
      //NOTE(review): InputStream.skip may skip fewer bytes than requested;
      //assumed complete for local file streams - confirm Opener's contract
      df.skip(offset)
      val d = new Array[Byte](readAhead)
      //read() may deliver fewer bytes than requested and returns -1 at EOF
      val read = math.max(df.read(d), 0)
      addChunk(offset, Binary(d.take(read)))
      //trim to the requested length (the old code could return the whole
      //read-ahead buffer, including unread trailing zeros)
      Binary(d.take(math.min(read, length)))
    } finally {
      df.close //always release the handle; it used to leak on every call
    }
  }

  /** fetch a chunk, serving it from the read-ahead cache when possible */
  private def getChunk(offset: Long, length: Int): Binary = {
    val (o, d) = chunkCache
    val l = d.size

    //is it in the cache?
    val cacheStart = offset - o
    val cacheEnd = cacheStart + length
    if (cacheStart >= 0 && cacheEnd <= l)
      if (cacheStart == 0 && cacheEnd == l)
        return d //avoid a copy
      else
        return d.slice(cacheStart.toInt, cacheEnd.toInt)
    loadChunk(offset, length)
  }

  /** raw (possibly compressed) bytes covering revisions startRev..endRev */
  private def chunkRaw(startRev: Int, endRev: Int): Binary = {
    var startv = start(startRev)
    val length = end(endRev) - startv
    //in inline revlogs each chunk is preceded by its index record (io.size bytes)
    if (inline) startv += (startRev + 1) * io.size
    getChunk(startv, length)
  }

  /** decompressed chunk of a single revision */
  private def chunk(rev: Int): Binary = {
    Revlog.decompress(chunkRaw(rev, rev))
  }

  private def chunkBase(rev: Int): Binary = {
    chunk(rev)
  }

  /** drop the read-ahead chunk cache */
  private def chunkClear() {
    chunkCache = (0, Binary())
  }

  /**
   * return an uncompressed revision of a given node
   *
   * Rebuilds the text by locating the nearest full snapshot (or the cached
   * revision) and applying the chain of deltas on top of it.
   */
  def revision(node: NodeId): Binary = {
    if (node == NodeId.NULLID) return Binary()
    //full cache hit returns immediately; otherwise remember the cached rev
    //so the delta chain can stop early there
    val cachedRev: Option[Int] = cache match {
      case None => None
      case Some(cache) if cache._1 == node => return cache._3
      case Some(cache) => Some(cache._2)
    }
    //look up what we need to read
    var text: Option[Binary] = None
    val revLocal = rev(node)
    //check rev flags
    val value = flags(revLocal) & ~Revlog.REVIDX_KNOWN_FLAGS
    if (value != 0)
      throw new RevlogException(translate("incompatible revision flag %x", value))
    //build delta chain (oldest first) back to the base snapshot or cached
    //rev; the old condition required cachedRev to be defined, so with an
    //empty cache no chain was ever built and delta revisions could not be
    //reconstructed
    var chain: List[Int] = Nil
    var iterRev = revLocal
    var e = index(iterRev)
    while (iterRev != e._4 && (cachedRev.isEmpty || iterRev != cachedRev.get)) {
      chain = iterRev :: chain
      //with general delta the base field names the delta parent,
      //otherwise each delta is against the previous revision
      iterRev = if (generalDelta != 0) e._4 else iterRev - 1
      e = index(iterRev)
    }
    val base = iterRev
    if (cachedRev.isDefined && iterRev == cachedRev.get) {
      //cache hit
      text = Some(cache.get._3)
    }
    //drop cache to save memory
    cache = None

    //warm the chunk cache with the whole span before decompressing pieces
    chunkRaw(base, revLocal)
    if (text.isEmpty) text = Some(chunkBase(base))

    val bins = chain.map(r => chunk(r))
    text = Some(Qpatch.patches(text.get, bins))

    checkHash(text.get, node, revLocal)

    cache = Some(node, revLocal, text.get)
    text.get
  }

  /**
   * verify that `text` hashes to `node` given its recorded parents
   * @throws RevlogException when the integrity check fails
   */
  def checkHash(text: Binary, node: NodeId, rev: Int) = {
    val (p1, p2) = parents(node)
    if (node != Revlog.hash(text, p1, p2))
      throw new RevlogException(translate("integrity check failed on %s:%d", indexFile, rev))
  }
}

