/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

package io.netty.buffer;

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;

/**
 * Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
 *
 * Notation: The following terms are important to understand the code
 * > page  - a page is the smallest unit of memory chunk that can be allocated
 * > chunk - a chunk is a collection of pages
 * > in this code chunkSize = 2^{maxOrder} * pageSize
 *
 * To begin we allocate a byte array of size = chunkSize Whenever a ByteBuf of given size needs to
 * be created we search for the first position in the byte array that has enough empty space to
 * accommodate the requested size and return a (long) handle that encodes this offset information,
 * (this memory segment is then marked as reserved so it is always used by exactly one ByteBuf and
 * no more)
 *
 * For simplicity all sizes are normalized according to PoolArena#normalizeCapacity method This
 * ensures that when we request for memory segments of size >= pageSize the normalizedCapacity
 * equals the next nearest power of 2
 *
 * To search for the first offset in chunk that has at least requested size available we construct a
 * complete balanced binary tree and store it in an array (just like heaps) - memoryMap
 *
 * The tree looks like this (the size of each node being mentioned in the parenthesis)
 *
 * depth=0        1 node (chunkSize)
 * depth=1        2 nodes (chunkSize/2)
 * ..
 * depth=d        2^d nodes (chunkSize/2^d)
 * ..
 * depth=maxOrder 2^maxOrder nodes (chunkSize/2^{maxOrder} = pageSize)
 *
 * depth=maxOrder is the last level and the leafs consist of pages
 *
 * With this tree available searching in chunkArray translates like this: To allocate a memory
 * segment of size chunkSize/2^k we search for the first node (from left) at height k which is
 * unused
 *
 * Algorithm: ---------- Encode the tree in memoryMap with the notation memoryMap[id] = x => in the
 * subtree rooted at id, the first node that is free to be allocated is at depth x (counted from
 * depth=0) i.e., at depths [depth_of_id, x), there is no node that is free
 *
 * As we allocate & free nodes, we update values stored in memoryMap so that the property is
 * maintained
 *
 * Initialization - In the beginning we construct the memoryMap array by storing the depth of a node
 * at each node i.e., memoryMap[id] = depth_of_id
 *
 * Observations: ------------- 1) memoryMap[id] = depth_of_id  => it is free / unallocated 2)
 * memoryMap[id] > depth_of_id  => at least one of its child nodes is allocated, so we cannot
 * allocate it, but some of its children can still be allocated based on their availability 3)
 * memoryMap[id] = maxOrder + 1 => the node is fully allocated & thus none of its children can be
 * allocated, it is thus marked as unusable
 *
 * Algorithm: [allocateNode(d) => we want to find the first node (from left) at depth d that can
 * be allocated] ---------- 1) start at root (i.e., depth = 0 or id = 1) 2) if memoryMap[1] > d =>
 * cannot be allocated from this chunk 3) if left node value <= d; we can allocate from left
 * subtree so move to left and repeat until found 4) else try in right subtree
 *
 * Algorithm: [allocateRun(size)] ---------- 1) Compute d = log_2(chunkSize/size) 2) Return
 * allocateNode(d)
 *
 * Algorithm: [allocateSubpage(size)] ---------- 1) use allocateNode(maxOrder) to find an empty
 * (i.e., unused) leaf (i.e., page) 2) use this handle to construct the PoolSubpage object or if it
 * already exists just call init(normCapacity) note that this PoolSubpage object is added to
 * subpagesPool in the PoolArena when we init() it
 *
 * Note: ----- In the implementation, for improving cache coherence, we store the 2 pieces of
 * information x and depth_of_id as two byte values in memoryMap and depthMap respectively
 *
 * memoryMap[id] = x  indicates that the first node which is free to be allocated in the subtree
 * rooted at id is at depth x (from root); depthMap[id] = depth_of_id  is fixed at construction
 * time and never changes
 */
final class PoolChunk<T> implements PoolChunkMetric {

  private static final int INTEGER_SIZE_MINUS_ONE = Integer.SIZE - 1;

  final PoolArena<T> arena;
  final T memory;
  final boolean unpooled;
  final int offset;
  // memoryMap[id] = shallowest depth at which the subtree rooted at id still has a free node.
  // Equal to depthMap[id] while the node is completely free; set to `unusable` (maxOrder + 1)
  // once the node is fully allocated. This is the only tree array mutated after construction.
  private final byte[] memoryMap;
  // depthMap[id] = depth of node id in the complete binary tree; written once in the constructor
  // and never modified afterwards, so depth(id) is an O(1) lookup.
  private final byte[] depthMap;
  // One PoolSubpage slot per leaf (page); subpages[0] maps to memoryMap index maxSubpageAllocs.
  private final PoolSubpage<T>[] subpages;
  /**
   * Used to determine if the requested capacity is equal to or greater than pageSize.
   */
  private final int subpageOverflowMask;
  private final int pageSize;
  private final int pageShifts;
  private final int maxOrder;
  private final int chunkSize;
  private final int log2ChunkSize;
  private final int maxSubpageAllocs;
  /**
   * Used to mark memory as unusable
   */
  private final byte unusable;

  // Use as cache for ByteBuffer created from the memory. These are just duplicates and so are only a container
  // around the memory itself. These are often needed for operations within the Pooled*ByteBuf and so
  // may produce extra GC, which can be greatly reduced by caching the duplicates.
  //
  // This may be null if the PoolChunk is unpooled as pooling the ByteBuffer instances does not make any sense here.
  private final Deque<ByteBuffer> cachedNioBuffers;

  private int freeBytes;

  PoolChunkList<T> parent;
  PoolChunk<T> prev;
  PoolChunk<T> next;

  // TODO: Test if adding padding helps under contention
  //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

  PoolChunk(PoolArena<T> arena, T memory, int pageSize, int maxOrder, int pageShifts, int chunkSize,
      int offset) {
    unpooled = false;
    this.arena = arena;
    this.memory = memory;
    this.pageSize = pageSize; // 8KB by default
    this.pageShifts = pageShifts; // 13 by default (log2 of pageSize)
    this.maxOrder = maxOrder; // 11 by default
    this.chunkSize = chunkSize; // 16M by default
    this.offset = offset; // 0 by default
    // Leaves sit at depth maxOrder (11), so maxOrder + 1 (12) can never be a real depth and is
    // used as the marker value for a fully allocated, unusable node.
    unusable = (byte) (maxOrder + 1);
    log2ChunkSize = log2(chunkSize);  // 24 --> 2^24 = 16M
    subpageOverflowMask = ~(pageSize - 1);  // -8192; used to test whether a request is smaller than pageSize
    freeBytes = chunkSize;  // free memory in this chunk, initially the full chunkSize (16M)

    assert maxOrder < 30 : "maxOrder should be < 30, but is: " + maxOrder;
    maxSubpageAllocs = 1 << maxOrder; // 2048 leaf (page) nodes, each covering pageSize (8KB) bytes

    // Generate the memory map.
    // memoryMap tracks the allocation state of every tree node. Conceptually it stores each
    // node's remaining capacity encoded as a depth value (0..12); once a node has been fully
    // allocated it is marked with unusable (= 12).
    memoryMap = new byte[maxSubpageAllocs << 1];
    // depthMap records the depth of each node: the root is 0, the two nodes of the next level
    // are 1, and so on. It never changes; it exists so the depth of any index can be read back
    // directly instead of being recomputed.
    depthMap = new byte[memoryMap.length];
    int memoryMapIndex = 1;
    for (int d = 0; d <= maxOrder; ++d) { // move down the tree one level at a time
      int depth = 1 << d;
      for (int p = 0; p < depth; ++p) {
        // in each level traverse left to right and set value to the depth of subtree
        memoryMap[memoryMapIndex] = (byte) d;
        depthMap[memoryMapIndex] = (byte) d;
        memoryMapIndex++;
      }
    }

    // Each PoolSubpage corresponds to exactly one leaf of the tree, i.e. to memoryMap indices
    // starting at 2048; interior (parent) nodes never get a PoolSubpage.
    subpages = newSubpageArray(maxSubpageAllocs);
    cachedNioBuffers = new ArrayDeque<ByteBuffer>(8);
  }

  /**
   * Creates a special chunk that is not pooled.
   */
  PoolChunk(PoolArena<T> arena, T memory, int size, int offset) {
    unpooled = true;
    this.arena = arena;
    this.memory = memory;
    this.offset = offset;
    // No buddy tree / subpages: an unpooled chunk is allocated and freed as a single unit.
    memoryMap = null;
    depthMap = null;
    subpages = null;
    subpageOverflowMask = 0;
    pageSize = 0;
    pageShifts = 0;
    maxOrder = 0;
    unusable = (byte) (maxOrder + 1);
    chunkSize = size;
    log2ChunkSize = log2(chunkSize);
    maxSubpageAllocs = 0;
    cachedNioBuffers = null;
  }

  @SuppressWarnings("unchecked")
  private PoolSubpage<T>[] newSubpageArray(int size) {
    return new PoolSubpage[size];
  }

  @Override
  public int usage() {
    final int freeBytes;
    synchronized (arena) {
      freeBytes = this.freeBytes;
    }
    return usage(freeBytes);
  }

  /**
   * Computes the usage percentage for the given amount of free bytes.
   *
   * Note that the integer truncation in {@code (int) (freeBytes * 100L / chunkSize)} makes a
   * chunk reach a usage threshold slightly before its real usage does: freeBytes starts at
   * chunkSize (100% free) and shrinks as memory is allocated. Taking qInit's 25% threshold as an
   * example, when the real free percentage is 75.999% the truncation yields 75, so this method
   * reports 100 - 75 = 25% usage and the chunk is moved to the next PoolChunkList even though
   * actual usage is only just above 24%.
   */
  private int usage(int freeBytes) {
    if (freeBytes == 0) {
      return 100;
    }

    int freePercentage = (int) (freeBytes * 100L / chunkSize);
    if (freePercentage == 0) {
      return 99;
    }
    return 100 - freePercentage;
  }

  /**
   * Allocates memory of the given normalized capacity and initializes {@code buf} with it.
   * Returns {@code false} if this chunk has no suitable free space.
   */
  boolean allocate(PooledByteBuf<T> buf, int reqCapacity, int normCapacity) {
    final long handle;
    // subpageOverflowMask == -8192 (for an 8KB page); a non-zero result here means
    // normCapacity >= pageSize, so a run of whole pages is allocated instead of a subpage slice.
    if ((normCapacity & subpageOverflowMask) != 0) { // >= pageSize
      handle = allocateRun(normCapacity);
    } else {
      handle = allocateSubpage(normCapacity);
    }

    if (handle < 0) {
      return false;
    }
    ByteBuffer nioBuffer = cachedNioBuffers != null ? cachedNioBuffers.pollLast() : null;
    initBuf(buf, nioBuffer, handle, reqCapacity);
    return true;
  }

  /**
   * Update method used by allocate This is triggered only when a successor is allocated and all its
   * predecessors need to update their state The minimal depth at which subtree rooted at id has
   * some free space
   *
   * @param id id
   */
  private void updateParentsAlloc(int id) {
    while (id > 1) {
      int parentId = id >>> 1;
      byte val1 = value(id);
      byte val2 = value(id ^ 1); // id ^ 1 is the sibling node
      byte val = val1 < val2 ? val1 : val2;
      // The parent holds the minimum (i.e. shallowest still-free depth) of its two children.
      setValue(parentId, val);
      id = parentId;
    }
  }

  /**
   * Update method used by free This needs to handle the special case when both children are
   * completely free in which case parent be directly allocated on request of size = child-size * 2
   *
   * @param id id
   */
  private void updateParentsFree(int id) {
    int logChild = depth(id) + 1;
    while (id > 1) {
      int parentId = id >>> 1;
      byte val1 = value(id);
      byte val2 = value(id ^ 1); // id ^ 1 is the sibling node
      logChild -= 1; // in first iteration equals log, subsequently reduce 1 from logChild as we traverse up

      if (val1 == logChild && val2 == logChild) {
        // Both children are completely free, so the parent becomes completely free as well and
        // can serve a request of twice the child size.
        setValue(parentId, (byte) (logChild - 1));
      } else {
        byte val = val1 < val2 ? val1 : val2;
        setValue(parentId, val);
      }

      id = parentId;
    }
  }

  /**
   * Algorithm to allocate an index in memoryMap when we query for a free node at depth d
   *
   * @param d depth
   * @return index in memoryMap
   */
  private int allocateNode(int d) {
    int id = 1;
    int initial = -(1 << d); // has last d bits = 0 and rest all = 1
    byte val = value(id); // memoryMap value stored at index id
    if (val > d) { // unusable
      return -1;
    }
    while (val < d || (id & initial) == 0) { // id & initial == 1 << d for all ids at depth d, for < d it is 0
      id <<= 1;
      val = value(id);
      // val > d means the subtree rooted at this (left) child cannot satisfy a request at depth
      // d, so id ^= 1 switches to its sibling. In effect the search always descends into the
      // left child first and only falls back to the right child when the left subtree cannot
      // serve the requested size.
      if (val > d) {
        id ^= 1;
        val = value(id);
      }
    }
    byte value = value(id);
    assert value == d && (id & initial) == 1 << d : String
        .format("val = %d, id & initial = %d, d = %d",
            value, id & initial, d);
    setValue(id, unusable); // mark as unusable
    updateParentsAlloc(id);
    return id;
  }

  /**
   * Allocate a run of pages (>=1)
   *
   * @param normCapacity normalized capacity
   * @return index in memoryMap
   */
  private long allocateRun(int normCapacity) {
    // d is the depth whose nodes span exactly normCapacity bytes (normCapacity is a power of 2
    // and >= pageSize here, see allocate()).
    int d = maxOrder - (log2(normCapacity) - pageShifts);
    int id = allocateNode(d);
    if (id < 0) {
      return id;
    }
    freeBytes -= runLength(id);
    return id;
  }

  /**
   * Create / initialize a new PoolSubpage of normCapacity Any PoolSubpage created / initialized
   * here is added to subpage pool in the PoolArena that owns this PoolChunk
   *
   * @param normCapacity normalized capacity
   * @return index in memoryMap
   */
  private long allocateSubpage(int normCapacity) {
    // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
    // This is needed as we may add it back and so alter the linked-list structure.
    // Like PoolThreadCache, the arena keeps its subpage pool heads in a tiered array; which
    // element to use is selected from the requested (normalized) capacity.
    PoolSubpage<T> head = arena.findSubpagePoolHead(normCapacity);
    int d = maxOrder; // subpages can only be allocated from pages i.e., leaves
    synchronized (head) {
      // allocateNode(d) finds the leftmost node whose stored value equals the requested depth d
      // (here d == maxOrder == 11, i.e. a completely free leaf). The search starts at the root,
      // preferring the left branch and switching to the right sibling whenever the left subtree
      // cannot serve the request. The found node is marked unusable (12) and every parent on the
      // way up is updated to the minimum of its two children. Returns -1 if nothing fits.
      int id = allocateNode(d);
      if (id < 0) {
        return id;
      }

      final PoolSubpage<T>[] subpages = this.subpages;
      final int pageSize = this.pageSize;

      freeBytes -= pageSize;

      // Map the leaf's memoryMap index onto its slot in the subpages array.
      int subpageIdx = subpageIdx(id);
      PoolSubpage<T> subpage = subpages[subpageIdx];
      if (subpage == null) {
        // runOffset(id) is the byte offset of this page within the chunk's backing memory;
        // normCapacity is the normalized element size the page will be carved into;
        // pageSize is the page size, 8KB by default.
        subpage = new PoolSubpage<T>(head, this, id, runOffset(id), pageSize, normCapacity);
        subpages[subpageIdx] = subpage;
      } else {
        subpage.init(head, normCapacity);
      }
      return subpage.allocate();
    }
  }

  /**
   * Free a subpage or a run of pages When a subpage is freed from PoolSubpage, it might be added
   * back to subpage pool of the owning PoolArena If the subpage pool in PoolArena has at least one
   * other PoolSubpage of given elemSize, we can completely free the owning Page so it is available
   * for subsequent allocations
   *
   * @param handle handle to free
   */
  void free(long handle, ByteBuffer nioBuffer) {
    int memoryMapIdx = memoryMapIdx(handle);
    int bitmapIdx = bitmapIdx(handle);

    if (bitmapIdx != 0) { // free a subpage
      PoolSubpage<T> subpage = subpages[subpageIdx(memoryMapIdx)];
      assert subpage != null && subpage.doNotDestroy;

      // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
      // This is needed as we may add it back and so alter the linked-list structure.
      PoolSubpage<T> head = arena.findSubpagePoolHead(subpage.elemSize);
      synchronized (head) {
        if (subpage.free(head, bitmapIdx & 0x3FFFFFFF)) {
          // The subpage (and thus its backing page) is still in use or kept pooled; nothing
          // more to release here (see the method javadoc).
          return;
        }
      }
    }
    // Free a run of pages (or the now-empty page behind a subpage): return the node to the
    // buddy tree and propagate the change upwards.
    freeBytes += runLength(memoryMapIdx);
    setValue(memoryMapIdx, depth(memoryMapIdx));
    updateParentsFree(memoryMapIdx);

    if (nioBuffer != null && cachedNioBuffers != null &&
        cachedNioBuffers.size() < PooledByteBufAllocator.DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK) {
      cachedNioBuffers.offer(nioBuffer);
    }
  }

  /**
   * Initializes {@code buf} with the memory referenced by {@code handle}: a whole run of pages
   * when the handle's bitmap index is 0, or a slice of a subpage otherwise.
   */
  void initBuf(PooledByteBuf<T> buf, ByteBuffer nioBuffer, long handle, int reqCapacity) {
    int memoryMapIdx = memoryMapIdx(handle);
    int bitmapIdx = bitmapIdx(handle);
    if (bitmapIdx == 0) {
      byte val = value(memoryMapIdx);
      assert val == unusable : String.valueOf(val);
      buf.init(this, nioBuffer, handle, runOffset(memoryMapIdx) + offset,
          reqCapacity, runLength(memoryMapIdx), arena.parent.threadCache());
    } else {
      initBufWithSubpage(buf, nioBuffer, handle, bitmapIdx, reqCapacity);
    }
  }

  void initBufWithSubpage(PooledByteBuf<T> buf, ByteBuffer nioBuffer, long handle,
      int reqCapacity) {
    initBufWithSubpage(buf, nioBuffer, handle, bitmapIdx(handle), reqCapacity);
  }

  private void initBufWithSubpage(PooledByteBuf<T> buf, ByteBuffer nioBuffer,
      long handle, int bitmapIdx, int reqCapacity) {
    assert bitmapIdx != 0;

    int memoryMapIdx = memoryMapIdx(handle);

    PoolSubpage<T> subpage = subpages[subpageIdx(memoryMapIdx)];
    assert subpage.doNotDestroy;
    assert reqCapacity <= subpage.elemSize;

    // Offset = page offset + (element index within the page) * element size + chunk base offset.
    // The & 0x3FFFFFFF strips the marker bit(s) from the bitmap index (cf. free()).
    buf.init(
        this, nioBuffer, handle,
        runOffset(memoryMapIdx) + (bitmapIdx & 0x3FFFFFFF) * subpage.elemSize + offset,
        reqCapacity, subpage.elemSize, arena.parent.threadCache());
  }

  private byte value(int id) {
    return memoryMap[id];
  }

  private void setValue(int id, byte val) {
    memoryMap[id] = val;
  }

  private byte depth(int id) {
    return depthMap[id];
  }

  private static int log2(int val) {
    // compute the (0-based, with lsb = 0) position of highest set bit i.e, log2
    return INTEGER_SIZE_MINUS_ONE - Integer.numberOfLeadingZeros(val);
  }

  private int runLength(int id) {
    // represents the size in #bytes supported by node 'id' in the tree
    // e.g. a leaf at depth 11 (maxOrder) spans 8192 bytes (one page), a node at depth 10 spans
    // 16384 bytes, and so on up to the root at depth 0, which spans the whole chunk.
    return 1 << log2ChunkSize - depth(id);
  }

  private int runOffset(int id) {
    // represents the 0-based offset in #bytes from start of the byte-array chunk
    // For leaves: id 2048 -> offset 0; id 2049 -> offset 8192 (one 8KB leaf precedes it);
    // id 2050 -> offset 16384 (two leaves precede it); and so on.
    // For parents, e.g. depth 10: id 1024 -> offset 0; id 1025 -> offset 16384, because each
    // depth-10 node spans 16KB.
    int shift = id ^ 1 << depth(id);
    return shift * runLength(id);
  }

  // PoolSubpages exist only for the 2048 leaf nodes, while memoryMapIdx indexes the full
  // 4096-entry memoryMap. Clearing the highest set bit maps a leaf's memoryMap index onto the
  // subpages array, e.g. memoryMap index 2048 -> subpages[0].
  private int subpageIdx(int memoryMapIdx) {
    return memoryMapIdx ^ maxSubpageAllocs; // remove highest set bit, to get offset
  }

  private static int memoryMapIdx(long handle) {
    // The low 32 bits of a handle encode the memoryMap index.
    return (int) handle;
  }

  private static int bitmapIdx(long handle) {
    // The high 32 bits of a handle encode the subpage bitmap index (0 for a plain run of pages).
    return (int) (handle >>> Integer.SIZE);
  }

  @Override
  public int chunkSize() {
    return chunkSize;
  }

  @Override
  public int freeBytes() {
    synchronized (arena) {
      return freeBytes;
    }
  }

  @Override
  public String toString() {
    final int freeBytes;
    synchronized (arena) {
      freeBytes = this.freeBytes;
    }

    return new StringBuilder()
        .append("Chunk(")
        .append(Integer.toHexString(System.identityHashCode(this)))
        .append(": ")
        .append(usage(freeBytes))
        .append("%, ")
        .append(chunkSize - freeBytes)
        .append('/')
        .append(chunkSize)
        .append(')')
        .toString();
  }

  void destroy() {
    arena.destroyChunk(this);
  }
}
