package org.apache.hadoop.hdfs;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.EnumSet;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.DFSClient.Conf;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.util.DataChecksum;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

/**
 * BlockReaderLocal implements local short-circuit reads: when the client and
 * the Datanode are on the same machine, the client can bypass the Datanode
 * process and read block data directly from the local disk.
 *
 * When the client requests data, the Datanode opens the block file and the
 * block's metadata file and passes both file descriptors to the client over a
 * domain socket.  The client wraps the descriptors in input streams and then
 * reads the on-disk block file directly through them.
 *
 * Because the data bypasses the Datanode process entirely, this path offers
 * much better read performance.  The file descriptors are read-only, so the
 * client cannot modify the files it receives; and since the client itself has
 * no access to the directory holding the block file, it cannot reach any
 * other files in the data directory either, which keeps the read path secure.
 *
 * Creating a local short-circuit reader requires that:
 * - the short-circuit read configuration options are enabled;
 * - the DFSClient has obtained the block file and metadata file descriptors
 *   via DataTransferProtocol.requestShortCircuitFds();
 * - the DFSClient has received those file descriptors.
 *
 * Start reading from the entry point read(ByteBuffer buf).
 */
class BlockReaderLocal implements BlockReader {
  static final Log LOG = LogFactory.getLog(BlockReaderLocal.class);

  // Shared pool of direct (off-heap) byte buffers used for the data and
  // checksum bounce buffers; buffers are returned to the pool on close().
  private static final DirectBufferPool bufferPool = new DirectBufferPool();
  /**
   * Builder for {@link BlockReaderLocal}.  A {@link ShortCircuitReplica}
   * (the pair of open file descriptors received from the DataNode) is
   * mandatory; every other parameter has a default derived from the client
   * configuration.
   */
  public static class Builder {
    private final int bufferSize;
    private boolean verifyChecksum;
    private int maxReadahead;
    private String filename;
    private ShortCircuitReplica replica;
    private long dataPos;
    private ExtendedBlock block;
    private StorageType storageType;

    public Builder(Conf conf) {
      this.maxReadahead = Integer.MAX_VALUE;
      this.verifyChecksum = !conf.skipShortCircuitChecksums;
      this.bufferSize = conf.shortCircuitBufferSize;
    }

    public Builder setVerifyChecksum(boolean verifyChecksum) {
      this.verifyChecksum = verifyChecksum;
      return this;
    }

    public Builder setCachingStrategy(CachingStrategy cachingStrategy) {
      // A strategy without an explicit readahead falls back to the
      // DataNode-side default.
      long readahead = cachingStrategy.getReadahead() != null ?
          cachingStrategy.getReadahead() :
              DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT;
      this.maxReadahead = (int)Math.min(Integer.MAX_VALUE, readahead);
      return this;
    }

    public Builder setFilename(String filename) {
      this.filename = filename;
      return this;
    }

    public Builder setShortCircuitReplica(ShortCircuitReplica replica) {
      this.replica = replica;
      return this;
    }

    public Builder setStartOffset(long startOffset) {
      // Negative offsets are clamped to the start of the block.
      this.dataPos = Math.max(0, startOffset);
      return this;
    }

    public Builder setBlock(ExtendedBlock block) {
      this.block = block;
      return this;
    }

    public Builder setStorageType(StorageType storageType) {
      this.storageType = storageType;
      return this;
    }

    /**
     * Build the reader.
     *
     * @throws NullPointerException if no ShortCircuitReplica was supplied.
     */
    public BlockReaderLocal build() {
      Preconditions.checkNotNull(replica,
          "Cannot build a BlockReaderLocal without a ShortCircuitReplica");
      return new BlockReaderLocal(this);
    }
  }

  // Whether close() has already run; guards against double-release.
  private boolean closed = false;

  /**
   * Pair of streams for this block.
   */
  private final ShortCircuitReplica replica;

  /**
   * The data FileChannel.
   */
  private final FileChannel dataIn;

  /**
   * The next place we'll read from in the block data FileChannel.
   *
   * If data is buffered in dataBuf, this offset will be larger than the
   * offset of the next byte which a read() operation will give us.
   */
  private long dataPos;

  /**
   * The Checksum FileChannel.
   */
  private final FileChannel checksumIn;
  
  /**
   * Checksum type and size.
   */
  private final DataChecksum checksum;

  /**
   * If false, we will always skip the checksum.
   */
  private final boolean verifyChecksum;

  /**
   * Name of the block, for logging purposes.
   */
  private final String filename;
  
  /**
   * Block ID and Block Pool ID.
   */
  private final ExtendedBlock block;
  
  /**
   * Cache of Checksum#bytesPerChecksum.
   */
  private final int bytesPerChecksum;

  /**
   * Cache of Checksum#checksumSize.
   */
  private final int checksumSize;

  /**
   * Maximum number of chunks to allocate.
   *
   * This is used to allocate dataBuf and checksumBuf, in the event that
   * we need them.
   */
  private final int maxAllocatedChunks;

  /**
   * True if zero readahead was requested.
   */
  private final boolean zeroReadaheadRequested;

  /**
   * Maximum amount of readahead we'll do.  This will always be at least the
   * size of a single chunk, even if {@link #zeroReadaheadRequested} is true.
   * The reason is because we need to do a certain amount of buffering in order
   * to do checksumming.
   * 
   * This determines how many bytes we'll use out of dataBuf and checksumBuf.
   * Why do we allocate buffers, and then (potentially) only use part of them?
   * The rationale is that allocating a lot of buffers of different sizes would
   * make it very difficult for the DirectBufferPool to re-use buffers. 
   */
  private final int maxReadaheadLength;

  /**
   * Buffers data starting at the current dataPos and extending on
   * for dataBuf.limit().
   *
   * This may be null if we don't need it.
   */
  private ByteBuffer dataBuf;

  /**
   * Buffers checksums starting at the current checksumPos and extending on
   * for checksumBuf.limit().
   *
   * This may be null if we don't need it.
   */
  private ByteBuffer checksumBuf;

  /**
   * StorageType of replica on DataNode.  May be null if the builder did not
   * set one.
   */
  private StorageType storageType;

  /**
   * Constructed only through {@link Builder#build()}.  Wires the reader to
   * the replica's open data/metadata channels and sizes the (lazily
   * allocated) bounce buffers.
   */
  private BlockReaderLocal(Builder builder) {
    this.replica = builder.replica;
    // dataIn wraps the short-circuit file descriptor received from the
    // DataNode; reads go straight to the local block file.
    this.dataIn = replica.getDataStream().getChannel();
    this.dataPos = builder.dataPos;
    this.checksumIn = replica.getMetaStream().getChannel();
    BlockMetadataHeader header = builder.replica.getMetaHeader();
    this.checksum = header.getChecksum();
    // Even if the caller asked for verification, a CHECKSUM_NULL stream has
    // nothing to verify against.
    this.verifyChecksum = builder.verifyChecksum && (this.checksum.getChecksumType().id != DataChecksum.CHECKSUM_NULL);
    this.filename = builder.filename;
    this.block = builder.block;
    this.bytesPerChecksum = checksum.getBytesPerChecksum();
    this.checksumSize = checksum.getChecksumSize();

    // Round the configured buffer size up to a whole number of chunks.
    this.maxAllocatedChunks = (bytesPerChecksum == 0) ? 0 : ((builder.bufferSize + bytesPerChecksum - 1) / bytesPerChecksum);
    // Calculate the effective maximum readahead.
    // We can't do more readahead than there is space in the buffer.
    int maxReadaheadChunks = (bytesPerChecksum == 0) ? 0 :
        ((Math.min(builder.bufferSize, builder.maxReadahead) + bytesPerChecksum - 1) / bytesPerChecksum);
    if (maxReadaheadChunks == 0) {
      // Even with zero readahead we need one chunk of buffering to be able
      // to checksum whole chunks.
      this.zeroReadaheadRequested = true;
      maxReadaheadChunks = 1;
    } else {
      this.zeroReadaheadRequested = false;
    }
    this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
    this.storageType = builder.storageType;
  }

  /**
   * Lazily allocate the data bounce buffer from the shared pool.  The buffer
   * starts out empty (position == limit == 0) so the first fill sees no
   * stale content.
   */
  private synchronized void createDataBufIfNeeded() {
    if (dataBuf != null) {
      return;
    }
    dataBuf = bufferPool.getBuffer(maxAllocatedChunks * bytesPerChecksum);
    dataBuf.position(0);
    dataBuf.limit(0);
  }

  /**
   * Return the data bounce buffer to the pool, if one is allocated.  Bytes
   * still buffered were never handed to the caller, so the stored file
   * offset is rewound by that amount before the buffer is released.
   */
  private synchronized void freeDataBufIfExists() {
    if (dataBuf == null) {
      return;
    }
    // Rewind past the bytes we buffered but never delivered.
    dataPos -= dataBuf.remaining();
    dataBuf.clear();
    bufferPool.returnBuffer(dataBuf);
    dataBuf = null;
  }

  /**
   * Lazily allocate the checksum bounce buffer from the shared pool,
   * starting out empty (position == limit == 0).
   */
  private synchronized void createChecksumBufIfNeeded() {
    if (checksumBuf != null) {
      return;
    }
    checksumBuf = bufferPool.getBuffer(maxAllocatedChunks * checksumSize);
    checksumBuf.position(0);
    checksumBuf.limit(0);
  }

  /**
   * Return the checksum bounce buffer to the pool, if one is allocated.
   * Unlike freeDataBufIfExists(), no offset bookkeeping is needed here.
   */
  private synchronized void freeChecksumBufIfExists() {
    if (checksumBuf == null) {
      return;
    }
    checksumBuf.clear();
    bufferPool.returnBuffer(checksumBuf);
    checksumBuf = null;
  }

  /**
   * Copy as many buffered bytes as possible from dataBuf into buf.
   *
   * @param buf  destination buffer.
   * @return     -1 if dataBuf is absent or holds no bytes; 0 if buf has no
   *             space left (while dataBuf still has data); otherwise the
   *             number of bytes transferred.
   */
  private synchronized int drainDataBuf(ByteBuffer buf) {
    if (dataBuf == null) return -1;
    int oldLimit = dataBuf.limit();
    int nRead = Math.min(dataBuf.remaining(), buf.remaining());
    if (nRead == 0) {
      return (dataBuf.remaining() == 0) ? -1 : 0;
    }
    try {
      // Temporarily cap dataBuf's limit so put() copies exactly nRead bytes.
      dataBuf.limit(dataBuf.position() + nRead);
      buf.put(dataBuf);
    } finally {
      dataBuf.limit(oldLimit);
    }
    return nRead;
  }

  /**
   * Read from the block file into a buffer.
   *
   * This function overwrites checksumBuf.  It will increment dataPos.
   *
   * @param buf   The buffer to read into.  May be dataBuf.
   *              The position and limit of this buffer should be set to multiples of the checksum size.
   * @param canSkipChecksum  True if we can skip checksumming.
   *
   * @return      Total bytes read.  0 on EOF.
   */
  private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum) throws IOException {
    int total = 0;
    long startDataPos = dataPos;
    int startBufPos = buf.position();
    // Fill buf from the data channel until it is full or we hit EOF.
    while (buf.hasRemaining()) {
      int nRead = dataIn.read(buf, dataPos);
      if (nRead < 0) {
        break;
      }
      dataPos += nRead;
      total += nRead;
    }
    if (canSkipChecksum) {
      // No verification needed; release the checksum buffer if present.
      freeChecksumBufIfExists();
      return total;
    }
    if (total > 0) {
      try {
        // Rewind buf over the bytes just read so they can be checksummed.
        buf.limit(buf.position());
        buf.position(startBufPos);
        createChecksumBufIfNeeded();
        int checksumsNeeded = (total + bytesPerChecksum - 1) / bytesPerChecksum;
        checksumBuf.clear();
        checksumBuf.limit(checksumsNeeded * checksumSize);
        // Offset of the first relevant checksum in the metadata file.
        long checksumPos = BlockMetadataHeader.getHeaderSize() + ((startDataPos / bytesPerChecksum) * checksumSize);
        while (checksumBuf.hasRemaining()) {
          int nRead = checksumIn.read(checksumBuf, checksumPos);
          if (nRead < 0) {
            throw new IOException("Got unexpected checksum file EOF at " +
                checksumPos + ", block file position " + startDataPos + " for " +  "block " + block + " of file " + filename);
          }
          checksumPos += nRead;
        }
        checksumBuf.flip();
  
        // Throws ChecksumException on any mismatch.
        checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
      } finally {
        // Restore buf's position to the end of the bytes read.
        buf.position(buf.limit());
      }
    }
    return total;
  }

  /**
   * Try to set up a context in which checksum verification can be skipped.
   *
   * If verifyChecksum is false, this configuration never verifies, so the
   * context trivially exists.  Replicas on transient storage carry no
   * checksums at all, so they are also exempt.  Otherwise we attempt to
   * place a "no-checksum" anchor on the replica's slot in the shared memory
   * segment between the DataNode and the client.
   *
   * Note that an anchor can be added if and only if the DataNode has cached
   * this replica: caching verifies the block's checksums and then pins the
   * data in memory via mmap/mlock, so an anchored replica is already known
   * to be correct and need not be re-verified.
   *
   * @return true if checksums may be skipped for subsequent reads.
   */
  private boolean createNoChecksumContext() {
    if (!verifyChecksum) {
      return true;
    }
    if (storageType != null && storageType.isTransient()) {
      // Transient-storage replicas have no checksums to verify.
      return true;
    }
    return replica.addNoChecksumAnchor();
  }

  /**
   * Undo a successful {@link #createNoChecksumContext()}: remove the anchor
   * placed on the replica's shared-memory slot, if one was added.
   */
  private void releaseNoChecksumContext() {
    if (!verifyChecksum) {
      return;
    }
    if (storageType == null || !storageType.isTransient()) {
      replica.removeNoChecksumAnchor();
    }
  }

  /**
   * Read bytes into buf starting at the current block position.
   *
   * The method splits into three steps:
   * 1. decide via createNoChecksumContext() whether checksum verification
   *    can be skipped;
   * 2. if it can, and zero readahead was requested, read straight into buf
   *    with readWithoutBounceBuffer();
   * 3. otherwise route the read through the chunk-aligned bounce buffer via
   *    readWithBounceBuffer() so checksums can be verified.
   *
   * @param buf  destination buffer.
   * @return     number of bytes read, or -1 at end of block.
   * @throws IOException on I/O error or checksum mismatch.
   */
  @Override
  public synchronized int read(ByteBuffer buf) throws IOException {
    // Whether this read may skip checksum verification.
    boolean canSkipChecksum = createNoChecksumContext();
    try {
      final int nRead;
      if (canSkipChecksum && zeroReadaheadRequested) {
        // No verification and no readahead: read directly into buf.
        nRead = readWithoutBounceBuffer(buf);
      } else {
        // Verification or readahead required: go through the bounce buffer.
        nRead = readWithBounceBuffer(buf, canSkipChecksum);
      }
      return nRead;
    } finally {
      if (canSkipChecksum) {
        releaseNoChecksumContext();
      }
    }
  }

  /**
   * Read directly from the block FileChannel into buf, bypassing the bounce
   * buffers entirely.  Used only when checksums are skipped and zero
   * readahead was requested.
   *
   * @return bytes read, or -1 if nothing was read and the position is at the
   *         end of the block file.
   */
  private synchronized int readWithoutBounceBuffer(ByteBuffer buf) throws IOException {
    // Release the data bounce buffer (this also rewinds dataPos over any
    // undelivered buffered bytes).
    freeDataBufIfExists();
    // Release the checksum bounce buffer; this path never verifies.
    freeChecksumBufIfExists();
    int total = 0;
    // Copy straight from the data channel into the caller's buffer.
    while (buf.hasRemaining()) {
      int nRead = dataIn.read(buf, dataPos);
      if (nRead <= 0) {
        break;
      }
      dataPos += nRead;
      total += nRead;
    }
    return (total == 0 && (dataPos == dataIn.size())) ? -1 : total;
  }

  /**
   * Fill the data buffer.  If necessary, validate the data against the checksums.
   * 
   * We always want the offsets of the data contained in dataBuf to be aligned to the chunk boundary.
   * If we are validating checksums, we accomplish this by seeking backwards in the file until we're on a chunk boundary.
   * (This is necessary because we can't checksum a partial chunk.)
   * If we are not validating checksums, we simply only fill the latter part of dataBuf.
   * 
   * @param canSkipChecksum  true if we can skip checksumming.
   * @return                 true if we hit EOF.
   * @throws IOException
   */
  private synchronized boolean fillDataBuf(boolean canSkipChecksum)
      throws IOException {
    createDataBufIfNeeded();
    // Number of bytes by which dataPos overshoots the last chunk boundary.
    final int slop = (int)(dataPos % bytesPerChecksum);
    final long oldDataPos = dataPos;
    dataBuf.limit(maxReadaheadLength);
    if (canSkipChecksum) {
      // Leave the partial-chunk prefix empty and fill from slop onward.
      dataBuf.position(slop);
      fillBuffer(dataBuf, canSkipChecksum);
    } else {
      // Seek back to the chunk boundary so only whole chunks are verified.
      dataPos -= slop;
      dataBuf.position(0);
      fillBuffer(dataBuf, canSkipChecksum);
    }
    dataBuf.limit(dataBuf.position());
    // Position dataBuf at the first byte the caller has not yet consumed.
    dataBuf.position(Math.min(dataBuf.position(), slop));
    if (LOG.isTraceEnabled()) {
      LOG.trace("loaded " + dataBuf.remaining() + " bytes into bounce " +
          "buffer from offset " + oldDataPos + " of " + block);
    }
    // A short fill means the channel hit the end of the block.
    return dataBuf.limit() != maxReadaheadLength;
  }

  /**
   * readWithBounceBuffer() uses two buffers owned by this BlockReaderLocal:
   * dataBuf (the data bounce buffer) and checksumBuf (the checksum bounce
   * buffer).
   *
   * dataBuf holds maxReadaheadLength bytes, which is always a whole number
   * of checksum chunks (a chunk being the span of data covered by one
   * checksum value); that alignment makes verification convenient, since
   * data can be read and verified one chunk at a time.  Both dataBuf and
   * checksumBuf are direct byte buffers, i.e. allocated off-heap via
   * java.nio.ByteBuffer.allocateDirect().  This is a pattern worth noting:
   * large buffers can live off-heap, sparing precious heap space.
   *
   * The buffer helpers used here:
   * - fillBuffer(ByteBuffer buf, boolean canSkipChecksum): reads data from
   *   the input stream into buf and reads the matching checksums into
   *   checksumBuf for verification.
   * - fillDataBuf(): uses fillBuffer() to load dataBuf (and checksumBuf).
   *   Note that dataBuf always holds a whole number of chunks.
   * - drainDataBuf(ByteBuffer buf): copies buffered bytes from dataBuf into
   *   buf and returns the number of bytes copied.
   *
   * The method first drains any bytes buffered in dataBuf into buf, which
   * leaves the read position pos on a chunk boundary.  Then, while buf has
   * room: if buf's remaining space is at least dataBuf's size and the
   * stream position is chunk-aligned, data is read straight into buf via
   * fillBuffer(), bypassing dataBuf; otherwise data is first loaded into
   * dataBuf via fillDataBuf() and then drained into buf.
   */
  private synchronized int readWithBounceBuffer(ByteBuffer buf, boolean canSkipChecksum) throws IOException {
    int total = 0;
    // First drain any bytes already buffered in dataBuf into buf.
    int bb = drainDataBuf(buf); // drain bounce buffer if possible
    if (bb >= 0) {
      total += bb;
      if (buf.remaining() == 0) {
        return total;
      }
    }
    boolean eof = true, done = false;
    do {
      // If buf is large enough and the stream position is on a chunk
      // boundary, read from the channel straight into buf.
      if (buf.isDirect() && (buf.remaining() >= maxReadaheadLength) && ((dataPos % bytesPerChecksum) == 0)) {
        int oldLimit = buf.limit();
        int nRead;
        try {
          buf.limit(buf.position() + maxReadaheadLength);
          nRead = fillBuffer(buf, canSkipChecksum);
        } finally {
          buf.limit(oldLimit);
        }
        if (nRead < maxReadaheadLength) {
          done = true;
        }
        if (nRead > 0) {
          eof = false;
        }
        total += nRead;
      } else {
        // Otherwise, load data into the dataBuf bounce buffer...
        if (fillDataBuf(canSkipChecksum)) {
          done = true;
        }
        // ...and then drain dataBuf into buf.
        bb = drainDataBuf(buf); // drain bounce buffer if possible
        if (bb >= 0) {
          eof = false;
          total += bb;
        }
      }
    } while ((!done) && (buf.remaining() > 0));
    return (eof && total == 0) ? -1 : total;

    /**
     * This design is quite clever -- why does every read pass through the
     * dataBuf bounce buffer first?  Because dataBuf always holds a whole
     * number of checksum chunks.  On the first iteration of a
     * readWithBounceBuffer() call, drainDataBuf() leaves the stream
     * position pos on a chunk boundary, so every subsequent read can
     * proceed in whole multiples of a chunk (maxReadaheadLength).
     *
     * Example: each chunk is 64MB, maxReadaheadLength is 3 chunks, dataBuf
     * is also 3 chunks, and the caller's buf is 480MB.
     *
     * ![](https://markdown-sjc.oss-cn-beijing.aliyuncs.com/img/hdfs-readWithBounceBuffer()%E7%A4%BA%E4%BE%8B.png)
     *
     * 1. buf has plenty of room and the stream is chunk-aligned, so
     *    fillBuffer() reads 3 chunks from the stream straight into buf.
     * 2. Same as step 1: 3 more chunks go straight into buf.
     * 3. buf now has only 98MB left, less than maxReadaheadLength's 192MB,
     *    so fillDataBuf() loads 3 chunks into the dataBuf cache instead.
     * 4. 98MB is drained from dataBuf into buf; the first
     *    readWithBounceBuffer() call completes, having read 480MB.
     * 5. On the second readWithBounceBuffer() call, dataBuf still holds
     *    data, so 98MB is drained from it into buf first; afterwards the
     *    stream position is back on a chunk boundary.
     * 6. buf has room and the stream is chunk-aligned, so fillBuffer()
     *    again reads straight into buf.
     */
  }

  /**
   * Read up to len bytes into arr starting at off.  Chooses between the
   * direct path and the bounce-buffer path exactly like
   * {@link #read(ByteBuffer)}.
   *
   * @return bytes read, or -1 at end of block.
   * @throws IOException on I/O error or checksum mismatch.
   */
  @Override
  public synchronized int read(byte[] arr, int off, int len) throws IOException {
    boolean canSkipChecksum = createNoChecksumContext();
    int nRead;
    try {
      if (canSkipChecksum && zeroReadaheadRequested) {
        // No verification and no readahead: read directly into arr.
        nRead = readWithoutBounceBuffer(arr, off, len);
      } else {
        // Verification or readahead required: go through the bounce buffer.
        nRead = readWithBounceBuffer(arr, off, len, canSkipChecksum);
      }
    } finally {
      if (canSkipChecksum) {
        releaseNoChecksumContext();
      }
    }
    return nRead;
  }

  /**
   * Read directly from the data channel into a byte array, without the
   * bounce buffers.  Returns -1 only when no bytes were read and the file
   * position is at the end of the block.
   */
  private synchronized int readWithoutBounceBuffer(byte arr[], int off, int len) throws IOException {
    freeDataBufIfExists();
    freeChecksumBufIfExists();
    ByteBuffer wrapped = ByteBuffer.wrap(arr, off, len);
    int nRead = dataIn.read(wrapped, dataPos);
    if (nRead > 0) {
      dataPos += nRead;
      return nRead;
    }
    if (nRead == 0 && dataPos == dataIn.size()) {
      // Nothing read and we are at the end of the block.
      return -1;
    }
    return nRead;
  }

  /**
   * Byte-array variant of the bounce-buffer read path: refill dataBuf when
   * it is empty, then copy up to len buffered bytes into arr.
   *
   * @return bytes copied, or -1 if the block is exhausted.
   */
  private synchronized int readWithBounceBuffer(byte arr[], int off, int len, boolean canSkipChecksum) throws IOException {
    createDataBufIfNeeded();
    if (!dataBuf.hasRemaining()) {
      // Reset the buffer window and load the next run of chunks.
      dataBuf.position(0);
      dataBuf.limit(maxReadaheadLength);
      fillDataBuf(canSkipChecksum);
    }
    // Still empty after a refill attempt: end of block.
    if (dataBuf.remaining() == 0) {
      return -1;
    }
    int toRead = Math.min(dataBuf.remaining(), len);
    dataBuf.get(arr, off, toRead);
    return toRead;
  }

  /**
   * Skip forward n bytes.  Bytes already buffered in dataBuf are discarded
   * first; the remainder of the skip just advances dataPos, so no I/O is
   * performed.
   *
   * NOTE(review): this always reports n bytes skipped, even if dataPos ends
   * up past the end of the block -- presumably callers bound n beforehand;
   * verify against call sites.
   */
  @Override
  public synchronized long skip(long n) throws IOException {
    int discardedFromBuf = 0;
    long remaining = n;
    if ((dataBuf != null) && dataBuf.hasRemaining()) {
      // Consume buffered bytes before touching the file offset.
      discardedFromBuf = (int)Math.min(dataBuf.remaining(), n);
      dataBuf.position(dataBuf.position() + discardedFromBuf);
      remaining -= discardedFromBuf;
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("skip(n=" + n + ", block=" + block + ", filename=" + 
        filename + "): discarded " + discardedFromBuf + " bytes from " +
        "dataBuf and advanced dataPos by " + remaining);
    }
    dataPos += remaining;
    return n;
  }

  /**
   * Everything is local disk, so there is never a network round trip to
   * wait on; report unlimited availability.
   */
  @Override
  public int available() throws IOException {
    // We never do network I/O in BlockReaderLocal.
    return Integer.MAX_VALUE;
  }

  /**
   * Release this reader's reference on the replica and return any bounce
   * buffers to the pool.  Idempotent: subsequent calls are no-ops.
   */
  @Override
  public synchronized void close() throws IOException {
    if (closed) return;
    closed = true;
    if (LOG.isTraceEnabled()) {
      LOG.trace("close(filename=" + filename + ", block=" + block + ")");
    }
    replica.unref();
    freeDataBufIfExists();
    freeChecksumBufIfExists();
  }

  /** Read exactly len bytes, looping over read(); delegates to BlockReaderUtil. */
  @Override
  public synchronized void readFully(byte[] arr, int off, int len)
      throws IOException {
    BlockReaderUtil.readFully(this, arr, off, len);
  }

  /** Read up to len bytes or until EOF; delegates to BlockReaderUtil. */
  @Override
  public synchronized int readAll(byte[] buf, int off, int len)
      throws IOException {
    return BlockReaderUtil.readAll(this, buf, off, len);
  }

  /** Always local: this reader only exists for same-machine replicas. */
  @Override
  public boolean isLocal() {
    return true;
  }

  /** Always short-circuit: reads bypass the DataNode process entirely. */
  @Override
  public boolean isShortCircuit() {
    return true;
  }

  /**
   * Get or create a memory map for this replica.
   * 
   * There are two kinds of ClientMmap objects we could fetch here: one that 
   * will always read pre-checksummed data, and one that may read data that
   * hasn't been checksummed.
   *
   * If we fetch the former, "safe" kind of ClientMmap, we have to increment
   * the anchor count on the shared memory slot.  This will tell the DataNode
   * not to munlock the block until this ClientMmap is closed.
   * If we fetch the latter, we don't bother with anchoring.
   *
   * @param opts     The options to use, such as SKIP_CHECKSUMS.
   * 
   * @return         null on failure; the ClientMmap otherwise.
   */
  @Override
  public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
    // Anchor only when the mapped data must be pre-verified.
    boolean anchor = verifyChecksum &&
        !opts.contains(ReadOption.SKIP_CHECKSUMS);
    if (anchor) {
      if (!createNoChecksumContext()) {
        // Could not anchor, so a "safe" mmap cannot be provided.
        return null;
      }
    }
    ClientMmap clientMmap = null;
    try {
      clientMmap = replica.getOrCreateClientMmap(anchor);
    } finally {
      // If mmap creation failed, release the anchor taken above.
      if ((clientMmap == null) && anchor) {
        releaseNoChecksumContext();
      }
    }
    return clientMmap;
  }
  
  @VisibleForTesting
  boolean getVerifyChecksum() {
    // Test hook: whether this reader verifies checksums on the read path.
    return this.verifyChecksum;
  }

  @VisibleForTesting
  int getMaxReadaheadLength() {
    // Test hook: effective readahead in bytes (always at least one chunk).
    return this.maxReadaheadLength;
  }
  
  /**
   * Make the replica anchorable.  Normally this can only be done by the
   * DataNode.  This method is only for testing.
   */
  @VisibleForTesting
  void forceAnchorable() {
    replica.getSlot().makeAnchorable();
  }

  /**
   * Make the replica unanchorable.  Normally this can only be done by the
   * DataNode.  This method is only for testing.
   */
  @VisibleForTesting
  void forceUnanchorable() {
    replica.getSlot().makeUnanchorable();
  }
}
