package org.apache.hadoop.hdfs.server.datanode;

import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_ACCESS_TOKEN;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_INVALID;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_UNSUPPORTED;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.DO_NOT_USE_RECEIPT_VERIFICATION;
import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
import static org.apache.hadoop.util.Time.now;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketException;
import java.nio.channels.ClosedChannelException;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;

import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;

/**
 * The DataNode runs a background DataXceiverServer thread that accepts data
 * read/write requests from clients or from other datanodes, and spawns a
 * dedicated worker thread per request connection. That worker is a
 * DataXceiver.
 *
 * Once a client or a datanode has established a connection, a single
 * DataXceiver thread handles all subsequent requests and responses on it.
 */
class DataXceiver extends Receiver implements Runnable {
  public static final Log LOG = DataNode.LOG;
  static final Log ClientTraceLog = DataNode.ClientTraceLog;

  // Peer wrapping the socket and its input/output streams; the primary
  // communication handle this DataXceiver thread works with.
  private Peer peer;

  // Addresses of the two endpoints, both obtained from the peer (socket).
  private final String remoteAddress; // address of remote side
  private final String localAddress;  // local address of this daemon


  // The owning DataNode instance.
  private final DataNode datanode;
  // DataNode configuration.
  private final DNConf dnConf;
  // The DataXceiverServer that spawned this worker.
  private final DataXceiverServer dataXceiverServer;

  // Whether to connect to other datanodes by hostname; from
  // dfs.datanode.use.datanode.hostname, default false.
  private final boolean connectToDnViaHostname;
  // Start time of receiving the current Op, set in run() before processOp().
  private long opStartTime; //the start time of receiving an Op
  // Raw socket input stream, captured in the constructor.
  private final InputStream socketIn;
  // Socket output stream; replaced by the SASL-negotiated stream in run().
  private OutputStream socketOut;
  // Active block receiver; non-null while a write operation is in progress.
  private BlockReceiver blockReceiver = null;
  
  // Client name from the previous operation on this socket; not available
  // for the first request.
  private String previousOpClientName;

  /**
   * Static factory: builds a DataXceiver for the given peer connection by
   * delegating to the private constructor.
   */
  public static DataXceiver create(Peer peer, DataNode dn, DataXceiverServer dataXceiverServer) throws IOException {
    return new DataXceiver(peer, dn, dataXceiverServer);
  }
  
  /**
   * Binds this worker to a single peer connection. Every field is derived
   * from the peer, the owning datanode, or the datanode's configuration.
   */
  private DataXceiver(Peer peer, DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException {
    this.datanode = datanode;
    this.dataXceiverServer = dataXceiverServer;
    this.peer = peer;
    // Streams come straight from the peer; socketOut may later be swapped
    // for the SASL-negotiated stream in run().
    this.socketIn = peer.getInputStream();
    this.socketOut = peer.getOutputStream();
    this.dnConf = datanode.getDnConf();
    this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
    this.remoteAddress = peer.getRemoteAddressString();
    this.localAddress = peer.getLocalAddressString();
  }

  /**
   * Update the current thread's name to contain the current status.
   * Use this only after this receiver has started on its thread, i.e.,
   * outside the constructor.
   *
   * @param status short progress label, or null for no status suffix
   */
  private void updateCurrentThreadName(String status) {
    String name = "DataXceiver for client ";
    if (previousOpClientName != null) {
      name += previousOpClientName + " at ";
    }
    name += remoteAddress;
    if (status != null) {
      name += " [" + status + "]";
    }
    Thread.currentThread().setName(name);
  }

  /** Return the datanode object. */
  DataNode getDataNode() {
    return datanode;
  }
  
  /** @return the current socket output stream (SASL-wrapped after run() negotiates). */
  private OutputStream getOutputStream() {
    return socketOut;
  }

  /**
   * Send an out-of-band message to the peer through the active block
   * receiver, if a write operation is currently in progress.
   */
  public void sendOOB() throws IOException, InterruptedException {
    LOG.info("Sending OOB to peer: " + peer);
    if (blockReceiver != null) {
      blockReceiver.sendOOB();
    }
  }
  
  /**
   * Main loop: read and dispatch data-transfer operations arriving on this
   * peer (from a client or another datanode) until the connection closes.
   *
   * Outline:
   * 1. Register the peer/thread/DataXceiver mapping with dataXceiverServer.
   * 2. Set the socket write timeout (dfs.datanode.socket.write.timeout).
   * 3. Run the SASL handshake to obtain an IOStreamPair (input and output).
   * 4. Wrap the negotiated input stream in a BufferedInputStream sized
   *    HdfsConstants.SMALL_BUFFER_SIZE.
   * 5. Replace socketOut with the negotiated output stream.
   * 6. Call super.initialize() so the Receiver base class can read ops.
   * 7. In a do...while loop (while the peer is open and
   *    socketKeepaliveTimeout > 0):
   *    7.1 update the thread name as a progress indicator;
   *    7.2 set the read timeout: full socketTimeout for the first op on a
   *        fresh connection, the shorter socketKeepaliveTimeout when the
   *        connection is being reused;
   *    7.3 read the next opcode via readOp();
   *    7.4 restore the normal read timeout;
   *    7.5 record the op start time;
   *    7.6 dispatch the op via processOp();
   *    7.7 increment opsProcessed.
   * 8. Finally, rename the thread to "Cleaning up" and close the peer and
   *    input stream.
   */
  @Override
  public void run() {
    int opsProcessed = 0;
    Op op = null;

    try {
      // Register this peer and worker thread with the server.
      dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
      // Socket write timeout, from dfs.datanode.socket.write.timeout.
      peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
      InputStream input = socketIn;
      try {
        // SASL handshake. The resulting IOStreamPair carries both the input
        // stream for reading client requests and the output stream for
        // responses.
        IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut, socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
        // Buffer the negotiated input stream (SMALL_BUFFER_SIZE bytes).
        input = new BufferedInputStream(saslStreams.in, HdfsConstants.SMALL_BUFFER_SIZE);
        // Use the negotiated output stream from here on.
        socketOut = saslStreams.out;
      } catch (InvalidMagicNumberException imne) {
        LOG.info("Failed to read expected encryption handshake from client " +
            "at " + peer.getRemoteAddressString() + ". Perhaps the client  is running an older version of Hadoop which does not support  encryption");
        return;
      }

      // Hand the (wrapped) input stream to the Receiver base class.
      super.initialize(new DataInputStream(input));

      // Process requests until the peer closes or keepalive is disabled.
      do {
        // The thread name doubles as a progress indicator.
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
        try {
          // A brand-new connection may take a while to send its first op, so
          // it gets the full timeout; a reused (kept-alive) connection gets
          // the much shorter keepalive timeout.
          if (opsProcessed != 0) {
            // Reused connection: wait only dfs.datanode.socket.reuse.keepalive
            // (default 4s) for the next op.
            assert dnConf.socketKeepaliveTimeout > 0;
            peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            // First op: allow the full dfs.client.socket-timeout
            // (default 60s).
            peer.setReadTimeout(dnConf.socketTimeout);
          }

          // Read the next opcode (e.g. WRITE_BLOCK) sent by the client.
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Interrupted (or timed out) while waiting for the next op; stop.
          break;
        } catch (IOException err) {
          // After at least one op, an EOF / closed channel simply means the
          // client chose not to reuse the connection — not an error.
          if (opsProcessed > 0 && (err instanceof EOFException || err instanceof ClosedChannelException)) {
          } else {
            throw err;
          }
          break;
        }

        // Restore the normal read timeout for the body of the operation.
        if (opsProcessed != 0) {
          peer.setReadTimeout(dnConf.socketTimeout);
        }

        // Record when this op started (used by the elapsed-time metrics).
        opStartTime = now();

        // Dispatch to the matching handler (readBlock, writeBlock, ...).
        processOp(op);
        ++opsProcessed; // count processed ops
      } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0)); // loop while peer open and keepalive enabled
    } catch (Throwable t) {
      String s = datanode.getDisplayName() + ":DataXceiver error processing " + ((op == null) ? "unknown" : op.name()) + " operation " + " src: " + remoteAddress + " dst: " + localAddress;
      // ReplicaAlreadyExistsException on WRITE_BLOCK is deliberately not
      // logged as an error; everything else is.
      if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
      } else {
        LOG.error(s, t);
      }
    } finally {
      updateCurrentThreadName("Cleaning up");
      // Close the peer (socket) and the input stream.
      if (peer != null) {
        dataXceiverServer.closePeer(peer);
        IOUtils.closeStream(in);
      }
    }
  }

  /**
   * Respond to a client's requestShortCircuitFds() call: register the
   * client's shared-memory slot (if any), open the block and checksum files,
   * and pass their file descriptors to the client over the domain socket.
   *
   * Background: a DFSClient must construct a ShortCircuitReplica before it
   * can short-circuit read. Before doing so it calls
   * DataTransferProtocol.requestShortCircuitFds() to ask this datanode for
   * the block-file and checksum-file descriptors and to synchronize the slot
   * state in shared memory. The client allocates a slot in its own shared
   * memory first and passes the slot id in the request; this method creates
   * the matching Slot on the datanode side via
   * ShortCircuitRegistry.registerSlot(), obtains the descriptors via
   * DataNode.requestShortCircuitFdsForRead(), and ships them through the
   * DomainSocket. Once the client receives the descriptors it can open the
   * block and checksum input streams and build its ShortCircuitReplica.
   */
  @Override
  public void requestShortCircuitFds(final ExtendedBlock blk, final Token<BlockTokenIdentifier> token, SlotId slotId, int maxVersion, boolean supportsReceiptVerification) throws IOException {
    updateCurrentThreadName("Passing file descriptors for block " + blk);
    DataOutputStream out = getBufferedOutputStream();
    checkAccess(out, true, blk, token, Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenSecretManager.AccessMode.READ);
    BlockOpResponseProto.Builder bld = BlockOpResponseProto.newBuilder();
    FileInputStream fis[] = null;
    SlotId registeredSlotId = null;
    boolean success = false;
    try {
      try {
        // File descriptors can only be passed over a UNIX domain socket.
        if (peer.getDomainSocket() == null) {
          throw new IOException("You cannot pass file descriptors over " + "anything but a UNIX domain socket.");
        }
        if (slotId != null) {
          boolean isCached = datanode.data.isCached(blk.getBlockPoolId(), blk.getBlockId()); // block cache state
          // Register the client's slot in the datanode-side shared memory.
          datanode.shortCircuitRegistry.registerSlot(ExtendedBlockId.fromExtendedBlock(blk), slotId, isCached);
          registeredSlotId = slotId;
        }
        // Open the block file and checksum file for the client.
        fis = datanode.requestShortCircuitFdsForRead(blk, token, maxVersion);
        Preconditions.checkState(fis != null);
        // Build the success response.
        bld.setStatus(SUCCESS);
        bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
      } catch (ShortCircuitFdsVersionException e) {
        bld.setStatus(ERROR_UNSUPPORTED);
        bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
        bld.setMessage(e.getMessage());
      } catch (ShortCircuitFdsUnsupportedException e) {
        bld.setStatus(ERROR_UNSUPPORTED);
        bld.setMessage(e.getMessage());
      } catch (IOException e) {
        bld.setStatus(ERROR);
        bld.setMessage(e.getMessage());
      }
      // Send the response header (success or error) back to the client.
      bld.build().writeDelimitedTo(socketOut);
      if (fis != null) {
        FileDescriptor fds[] = new FileDescriptor[fis.length];
        for (int i = 0; i < fds.length; i++) {
          fds[i] = fis[i].getFD();
        }
        // One-byte payload accompanying the descriptors; tells the client
        // whether to expect receipt verification.
        byte buf[] = new byte[1];
        if (supportsReceiptVerification) {
          buf[0] = (byte)USE_RECEIPT_VERIFICATION.getNumber();
        } else {
          buf[0] = (byte)DO_NOT_USE_RECEIPT_VERIFICATION.getNumber();
        }
        // Pass the block-file and checksum-file descriptors to the client
        // over the domain socket.
        DomainSocket sock = peer.getDomainSocket();
        sock.sendFileDescriptors(fds, buf, 0, buf.length);

        if (supportsReceiptVerification) {
          LOG.trace("Reading receipt verification byte for " + slotId);
          int val = sock.getInputStream().read();
          if (val < 0) {
            throw new EOFException();
          }
        } else {
          LOG.trace("Receipt verification is not enabled on the DataNode.  " + "Not verifying " + slotId);
        }
        success = true;
      }
    } finally {
      // On failure, roll back the slot registration so the shared-memory
      // segment does not leak a dead slot.
      if ((!success) && (registeredSlotId != null)) {
        LOG.info("Unregistering " + registeredSlotId + " because the " + "requestShortCircuitFdsForRead operation failed.");
        datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
      }
      if (fis != null) {
        IOUtils.cleanup(LOG, fis);
      }
    }
  }

  /**
   * Respond to a client's releaseShortCircuitFds() call by removing the
   * given slot from the datanode-side shared memory, then sending a status
   * response.
   *
   * When the client finishes short-circuit reading a replica it closes its
   * ShortCircuitReplica and calls
   * DataTransferProtocol.releaseShortCircuitFds() so the datanode can drop
   * the corresponding Slot. After receiving this response, the client also
   * releases the slot in its own shared memory, completing the read.
   */
  @Override
  public void releaseShortCircuitFds(SlotId slotId) throws IOException {
    boolean success = false;
    try {
      String error;
      Status status;
      try {
        // Release the slot in the datanode-side shared memory.
        datanode.shortCircuitRegistry.unregisterSlot(slotId);
        error = null;
        status = Status.SUCCESS;
      } catch (UnsupportedOperationException e) {
        error = "unsupported operation";
        status = Status.ERROR_UNSUPPORTED;
      } catch (Throwable e) {
        error = e.getMessage();
        status = Status.ERROR_INVALID;
      }
      // Build the response message.
      ReleaseShortCircuitAccessResponseProto.Builder bld = ReleaseShortCircuitAccessResponseProto.newBuilder();
      bld.setStatus(status);
      if (error != null) {
        bld.setError(error);
      }
      // Send the response back to the client.
      bld.build().writeDelimitedTo(socketOut);
      success = true;
    } finally {
      if (ClientTraceLog.isInfoEnabled()) {
        BlockSender.ClientTraceLog.info(String.format(
            "src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
            " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
            slotId.getShmId().getHi(), slotId.getShmId().getLo(),
            slotId.getSlotIdx(), datanode.getDatanodeUuid(), success));
      }
    }
  }

  /**
   * Write a failed ShortCircuitShmResponse with the given status and error
   * text to the socket output stream.
   */
  private void sendShmErrorResponse(Status status, String error)
      throws IOException {
    ShortCircuitShmResponseProto.Builder builder =
        ShortCircuitShmResponseProto.newBuilder();
    builder.setStatus(status);
    builder.setError(error);
    builder.build().writeDelimitedTo(socketOut);
  }

  /**
   * Write a successful ShortCircuitShmResponse carrying the new shared-memory
   * segment's id, then pass the segment's file descriptor over the domain
   * socket together with a one-byte message body.
   */
  private void sendShmSuccessResponse(DomainSocket sock, NewShmInfo shmInfo)
      throws IOException {
    DataNodeFaultInjector.get().sendShortCircuitShmResponse();
    ShortCircuitShmResponseProto response = ShortCircuitShmResponseProto
        .newBuilder()
        .setStatus(SUCCESS)
        .setId(PBHelper.convert(shmInfo.shmId))
        .build();
    response.writeDelimitedTo(socketOut);
    // Send the file descriptor for the shared memory segment.
    FileDescriptor[] shmFds = { shmInfo.stream.getFD() };
    byte[] msg = { (byte) 0 };
    sock.sendFileDescriptors(shmFds, msg, 0, msg.length);
  }

  /**
   * Respond to a client's requestShortCircuitShm() call by creating a shared
   * memory segment for it and passing the segment's file descriptor back
   * over the domain socket.
   *
   * Before performing any short-circuit reads, a DFSClient must obtain a
   * shared memory segment in which replica slot states are kept. It calls
   * DataTransferProtocol.requestShortCircuitShm(); this method answers by
   * calling ShortCircuitRegistry.createNewMemorySegment(), which maps the
   * shared memory file into the datanode's address space and wraps it in a
   * RegisteredShm, then sendShmSuccessResponse() ships the segment's file
   * descriptor to the client. On the client side, DfsClientShmManager maps
   * the same file and tracks it with a DfsClientShm object.
   */
  @Override
  public void requestShortCircuitShm(String clientName) throws IOException {
    NewShmInfo shmInfo = null;
    boolean success = false;
    // Grab the underlying DomainSocket; fd-passing requires one.
    DomainSocket sock = peer.getDomainSocket();
    try {
      // If the transport is not a domain socket, reply with an error.
      if (sock == null) {
        sendShmErrorResponse(ERROR_INVALID, "Bad request from " + peer + ": must request a shared " + "memory segment over a UNIX domain socket.");
        return;
      }
      try {
        // Create the shared memory segment for this client.
        shmInfo = datanode.shortCircuitRegistry.createNewMemorySegment(clientName, sock);
        // The peer is now owned by the registry's watcher; drop our
        // reference so cleanup paths skip it.
        releaseSocket();
      } catch (UnsupportedOperationException e) {
        // Short-circuit shm is not configured on this datanode.
        sendShmErrorResponse(ERROR_UNSUPPORTED,  "This datanode has not been configured to support " + "short-circuit shared memory segments.");
        return;
      } catch (IOException e) {
        // Segment creation failed; report the cause to the client.
        sendShmErrorResponse(ERROR, "Failed to create shared file descriptor: " + e.getMessage());
        return;
      }
      // Send the shared memory file's descriptor back to the client.
      sendShmSuccessResponse(sock, shmInfo);
      success = true;
    } finally {
      if (ClientTraceLog.isInfoEnabled()) {
        if (success) {
          BlockSender.ClientTraceLog.info(String.format(
              "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "op: REQUEST_SHORT_CIRCUIT_SHM," + " shmId: %016x%016x, srvID: %s, success: true",
              clientName, shmInfo.shmId.getHi(), shmInfo.shmId.getLo(), datanode.getDatanodeUuid()));
        } else {
          BlockSender.ClientTraceLog.info(String.format(
              "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "op: REQUEST_SHORT_CIRCUIT_SHM, " + "shmId: n/a, srvID: %s, success: false",
              clientName, datanode.getDatanodeUuid()));
        }
      }
      // peer == null here means the segment was created but the success
      // response failed to reach the client; shut the socket down so the
      // client notices.
      if ((!success) && (peer == null)) {
        try {
          LOG.warn("Failed to send success response back to the client.  " + "Shutting down socket for " + shmInfo.shmId + ".");
          sock.shutdown();
        } catch (IOException e) {
          LOG.warn("Failed to shut down socket in error handler", e);
        }
      }
      IOUtils.cleanup(null, shmInfo);
    }
  }

  /**
   * Hand the peer back to the DataXceiverServer and clear the local
   * reference, so that this DataXceiver's cleanup paths (which check
   * peer != null) no longer touch it.
   */
  void releaseSocket() {
    dataXceiverServer.releasePeer(peer);
    peer = null;
  }

  /**
   * Serve a READ_BLOCK request: stream the requested byte range of the block
   * to the client via a BlockSender, then — if the entire requested range
   * was sent — wait for the client's read-status ack, and finally update
   * read metrics.
   *
   * @param block        block to read
   * @param blockToken   security token for accessing the block
   * @param clientName   name of the requesting client ("" for a datanode)
   * @param blockOffset  starting byte offset within the block
   * @param length       number of bytes to send
   * @param sendChecksum whether to send checksums along with the data
   * @param cachingStrategy client-requested caching strategy
   */
  @Override
  public void readBlock(final ExtendedBlock block,
      final Token<BlockTokenIdentifier> blockToken,
      final String clientName,
      final long blockOffset,
      final long length,
      final boolean sendChecksum,
      final CachingStrategy cachingStrategy) throws IOException {
    previousOpClientName = clientName;
    updateCurrentThreadName("Sending block " + block);
    OutputStream baseStream = getOutputStream();
    DataOutputStream out = getBufferedOutputStream();
    checkAccess(out, true, block, blockToken, Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
  
    // send the block
    BlockSender blockSender = null;
    DatanodeRegistration dnR =  datanode.getDNRegistrationForBP(block.getBlockPoolId());
    final String clientTraceFmt =
      clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
        ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "%d", "HDFS_READ", clientName, "%d",
            dnR.getDatanodeUuid(), block, "%d") : dnR + " Served block " + block + " to " + remoteAddress;

    try {
      try {
        // Construct the sender for the requested byte range.
        blockSender = new BlockSender(block, blockOffset, length,
                true, false, sendChecksum, datanode, clientTraceFmt, cachingStrategy);
      } catch(IOException e) {
        String msg = "opReadBlock " + block + " received exception " + e; 
        LOG.info(msg);
        sendResponse(ERROR, msg);
        throw e;
      }
      
      // send op status
      writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));

      // Stream the block data from local storage to the client.
      long read = blockSender.sendBlock(out, baseStream, null);

      if (blockSender.didSendEntireByteRange()) {
        // If we sent the entire range, then we should expect the client
        // to respond with a Status enum.
        try {
          ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(
              PBHelper.vintPrefixed(in));
          if (!stat.hasStatus()) {
            LOG.warn("Client " + peer.getRemoteAddressString() +
                " did not send a valid status code after reading. " +
                "Will close connection.");
            IOUtils.closeStream(out);
          }
        } catch (IOException ioe) {
          LOG.debug("Error reading client status response. Will close connection.", ioe);
          IOUtils.closeStream(out);
        }
      } else {
        // Partial range sent: do not keep the connection alive.
        IOUtils.closeStream(out);
      }
      datanode.metrics.incrBytesRead((int) read);
      datanode.metrics.incrBlocksRead();
    } catch ( SocketException ignored ) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " +
            remoteAddress, ignored);
      }
      // Its ok for remote side to close the connection anytime.
      datanode.metrics.incrBlocksRead();
      IOUtils.closeStream(out);
    } catch ( IOException ioe ) {
      /* What exactly should we do here?
       * Earlier version shutdown() datanode if there is disk error.
       */
      LOG.warn(dnR + ":Got exception while serving " + block + " to "
          + remoteAddress, ioe);
      throw ioe;
    } finally {
      IOUtils.closeStream(blockSender);
    }

    //update metrics
    datanode.metrics.addReadBlockOp(elapsed());
    datanode.metrics.incrReadsFromClient(peer.isLocal());
  }

  @Override
  public void writeBlock(final ExtendedBlock block,
      final StorageType storageType, 
      final Token<BlockTokenIdentifier> blockToken,
      final String clientname,
      final DatanodeInfo[] targets,
      final StorageType[] targetStorageTypes, 
      final DatanodeInfo srcDataNode,
      final BlockConstructionStage stage,
      final int pipelineSize,
      final long minBytesRcvd,
      final long maxBytesRcvd,
      final long latestGenerationStamp,
      DataChecksum requestedChecksum,
      CachingStrategy cachingStrategy,
      final boolean allowLazyPersist) throws IOException {

    previousOpClientName = clientname;
    updateCurrentThreadName("Receiving block " + block);

    // 第1部分
    // 开始部分定义了三个boolean类型的变量，用于控制处理流程。
    // 对于客户端发起的写数据块请求，isDatanode=false，isClient=true，isTransfer=false，

    // isDatanode  表示 当提前写操作是否是DFSClient发起的
    final boolean isDatanode = clientname.length() == 0;
    // isClient  与 isDatanode相反，表示是Datanode触发的写操作
    final boolean isClient = !isDatanode;
    // isTransfer 表示 当前的写操作是否为数据块复制操作，利用数据流管道状态来判断
    final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW || stage == BlockConstructionStage.TRANSFER_FINALIZED;

    // 第2部分
    // Datanode与数据流管道中的上游节点通信用到了输入流in以及输出流replyOut，与数据流管道中的下游节点通信则用到了输入流mirrorIn以及输出流mirrorOut。
    // writeBlock()方法的第2部分就是初始化这两组输入/输出流，并向下游节点发送数据包写入请求，然后等待下游节点的请求确认。
    // 如果下游节点确认了请求，则向上游节点返回这个确认请求；如果抛出了异常，则向上游节点发送异常响应。

    // 创建 replyOut输出流
    final DataOutputStream replyOut = getBufferedOutputStream();
    checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE);
    // check single target for transfer-RBW/Finalized 
    if (isTransfer && targets.length > 0) {
      throw new IOException(stage + " does not support multiple targets " + Arrays.asList(targets));
    }

    // We later mutate block's generation stamp and length,
    // but we need to forward the original version of the block to downstream mirrors, so make a copy here.
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
    if (block.getNumBytes() == 0) {
      block.setNumBytes(dataXceiverServer.estimateBlockSize);
    }
    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: " + localAddress);

    DataOutputStream mirrorOut = null;  // 到下游数据节点的输出流
    DataInputStream mirrorIn = null;    // 下游数据节点的输入流
    Socket mirrorSock = null;           // 到下游节点的Socket
    String mirrorNode = null;           // 下游节点的名称：端口
    String firstBadLink = "";           // 数据流管道中的第1个失败的Datanode
    Status mirrorInStatus = SUCCESS;
    final String storageUuid;           // 保存这个数据块的Datanode存储的id

    try {
      if (isDatanode ||  stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
        // 打开一个 BlockReceiver，用于从上有节点接收数据块
        // 干的第1件事，就是初始化一个关键性的组件，就是BlockReceiver，这个组件在后面就会负责接收上有传递过来的packeet数据包，将这些packet数据包存储在自己的本地磁盘文件里
        // 同时还会将这些packet数据包发送到下游的datanode里面去
        blockReceiver = new BlockReceiver(block, storageType, in, //
            peer.getRemoteAddressString(),
            peer.getLocalAddressString(),
            stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd,
            clientname, srcDataNode, datanode, requestedChecksum,
            cachingStrategy, allowLazyPersist);

        storageUuid = blockReceiver.getStorageUuid();
      } else {
        storageUuid = datanode.data.recoverClose( block, latestGenerationStamp, minBytesRcvd);
      }

      // 连接到下游节点
      if (targets.length > 0) {
        // 下面的代码，就是建立好跟下游第二个datanode的一个socket连接以及各种流

        InetSocketAddress mirrorTarget = null;
        // Connect to backup machine
        mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
        mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
        mirrorSock = datanode.newSocket();
        try {
          DataNodeFaultInjector.get().failMirrorConnection();
          int timeoutValue = dnConf.socketTimeout + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length);
          int writeTimeout = dnConf.socketWriteTimeout +   (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
          // 建立到下游节点的Socket连接
          NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
          mirrorSock.setSoTimeout(timeoutValue);
          mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
          
          OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock, writeTimeout);
          InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
          DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
          IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock, unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
          unbufMirrorOut = saslStreams.out;
          unbufMirrorIn = saslStreams.in;
          // 创建 mirrorOut 和 mirrorIn 建立到下游节点的输出流以及输入流
          mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut, HdfsConstants.SMALL_BUFFER_SIZE));
          mirrorIn = new DataInputStream(unbufMirrorIn);

          // 向下游节点发送数据块写入请求
          // 给下个节点发送空块
          new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
              blockToken, clientname, targets, targetStorageTypes, srcDataNode,
              stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
              latestGenerationStamp, requestedChecksum, cachingStrategy, false);
          mirrorOut.flush();

          // 接收来自下游节点的请求确认，并记录请求确认状态
          if (isClient) {
            // 等待获取datanode那边返回的建立好连接的ack消息
            BlockOpResponseProto connectAck = BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(mirrorIn));
            mirrorInStatus = connectAck.getStatus();
            firstBadLink = connectAck.getFirstBadLink();
          }

        } catch (IOException e) {
          // 出现异常，向上游节点发送异常响应
          if (isClient) {
            BlockOpResponseProto.newBuilder() .setStatus(ERROR) .setFirstBadLink(targets[0].getXferAddr()) .build() .writeDelimitedTo(replyOut);
            replyOut.flush();
          }
          // 关闭到下游节点的Socket 、输入流以及输出流
          IOUtils.closeStream(mirrorOut);
          mirrorOut = null;
          IOUtils.closeStream(mirrorIn);
          mirrorIn = null;
          IOUtils.closeSocket(mirrorSock);
          mirrorSock = null;
          if (isClient) {
            LOG.error(datanode + ":Exception transfering block " + block + " to mirror " + mirrorNode + ": " + e);
            throw e;
          } else {
            LOG.info(datanode + ":Exception transfering " + block + " to mirror " + mirrorNode + "- continuing without the mirror", e);
          }
        }
      }

      // send connect-ack to source for clients and not transfer-RBW/Finalized
      if (isClient && !isTransfer) {
        // Send the connect-ack back to the upstream node
        BlockOpResponseProto.newBuilder().setStatus(mirrorInStatus) .setFirstBadLink(firstBadLink) .build() .writeDelimitedTo(replyOut);
        replyOut.flush();
      }

      // Once the streams to the downstream node are established, writeBlock() calls
      // blockReceiver.receiveBlock() to receive the block from upstream and forward it downstream.
      // The blockReceiver also relays packet acks from the downstream node back upstream.
      if (blockReceiver != null) {
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        // blockReceiver.receiveBlock() receives the block from the upstream node and forwards
        // it downstream. Each packet that is read is written to the local block file on disk
        // and also relayed to the downstream datanode, replicating the block. This repeats
        // for every packet until an empty packet signals that the block transfer is complete.
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets, false); // TODO 核心代码

        // For a transfer (replication) operation there is no downstream pipeline and no
        // downstream acks to wait for, so acknowledge directly once the block is received.
        if (isTransfer) {
          // Once all packets of the block have been read, the client sends a final empty
          // packet marking the end of the block, at which point
          // BlockReceiver.receiveBlock() is guaranteed to return.
          writeResponse(SUCCESS, null, replyOut);
        }
      }

      // After BlockReceiver.receiveBlock() succeeds, writeBlock() updates the generation
      // stamp, replica file length, etc. of the newly written replica on this datanode.

      // For client-initiated writes, the block has already been closed by the
      // PacketResponder thread via DataNode.closeBlock().
      if (isClient &&  stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
        block.setGenerationStamp(latestGenerationStamp);
        block.setNumBytes(minBytesRcvd);
      }

      // For a pipeline-close recovery or a block replication, call DataNode.closeBlock(),
      // which reports the newly received block to the NameNode via
      // BPOfferService.notifyNamenodeReceivedBlock().
      if (isDatanode || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
        // This is the other call site of DataNode.closeBlock() in the write path: it is
        // reached when the write was triggered by block replication and the replica has
        // been successfully stored by blockReceiver.receiveBlock().
        datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT, storageUuid); //
        LOG.info("Received " + block + " src: " + remoteAddress + " dest: " + localAddress + " of size " + block.getNumBytes());
      }

    } catch (IOException ioe) {
      LOG.info("opWriteBlock " + block + " received exception " + ioe);
      throw ioe;
    }
    // DataXceiver.writeBlock()完成上述操作后，会在finally中关闭上下游节点的输入/输出流，同时关闭blockReceiver对象。
    finally {
      IOUtils.closeStream(mirrorOut);
      IOUtils.closeStream(mirrorIn);
      IOUtils.closeStream(replyOut);
      IOUtils.closeSocket(mirrorSock);
      IOUtils.closeStream(blockReceiver);
      blockReceiver = null;
    }

    //update metrics
    datanode.metrics.addWriteBlockOp(elapsed());
    datanode.metrics.incrWritesFromClient(peer.isLocal());
  }

  /**
   * Serve a TRANSFER_BLOCK request: transfer a replica of {@code blk} to the
   * given targets for pipeline recovery, then acknowledge the requester.
   */
  @Override
  public void transferBlock(final ExtendedBlock blk,
      final Token<BlockTokenIdentifier> blockToken,
      final String clientName,
      final DatanodeInfo[] targets,
      final StorageType[] targetStorageTypes) throws IOException {
    previousOpClientName = clientName;
    updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

    final DataOutputStream response = new DataOutputStream(getOutputStream());
    checkAccess(response, true, blk, blockToken,
        Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
    try {
      // Hand the actual transfer off to the datanode, then ack success.
      datanode.transferReplicaForPipelineRecovery(blk, targets,
          targetStorageTypes, clientName);
      writeResponse(Status.SUCCESS, null, response);
    } finally {
      IOUtils.closeStream(response);
    }
  }

  /**
   * Compute the MD5 digest over the checksum (CRC) data covering the first
   * {@code requestLength} bytes of the block.
   *
   * Whole chunks are digested straight from the stored checksum stream; if the
   * range ends in the middle of a chunk, the CRC of that trailing partial
   * chunk is recomputed from the block data itself and digested as well.
   */
  private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
      long requestLength, DataChecksum checksum, DataInputStream checksumIn)
      throws IOException {
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final int csize = checksum.getChecksumSize();
    final byte[] readBuf = new byte[4 * 1024];
    final MessageDigest digester = MD5Hash.getDigester();

    // Digest the stored CRCs for every complete chunk in the range.
    long crcBytesLeft = requestLength / bytesPerCRC * csize;
    while (crcBytesLeft > 0) {
      final int n = checksumIn.read(readBuf, 0,
          (int) Math.min(crcBytesLeft, readBuf.length));
      if (n < 0) {
        break;  // checksum stream ended early; digest what we have
      }
      digester.update(readBuf, 0, n);
      crcBytesLeft -= n;
    }

    final int partialLength = (int) (requestLength % bytesPerCRC);
    if (partialLength > 0) {
      // Recompute the CRC of the trailing partial chunk from the block data.
      final byte[] partialData = new byte[partialLength];
      final InputStream blockIn = datanode.data.getBlockInputStream(block,
          requestLength - partialLength);
      try {
        IOUtils.readFully(blockIn, partialData, 0, partialLength);
      } finally {
        IOUtils.closeStream(blockIn);
      }
      checksum.update(partialData, 0, partialLength);
      final byte[] partialCrc = new byte[csize];
      checksum.writeValue(partialCrc, 0, true);
      digester.update(partialCrc);
    }
    return new MD5Hash(digester.digest());
  }

  /**
   * Serve a BLOCK_CHECKSUM request: compute the MD5-of-CRC checksum of the
   * (possibly partial) block and return it to the client.
   */
  @Override
  public void blockChecksum(final ExtendedBlock block,
      final Token<BlockTokenIdentifier> blockToken) throws IOException {
    updateCurrentThreadName("Getting checksum for block " + block);
    final DataOutputStream out = new DataOutputStream(getOutputStream());
    checkAccess(out, true, block, blockToken,
        Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);

    // The client may request the checksum of only a prefix of the block.
    final long requestLength = block.getNumBytes();
    Preconditions.checkArgument(requestLength >= 0);
    final long visibleLength = datanode.data.getReplicaVisibleLength(block);
    final boolean partialBlk = requestLength < visibleLength;

    final LengthInputStream metadataIn =
        datanode.data.getMetaDataInputStream(block);
    final DataInputStream checksumIn = new DataInputStream(
        new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
    try {
      // Read the metadata file header to learn the checksum parameters.
      final BlockMetadataHeader header =
          BlockMetadataHeader.readHeader(checksumIn);
      final DataChecksum checksum = header.getChecksum();
      final int csize = checksum.getChecksumSize();
      final int bytesPerCRC = checksum.getBytesPerChecksum();
      final long crcPerBlock;
      if (csize <= 0) {
        crcPerBlock = 0;
      } else {
        crcPerBlock = (metadataIn.getLength()
            - BlockMetadataHeader.getHeaderSize()) / csize;
      }

      // For a partial request digest only the CRCs covering the prefix;
      // otherwise digest the entire checksum stream.
      final MD5Hash md5;
      if (partialBlk && crcPerBlock > 0) {
        md5 = calcPartialBlockChecksum(block, requestLength, checksum,
            checksumIn);
      } else {
        md5 = MD5Hash.digest(checksumIn);
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
            + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
      }

      // write reply
      BlockOpResponseProto.newBuilder()
        .setStatus(SUCCESS)
        .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
          .setBytesPerCrc(bytesPerCRC)
          .setCrcPerBlock(crcPerBlock)
          .setMd5(ByteString.copyFrom(md5.getDigest()))
          .setCrcType(PBHelper.convert(checksum.getChecksumType())))
        .build()
        .writeDelimitedTo(out);
      out.flush();
    } finally {
      IOUtils.closeStream(out);
      IOUtils.closeStream(checksumIn);
      IOUtils.closeStream(metadataIn);
    }

    // update metrics
    datanode.metrics.addBlockChecksumOp(elapsed());
  }

  /**
   * Serve a COPY_BLOCK request: another datanode asks this node to stream a
   * copy of {@code block} to it (used e.g. by the balancer's replace flow).
   */
  @Override
  public void copyBlock(final ExtendedBlock block, final Token<BlockTokenIdentifier> blockToken) throws IOException {
    updateCurrentThreadName("Copying block " + block);
    DataOutputStream reply = getBufferedOutputStream();
    checkAccess(reply, true, block, blockToken, Op.COPY_BLOCK, BlockTokenSecretManager.AccessMode.COPY);

    // Only a bounded number of threads may use the balancing throttler at
    // once; beyond that quota the request is rejected outright.
    if (!dataXceiverServer.balanceThrottler.acquire()) {
      String msg = "Not able to copy block " + block.getBlockId() + " " + "to " + peer.getRemoteAddressString() + " because threads " + "quota is exceeded.";
      LOG.info(msg);
      sendResponse(ERROR, msg);
      return;
    }

    BlockSender blockSender = null;
    boolean opSucceeded = true;

    try {
      blockSender = new BlockSender(block, 0, -1, false, false, true, datanode, null, CachingStrategy.newDropBehind());

      OutputStream baseStream = getOutputStream();

      // Report success (with checksum metadata) before streaming the data.
      writeSuccessWithChecksumInfo(blockSender, reply);

      // The balancing throttler caps the bandwidth consumed while sending.
      long bytesSent = blockSender.sendBlock(reply, baseStream, dataXceiverServer.balanceThrottler);

      datanode.metrics.incrBytesRead((int) bytesSent);
      datanode.metrics.incrBlocksRead();

      LOG.info("Copied " + block + " to " + peer.getRemoteAddressString());
    } catch (IOException ioe) {
      opSucceeded = false;
      LOG.info("opCopyBlock " + block + " received exception " + ioe);
      throw ioe;
    } finally {
      dataXceiverServer.balanceThrottler.release();
      if (opSucceeded) {
        try {
          // Trailing marker telling the receiver this thread slot is released.
          reply.writeChar('d');
        } catch (IOException ignored) {
        }
      }
      IOUtils.closeStream(reply);
      IOUtils.closeStream(blockSender);
    }

    // update metrics
    datanode.metrics.addCopyBlockOp(elapsed());
  }

  /**
   * Serve a REPLACE_BLOCK request, issued by the balancer.
   *
   * This datanode connects to {@code proxySource}, sends it a COPY_BLOCK
   * request, and receives the block through a BlockReceiver. On success the
   * NameNode is notified so the replica hinted by {@code delHint} can be
   * scheduled for deletion.
   *
   * Balancing traffic is throttled on both ends: the proxy throttles in its
   * BlockSender and this side throttles in BlockReceiver, so neither
   * machine's network bandwidth is saturated by balancing.
   */
  @Override
  public void replaceBlock(final ExtendedBlock block,
      final StorageType storageType, 
      final Token<BlockTokenIdentifier> blockToken,
      final String delHint,
      final DatanodeInfo proxySource) throws IOException {

    updateCurrentThreadName("Replacing block " + block + " from " + delHint);
    DataOutputStream replyOut = new DataOutputStream(getOutputStream());
    checkAccess(replyOut, true, block, blockToken, Op.REPLACE_BLOCK, BlockTokenSecretManager.AccessMode.REPLACE);

    /* read header */
    block.setNumBytes(dataXceiverServer.estimateBlockSize);

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
      String msg = "Not able to receive block " + block.getBlockId() + " from " + peer.getRemoteAddressString() + " because threads " + "quota is exceeded.";
      LOG.warn(msg);
      sendResponse(ERROR, msg);
      return;
    }

    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    Status opStatus = SUCCESS;
    String errMsg = null;
    BlockReceiver blockReceiver = null;
    DataInputStream proxyReply = null;
    try {
      // get the output stream to the proxy
      final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
      InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
      proxySock = datanode.newSocket();
      NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
      proxySock.setSoTimeout(dnConf.socketTimeout);

      OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock, dnConf.socketWriteTimeout);
      InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
      DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
      IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock, unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
      unbufProxyOut = saslStreams.out;
      unbufProxyIn = saslStreams.in;
      
      proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut,  HdfsConstants.SMALL_BUFFER_SIZE));
      proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

      /* send request to the proxy */
      new Sender(proxyOut).copyBlock(block, blockToken);

      // receive the response from the proxy
      BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom( PBHelper.vintPrefixed(proxyReply));

      if (copyResponse.getStatus() != SUCCESS) {
        if (copyResponse.getStatus() == ERROR_ACCESS_TOKEN) {
          throw new IOException("Copy block " + block + " from " + proxySock.getRemoteSocketAddress() + " failed due to access token error");
        }
        throw new IOException("Copy block " + block + " from " + proxySock.getRemoteSocketAddress() + " failed");
      }
      
      // get checksum info about the block we're copying
      ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
      DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto( checksumInfo.getChecksum());
      // open a block receiver and check if the block does not exist
      blockReceiver = new BlockReceiver(block, storageType,
          proxyReply, proxySock.getRemoteSocketAddress().toString(),
          proxySock.getLocalSocketAddress().toString(),
          null, 0, 0, 0, "", null, datanode, remoteChecksum,
          CachingStrategy.newDropBehind(), false);

      // receive a block (throttled by the balancing throttler)
      blockReceiver.receiveBlock(null, null, replyOut, null, dataXceiverServer.balanceThrottler, null, true);
                    
      // notify name node
      datanode.notifyNamenodeReceivedBlock( block, delHint, blockReceiver.getStorageUuid());

      LOG.info("Moved " + block + " from " + peer.getRemoteAddressString() + ", delHint=" + delHint);
      
    } catch (IOException ioe) {
      opStatus = ERROR;
      errMsg = "opReplaceBlock " + block + " received exception " + ioe; 
      LOG.info(errMsg);
      throw ioe;
    } finally {
      // Receive the last byte that indicates the proxy released its thread
      // resource. Guard against proxyReply being null: a runtime exception
      // thrown before the proxy streams were set up bypasses the IOException
      // catch above, leaving opStatus == SUCCESS; without this null check the
      // original exception would be masked by a NullPointerException here.
      if (opStatus == SUCCESS && proxyReply != null) {
        try {
          proxyReply.readChar();
        } catch (IOException ignored) {
        }
      }
      
      // now release the thread resource
      dataXceiverServer.balanceThrottler.release();
      
      // send response back
      try {
        sendResponse(opStatus, errMsg);
      } catch (IOException ioe) {
        LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
      }
      IOUtils.closeStream(proxyOut);
      IOUtils.closeStream(blockReceiver);
      IOUtils.closeStream(proxyReply);
      IOUtils.closeStream(replyOut);
    }

    //update metrics
    datanode.metrics.addReplaceBlockOp(elapsed());
  }

  /**
   * Wrap this xceiver's raw output stream in a small buffered
   * DataOutputStream. Separated for testing.
   *
   * @return a buffered DataOutputStream over {@link #getOutputStream()}
   */
  DataOutputStream getBufferedOutputStream() {
    final OutputStream raw = getOutputStream();
    final BufferedOutputStream buffered =
        new BufferedOutputStream(raw, HdfsConstants.SMALL_BUFFER_SIZE);
    return new DataOutputStream(buffered);
  }

  /** @return milliseconds elapsed since this operation started. */
  private long elapsed() {
    final long nowMs = now();
    return nowMs - opStartTime;
  }

  /**
   * Utility function for sending a response on this xceiver's raw output
   * stream.
   * 
   * @param status status message to write
   * @param message message to send to the client or other DN
   */
  private void sendResponse(Status status, String message) throws IOException {
    final OutputStream out = getOutputStream();
    writeResponse(status, message, out);
  }

  /**
   * Serialize a BlockOpResponseProto with the given status (and optional
   * message) onto {@code out}, delimited, and flush it.
   */
  private static void writeResponse(Status status, String message, OutputStream out)
  throws IOException {
    final BlockOpResponseProto.Builder builder =
        BlockOpResponseProto.newBuilder().setStatus(status);
    if (message != null) {
      builder.setMessage(message);
    }
    builder.build().writeDelimitedTo(out);
    out.flush();
  }
  
  /**
   * Write a SUCCESS response carrying the sender's checksum parameters and
   * the chunk offset at which the data stream will begin.
   */
  private void writeSuccessWithChecksumInfo(BlockSender blockSender,
      DataOutputStream out) throws IOException {
    final ReadOpChecksumInfoProto checksumInfo = ReadOpChecksumInfoProto.newBuilder()
        .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
        .setChunkOffset(blockSender.getOffset())
        .build();

    BlockOpResponseProto.newBuilder()
        .setStatus(SUCCESS)
        .setReadOpChecksumInfo(checksumInfo)
        .build()
        .writeDelimitedTo(out);
    out.flush();
  }
  

  /**
   * Wait until the block pool is registered, up to the configured amount of
   * time. Throws an exception if it times out, which should fail the client
   * request.
   * @param block the requested block
   */
  void checkAndWaitForBP(final ExtendedBlock block)
      throws IOException {
    String bpId = block.getBlockPoolId();

    // The registration is only missing in relatively short time window.
    // Optimistically perform this first.
    try {
      datanode.getDNRegistrationForBP(bpId);
      return;
    } catch (IOException ioe) {
      // not registered yet; fall through to the retry loop
    }

    // retry
    long bpReadyTimeout = dnConf.getBpReadyTimeout() * 1000;
    long startTime = Time.monotonicNow();
    while (Time.monotonicNow() - startTime <= bpReadyTimeout) {
      try {
        datanode.getDNRegistrationForBP(bpId);
        return;
      } catch (IOException ioe) {
        // still not registered
      }
      // sleep before trying again
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // Restore the interrupt status so callers further up the stack can
        // observe it, and surface the interruption as an IOException subtype.
        Thread.currentThread().interrupt();
        throw new InterruptedIOException(
            "Interrupted while serving request. Aborting.");
      }
    }
    // failed to obtain registration.
    throw new IOException("Not ready to serve the block pool, " + bpId + ".");
  }

  /**
   * Verify the block access token (when token auth is enabled) after waiting
   * for the block pool to register. On an invalid token this optionally
   * writes an ERROR_ACCESS_TOKEN reply, closes {@code out}, and rethrows.
   *
   * @param out stream used for the error reply; closed on token failure
   * @param reply whether to send an error response before throwing
   */
  private void checkAccess(OutputStream out, final boolean reply, 
      final ExtendedBlock blk,
      final Token<BlockTokenIdentifier> t,
      final Op op,
      final BlockTokenSecretManager.AccessMode mode) throws IOException {
    checkAndWaitForBP(blk);
    if (!datanode.isBlockTokenEnabled) {
      return;
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Checking block access token for block '" + blk.getBlockId()
          + "' with mode '" + mode + "'");
    }
    try {
      datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
    } catch (InvalidToken e) {
      try {
        if (reply) {
          final BlockOpResponseProto.Builder resp = BlockOpResponseProto
              .newBuilder().setStatus(ERROR_ACCESS_TOKEN);
          if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
            final DatanodeRegistration dnR =
                datanode.getDNRegistrationForBP(blk.getBlockPoolId());
            // NB: Unconditionally using the xfer addr w/o hostname
            resp.setFirstBadLink(dnR.getXferAddr());
          }
          resp.build().writeDelimitedTo(out);
          out.flush();
        }
        LOG.warn("Block token verification failed: op=" + op
            + ", remoteAddress=" + remoteAddress
            + ", message=" + e.getLocalizedMessage());
        throw e;
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }
}
