package org.apache.hadoop.hdfs.server.datanode;

import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;

import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.zip.Checksum;

import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;

/**
 * Receives a block from the upstream node over a socket input stream, one
 * packet at a time. Each packet received is written to this datanode's local
 * disk file and simultaneously forwarded to the downstream datanode in the
 * pipeline.
 */
class BlockReceiver implements Closeable {
  public static final Log LOG = DataNode.LOG;
  static final Log ClientTraceLog = DataNode.ClientTraceLog;

  @VisibleForTesting
  static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024;
  private final long datanodeSlowLogThresholdMs;
  private DataInputStream in = null; // from where data are read
  private DataChecksum clientChecksum; // checksum used by client
  private DataChecksum diskChecksum; // checksum we write to disk
  
  /**
   * In the case that the client is writing with a different checksum polynomial than the block is stored with on disk,
   * the DataNode needs to recalculate checksums before writing.
   */
  private final boolean needsChecksumTranslation;
  private OutputStream out = null; // to block file at local disk
  private FileDescriptor outFd;
  private DataOutputStream checksumOut = null; // to crc file at local disk
  private final int bytesPerChecksum;
  private final int checksumSize;
  
  private final PacketReceiver packetReceiver = new PacketReceiver(false);
  
  protected final String inAddr;
  protected final String myAddr;
  private String mirrorAddr;
  private DataOutputStream mirrorOut;
  private Daemon responder = null;
  private DataTransferThrottler throttler;
  private ReplicaOutputStreams streams;
  private DatanodeInfo srcDataNode = null;
  private final DataNode datanode;
  volatile private boolean mirrorError;

  // Cache management state
  private boolean dropCacheBehindWrites;
  private long lastCacheManagementOffset = 0;
  private boolean syncBehindWrites;
  private boolean syncBehindWritesInBackground;

  /** The client name.  It is empty if a datanode is the client */
  private final String clientname;
  private final boolean isClient; 
  private final boolean isDatanode;

  /** the block to receive */
  private final ExtendedBlock block; 
  /** the replica to write */
  private final ReplicaInPipelineInterface replicaInfo;
  /** pipeline stage */
  private final BlockConstructionStage stage;
  private final boolean isTransfer;

  /** Whether to fsync the block data to disk as soon as the whole block has been received. */
  private boolean dirFsyncOnBlockeceived;
  private boolean syncOnClose;
  private long restartBudget;

  /** for replaceBlock response */
  private final long responseInterval;
  private long lastResponseTime = 0;
  private boolean isReplaceBlock = false;
  private DataOutputStream replyOut = null;
  private long lastSentTime;
  private long maxSendIdleTime;

  /**
   * 一个block对应一个BlockReceiver，
   * 所以对一个block最大的初始化的一个操作，是为这个block初始化一个BlockReceiver组件
   * 这个BlockReceiver组件专门负责接收这个block的packet数据包
   */
  BlockReceiver(final ExtendedBlock block, final StorageType storageType,
      final DataInputStream in, final String inAddr, final String myAddr, final BlockConstructionStage stage,
      final long newGs, final long minBytesRcvd, final long maxBytesRcvd,  final String clientname, final DatanodeInfo srcDataNode,
      final DataNode datanode, DataChecksum requestedChecksum, CachingStrategy cachingStrategy, final boolean allowLazyPersist) throws IOException {

    try{
      this.block = block;
      this.in = in;
      this.inAddr = inAddr;
      this.myAddr = myAddr;
      this.srcDataNode = srcDataNode;
      this.datanode = datanode;

      this.clientname = clientname;
      this.isDatanode = clientname.length() == 0;
      this.isClient = !this.isDatanode;
      this.restartBudget = datanode.getDnConf().restartReplicaExpiry;
      this.datanodeSlowLogThresholdMs = datanode.getDnConf().datanodeSlowIoWarningThresholdMs;
      // For replaceBlock() calls response should be sent to avoid socketTimeout at clients. So sending with the interval of 0.5 * socketTimeout
      final long readTimeout = datanode.getDnConf().socketTimeout;
      this.responseInterval = (long) (readTimeout * 0.5);
      //for datanode, we have
      // 1: clientName.length() == 0, and
      // 2: stage == null or PIPELINE_SETUP_CREATE
      this.stage = stage;
      this.isTransfer = stage == BlockConstructionStage.TRANSFER_RBW || stage == BlockConstructionStage.TRANSFER_FINALIZED;

      this.lastSentTime = Time.monotonicNow();
      // Downstream will timeout in readTimeout on receiving the next packet.
      // If there is no data traffic, a heartbeat packet is sent at
      // the interval of 0.5*readTimeout. Here, we set 0.9*readTimeout to be
      // the threshold for detecting congestion.
      this.maxSendIdleTime = (long) (readTimeout * 0.9);

      // Open local disk out
      if (isDatanode) { //replication or move
        replicaInfo = datanode.data.createTemporary(storageType, block);
      } else {
        switch (stage) {
          // 刚开始的一个阶段，应该是 PIPELIENT_SETUP_CREATE
          // 数据滚到初始化的一个阶段
        case PIPELINE_SETUP_CREATE:
          // 来了一个block, 除了初始化 BlockReceiver准备接收packet外，还要干点别的事情
          // FSDataSet专门管理本地的磁盘文件
          replicaInfo = datanode.data.createRbw(storageType, block, allowLazyPersist); //
          // 还调用了一下namenode接口，通知人家，自己正在接收block
          // namenode接收到这个东西，先记录一下，有一个block正处于under construction，正在构造，正在接收数据中
          datanode.notifyNamenodeReceivingBlock( block, replicaInfo.getStorageUuid()); //
          break;
        case PIPELINE_SETUP_STREAMING_RECOVERY:
          replicaInfo = datanode.data.recoverRbw( block, newGs, minBytesRcvd, maxBytesRcvd);
          block.setGenerationStamp(newGs);
          break;
        case PIPELINE_SETUP_APPEND:
          replicaInfo = datanode.data.append(block, newGs, minBytesRcvd);
          if (datanode.blockScanner != null) { // remove from block scanner
            datanode.blockScanner.deleteBlock(block.getBlockPoolId(), block.getLocalBlock());
          }
          block.setGenerationStamp(newGs);
          datanode.notifyNamenodeReceivingBlock( block, replicaInfo.getStorageUuid());
          break;
        case PIPELINE_SETUP_APPEND_RECOVERY:
          replicaInfo = datanode.data.recoverAppend(block, newGs, minBytesRcvd);
          if (datanode.blockScanner != null) { // remove from block scanner
            datanode.blockScanner.deleteBlock(block.getBlockPoolId(), block.getLocalBlock());
          }
          block.setGenerationStamp(newGs);
          datanode.notifyNamenodeReceivingBlock(
              block, replicaInfo.getStorageUuid());
          break;
        case TRANSFER_RBW:
        case TRANSFER_FINALIZED:
          // this is a transfer destination
          replicaInfo = datanode.data.createTemporary(storageType, block);
          break;
        default: throw new IOException("Unsupported stage " + stage + 
              " while receiving block " + block + " from " + inAddr);
        }
      }
      this.dropCacheBehindWrites = (cachingStrategy.getDropBehind() == null) ? datanode.getDnConf().dropCacheBehindWrites : cachingStrategy.getDropBehind();
      this.syncBehindWrites = datanode.getDnConf().syncBehindWrites;
      this.syncBehindWritesInBackground = datanode.getDnConf().syncBehindWritesInBackground;
      
      final boolean isCreate = isDatanode || isTransfer  || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
      streams = replicaInfo.createStreams(isCreate, requestedChecksum);
      assert streams != null : "null streams!";

      // read checksum meta information
      this.clientChecksum = requestedChecksum;
      this.diskChecksum = streams.getChecksum();
      this.needsChecksumTranslation = !clientChecksum.equals(diskChecksum);
      this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
      this.checksumSize = diskChecksum.getChecksumSize();

      this.out = streams.getDataOut();
      if (out instanceof FileOutputStream) {
        this.outFd = ((FileOutputStream)out).getFD();
      } else {
        LOG.warn("Could not get file descriptor for outputstream of class " + out.getClass());
      }
      this.checksumOut = new DataOutputStream(new BufferedOutputStream( streams.getChecksumOut(), HdfsConstants.SMALL_BUFFER_SIZE));
      // write data chunk header if creating a new replica
      if (isCreate) {
        BlockMetadataHeader.writeHeader(checksumOut, diskChecksum);
      } 
    } catch (ReplicaAlreadyExistsException bae) {
      throw bae;
    } catch (ReplicaNotFoundException bne) {
      throw bne;
    } catch(IOException ioe) {
      IOUtils.closeStream(this);
      cleanupBlock();
      
      // check if there is a disk error
      IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
      DataNode.LOG.warn("IOException in BlockReceiver constructor. Cause is ", cause);
      
      if (cause != null) { // possible disk error
        ioe = cause;
        datanode.checkDiskErrorAsync();
      }
      
      throw ioe;
    }
  }

  /** Return the datanode object. */
  DataNode getDataNode() {return datanode;}

  String getStorageUuid() {
    return replicaInfo.getStorageUuid();
  }

  /**
   * close files.
   */
  @Override
  public void close() throws IOException {
    packetReceiver.close();

    IOException ioe = null;
    if (syncOnClose && (out != null || checksumOut != null)) {
      datanode.metrics.incrFsyncCount();      
    }
    long flushTotalNanos = 0;
    boolean measuredFlushTime = false;
    // close checksum file
    try {
      if (checksumOut != null) {
        long flushStartNanos = System.nanoTime();
        checksumOut.flush();
        long flushEndNanos = System.nanoTime();
        if (syncOnClose) {
          long fsyncStartNanos = flushEndNanos;
          streams.syncChecksumOut();
          datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
        }
        flushTotalNanos += flushEndNanos - flushStartNanos;
        measuredFlushTime = true;
        checksumOut.close();
        checksumOut = null;
      }
    } catch(IOException e) {
      ioe = e;
    }
    finally {
      IOUtils.closeStream(checksumOut);
    }
    // close block file
    try {
      if (out != null) {
        long flushStartNanos = System.nanoTime();
        out.flush();
        long flushEndNanos = System.nanoTime();
        if (syncOnClose) {
          long fsyncStartNanos = flushEndNanos;
          streams.syncDataOut();
          datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
        }
        flushTotalNanos += flushEndNanos - flushStartNanos;
        measuredFlushTime = true;
        out.close();
        out = null;
      }
    } catch (IOException e) {
      ioe = e;
    }
    finally{
      IOUtils.closeStream(out);
    }
    if (measuredFlushTime) {
      datanode.metrics.addFlushNanos(flushTotalNanos);
    }
    // disk check
    if(ioe != null) {
      datanode.checkDiskErrorAsync();
      throw ioe;
    }
  }

  synchronized void setLastSentTime(long sentTime) {
    lastSentTime = sentTime;
  }

  /**
   * It can return false if
   * - upstream did not send packet for a long time
   * - a packet was received but got stuck in local disk I/O.
   * - a packet was received but got stuck on send to mirror.
   */
  synchronized boolean packetSentInTime() {
    long diff = Time.monotonicNow() - lastSentTime;
    if (diff > maxSendIdleTime) {
      LOG.info("A packet was last sent " + diff + " milliseconds ago.");
      return false;
    }
    return true;
  }

  /**
   * Flush block data and metadata files to disk.
   * @throws IOException
   */
  void flushOrSync(boolean isSync) throws IOException {
    long flushTotalNanos = 0;
    long begin = Time.monotonicNow();

    if (checksumOut != null) {
      long flushStartNanos = System.nanoTime();
      checksumOut.flush();
      long flushEndNanos = System.nanoTime();
      if (isSync) {
        long fsyncStartNanos = flushEndNanos;
        streams.syncChecksumOut();
        datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
      }
      flushTotalNanos += flushEndNanos - flushStartNanos;
    }

    if (out != null) {
      long flushStartNanos = System.nanoTime();
      out.flush(); // TODO 核心代码 将内存缓冲区的数据 写入磁盘或网络
      long flushEndNanos = System.nanoTime();
      if (isSync) {
        long fsyncStartNanos = flushEndNanos;
        streams.syncDataOut();
        datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
      }
      flushTotalNanos += flushEndNanos - flushStartNanos;
    }

    if (checksumOut != null || out != null) {
      datanode.metrics.addFlushNanos(flushTotalNanos);
      if (isSync) {
    	  datanode.metrics.incrFsyncCount();      
      }
    }

    long duration = Time.monotonicNow() - begin;
    if (duration > datanodeSlowLogThresholdMs) {
      LOG.warn("Slow flushOrSync took " + duration + "ms (threshold="+ datanodeSlowLogThresholdMs + "ms), isSync:" + isSync + ", flushTotalNanos="+ flushTotalNanos + "ns");
    }
  }

  /**
   * While writing to mirrorOut, failure to write to mirror should not
   * affect this datanode unless it is caused by interruption.
   */
  private void handleMirrorOutError(IOException ioe) throws IOException {
    String bpid = block.getBlockPoolId();
    LOG.info(datanode.getDNRegistrationForBP(bpid)
        + ":Exception writing " + block + " to mirror " + mirrorAddr, ioe);
    if (Thread.interrupted()) { // shut down if the thread is interrupted
      throw ioe;
    } else { // encounter an error while writing to mirror
      // continue to run even if can not write to mirror
      // notify client of the error
      // and wait for the client to shut down the pipeline
      mirrorError = true;
    }
  }
  
  /**
   * Verify multiple CRC chunks. 
   */
  private void verifyChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf)
      throws IOException {
    try {
      clientChecksum.verifyChunkedSums(dataBuf, checksumBuf, clientname, 0);
    } catch (ChecksumException ce) {
      LOG.warn("Checksum error in block " + block + " from " + inAddr, ce);
      // No need to report to namenode when client is writing.
      if (srcDataNode != null && isDatanode) {
        try {
          LOG.info("report corrupt " + block + " from datanode " +
                    srcDataNode + " to namenode");
          datanode.reportRemoteBadBlock(srcDataNode, block);
        } catch (IOException e) {
          LOG.warn("Failed to report bad " + block + 
                    " from datanode " + srcDataNode + " to namenode");
        }
      }
      throw new IOException("Unexpected checksum mismatch while writing "
          + block + " from " + inAddr);
    }
  }
  
    
  /**
   * Translate CRC chunks from the client's checksum implementation
   * to the disk checksum implementation.
   * 
   * This does not verify the original checksums, under the assumption
   * that they have already been validated.
   */
  private void translateChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf) {
    diskChecksum.calculateChunkedSums(dataBuf, checksumBuf);
  }

  /** 
   * Check whether checksum needs to be verified.
   * Skip verifying checksum iff this is not the last one in the 
   * pipeline and clientName is non-null. i.e. Checksum is verified
   * on all the datanodes when the data is being written by a 
   * datanode rather than a client. Whe client is writing the data, 
   * protocol includes acks and only the last datanode needs to verify 
   * checksum.
   * @return true if checksum verification is needed, otherwise false.
   */
  private boolean shouldVerifyChecksum() {
    return (mirrorOut == null || isDatanode || needsChecksumTranslation);
  }

  /** 
   * 接收和处理一个packet, 一个packet可以包含很多个chunk,最后这个方法它会返回这个packet所拥有的数据的字节。
   *
   * 1. receivePacket()方法首先调用packetReceiver.receiveNextPacket()方法从输入流中读入一个数据包（packet），并将这个数据包放入ByteBuffer缓冲区curPacketBuf中。
   *     readNextPacket()方法的实现比较简单，就是按照数据包格式从输入流中读取数据并放入指定的ByteBuffer缓冲区中。
   * 2. receivePacket()成功接收数据包后，会判断当前节点是否是数据流管道中的最后一个节点，或者是输入流启动了sync标识（syncBlock）要求Datanode立即将数据包同步到磁盘。
   *     在这两种情况下，Datanode会先将数据写入磁盘，然后再通知PacketResponder处理确认（ACK）消息；
   *     否则，receivePacket()方法接收完数据包后会立即通知PacketResponder处理确认消息。
   * 3. 接下来receivePacket()会将数据包发送给数据流管道中的下游节点，然后就可以将数据块文件和校验文件写入数据节点的磁盘了。
   * 4. 写入磁盘之后，receivePacket()方法需要调用flushOrSync()方法将输出流缓存中的数据全部同步到磁盘，最后还需要调用manageWriterOsCache（清理）操作系统缓存中的数据。
   *     需要注意的是，如果当前节点是数据流管道中的最后一个节点，则在写入磁盘前，需要先对数据块中的所有数据包进行校验。
   */
  private int receivePacket() throws IOException {
    // 在这里基于IO流网络通信，
    // 通过这个输入流可以读取到连续的两个packet的数据，但是在这里，它会自动做一个区分
    // 在这里，会自动区分出来哪个packet是那部分的数据
    // 在输入流里接收到数据的时候，肯定可以读取都一些特殊的字符，packet header

    // 在这里，通过 packetReceiver这个组件，通过输入流，仅仅是读取到一个packet数据出来
    // 调用packetReceiver.receiveNextPacket()方法从输入流中读入一个数据包（packet）
    packetReceiver.receiveNextPacket(in); //TODO 核心方法

    // 从packetHeader中就可以获取到一个packet元数据，
    // 比如这个packet是block的第几个block，它包含的数据大小是多少？是否是block中的最后一个packet
    // 如果是最后一个packet, 那么必然是一个空的packet
    PacketHeader header = packetReceiver.getHeader();

    // Sanity check the header
    if (header.getOffsetInBlock() > replicaInfo.getNumBytes()) {
      throw new IOException("Received an out-of-sequence packet for " + block + 
          "from " + inAddr + " at offset " + header.getOffsetInBlock() + ". Expecting packet starting at " + replicaInfo.getNumBytes());
    }
    if (header.getDataLen() < 0) {
      throw new IOException("Got wrong length during writeBlock(" + block +  ") from " + inAddr + " at offset " +  header.getOffsetInBlock() + ": " + header.getDataLen());
    }

    long offsetInBlock = header.getOffsetInBlock();
    long seqno = header.getSeqno();
    // boolean, 布尔值，表示了这个packet是否是block中最后一个packet, 空的packet
    boolean lastPacketInBlock = header.isLastPacketInBlock();
    final int len = header.getDataLen();
    boolean syncBlock = header.getSyncBlock();

    // avoid double sync'ing on close
    // 这里可以判断，接收到了最后一个packet,代表block接收完毕了
    if (syncBlock && lastPacketInBlock) {
      this.syncOnClose = false;
      this.dirFsyncOnBlockeceived = true;
    }

    // update received bytes
    final long firstByteInBlock = offsetInBlock;
    offsetInBlock += len;
    if (replicaInfo.getNumBytes() < offsetInBlock) {
      replicaInfo.setNumBytes(offsetInBlock);
    }
    
    // 如果不是数据流管道的最后一个数据节点，则立即处理响应消息
    if (responder != null && !syncBlock && !shouldVerifyChecksum()) {
      ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock, offsetInBlock, Status.SUCCESS); // TODO
    }

    // Drop heartbeat for testing.
    if (seqno < 0 && len == 0 && DataNodeFaultInjector.get().dropHeartbeatPacket()) {
      return 0;
    }

    // 向下游节点发送数据包
    // 首先将packet写入镜像：
    if (mirrorOut != null && !mirrorError) {
      try {
        long begin = Time.monotonicNow();
        DataNodeFaultInjector.get().stopSendingPacketDownstream();
        packetReceiver.mirrorPacketTo(mirrorOut); //TODO 核心方法
        mirrorOut.flush(); //TODO 核心方法

        long now = Time.monotonicNow();
        setLastSentTime(now);
        long duration = now - begin;
        if (duration > datanodeSlowLogThresholdMs) {
          LOG.warn("Slow BlockReceiver write packet to mirror took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
        }
      } catch (IOException e) {
        handleMirrorOutError(e);
      }
    }
    
    ByteBuffer dataBuf = packetReceiver.getDataSlice();
    ByteBuffer checksumBuf = packetReceiver.getChecksumSlice();

    // 最后一个packet, 或者是空的packet(最后一个packet是空的)
    if (lastPacketInBlock || len == 0) {
      // 如果接收了完整的数据块，并且启动了 sync标识，则立即将数据同步到磁盘
      if (syncBlock) {
        flushOrSync(true); //TODO 核心方法
      }
    } else {
      final int checksumLen = diskChecksum.getChecksumSize(len);
      final int checksumReceivedLen = checksumBuf.capacity();

      if (checksumReceivedLen > 0 && checksumReceivedLen != checksumLen) {
        throw new IOException("Invalid checksum length: received length is " + checksumReceivedLen + " but expected length is " + checksumLen);
      }

      // 如果当前节点是数据流管道中的最后一个节点，则验证数据包的校验和
      if (checksumReceivedLen > 0 && shouldVerifyChecksum()) {
        try {
          // hdfs客户端传输过来的时候，有一个chunk -> checksum(基于chunk的内容用crc算法算出来的)
          // 现在check传输到了datanode这边了，datanode为了确保传输的过程中chunk数据没有破损
          // 就需要重新基于chunk的内容算一下checksum，跟hdfs客户端发送过来的checksum对比一下，看看是否一致
          // 正常情况下如果chunk的内容都是一样的，肯定checksum是一样的
          // 如果过程中，chunk的内容被修改了，那么checksum肯定是不一样的
          verifyChunks(dataBuf, checksumBuf); // 调用 verifyChunks() 验证数据包校验和
        } catch (IOException ioe) {
          // checksum error detected locally. there is no reason to continue.
          if (responder != null) {
            try {
              //  验证出现异常，则向上游客户端发送校验异常信息
              ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock, offsetInBlock, Status.ERROR_CHECKSUM); //TODO 核心方法
              // Wait until the responder sends back the response and interrupt this thread.
              Thread.sleep(3000);
            } catch (InterruptedException e) { }
          }
          throw new IOException("Terminating due to a checksum error." + ioe);
        }
 
        if (needsChecksumTranslation) {
          // 如果客户端发送的数据校验方式和当前数据节点的不一致，则转换校验和
          translateChunks(dataBuf, checksumBuf);
        }
      }

      if (checksumReceivedLen == 0 && !streams.isTransientStorage()) {
        // checksum is missing, need to calculate it
        checksumBuf = ByteBuffer.allocate(checksumLen);
        diskChecksum.calculateChunkedSums(dataBuf, checksumBuf);
      }
      
      // by this point, the data in the buffer uses the disk checksum
      final boolean shouldNotWriteChecksum = checksumReceivedLen == 0 && streams.isTransientStorage();
      try {
        long onDiskLen = replicaInfo.getBytesOnDisk();
        if (onDiskLen<offsetInBlock) {
          long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;
          long lastChunkBoundary = onDiskLen - partialChunkSizeOnDisk;
          boolean alignedOnDisk = partialChunkSizeOnDisk == 0;
          boolean alignedInPacket = firstByteInBlock % bytesPerChecksum == 0;

          boolean overwriteLastCrc = !alignedOnDisk && !shouldNotWriteChecksum;
          boolean doCrcRecalc = overwriteLastCrc && (lastChunkBoundary != firstByteInBlock);

          if (!alignedInPacket && len > bytesPerChecksum) {
            throw new IOException("Unexpected packet data length for " +  block + " from " + inAddr + ": a partial chunk must be " + " sent in an individual packet (data length = " + len +  " > bytesPerChecksum = " + bytesPerChecksum + ")");
          }

          Checksum partialCrc = null;
          if (doCrcRecalc) {
            long offsetInChecksum = BlockMetadataHeader.getHeaderSize() + onDiskLen / bytesPerChecksum * checksumSize;
            partialCrc = computePartialChunkCrc(onDiskLen, offsetInChecksum);
          }

          int startByteToDisk = (int)(onDiskLen-firstByteInBlock)  + dataBuf.arrayOffset() + dataBuf.position();

          int numBytesToDisk = (int)(offsetInBlock-onDiskLen);
          
          long begin = Time.monotonicNow();
          // 最最核心的代码，直接将packet数据全部输入本地磁盘的blk_000001文件里面去
          out.write(dataBuf.array(), startByteToDisk, numBytesToDisk); //TODO 核心方法 写入数据
          long duration = Time.monotonicNow() - begin;
          if (duration > datanodeSlowLogThresholdMs) {
            LOG.warn("Slow BlockReceiver write data to disk cost:" + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
          }

          final byte[] lastCrc;
          if (shouldNotWriteChecksum) {
            lastCrc = null;
          } else {
            int skip = 0;
            byte[] crcBytes = null;

            if (overwriteLastCrc) { // not chunk-aligned on disk
              adjustCrcFilePosition();
            }

            if (doCrcRecalc) {
              int bytesToReadForRecalc = (int)(bytesPerChecksum - partialChunkSizeOnDisk);
              if (numBytesToDisk < bytesToReadForRecalc) {
                bytesToReadForRecalc = numBytesToDisk;
              }

              partialCrc.update(dataBuf.array(), startByteToDisk, bytesToReadForRecalc);
              byte[] buf = FSOutputSummer.convertToByteStream(partialCrc, checksumSize);
              crcBytes = copyLastChunkChecksum(buf, checksumSize, buf.length);
              checksumOut.write(buf);  //TODO 核心方法  写入校验和数据
              skip++; //  For the partial chunk that was just read.
            }

            long skippedDataBytes = lastChunkBoundary - firstByteInBlock;

            if (skippedDataBytes > 0) {
              skip += (int)(skippedDataBytes / bytesPerChecksum) + ((skippedDataBytes % bytesPerChecksum == 0) ? 0 : 1);
            }
            skip *= checksumSize; // Convert to number of bytes

            // write the rest of checksum
            final int offset = checksumBuf.arrayOffset() + checksumBuf.position() + skip;
            final int end = offset + checksumLen - skip;
            if (offset >= end && doCrcRecalc) {
              lastCrc = crcBytes;
            } else {
              final int remainingBytes = checksumLen - skip;
              lastCrc = copyLastChunkChecksum(checksumBuf.array(), checksumSize, end);
              checksumOut.write(checksumBuf.array(), offset, remainingBytes);
            }
          }

          // 将数据和校验数据同步到磁盘
          flushOrSync(syncBlock);
          replicaInfo.setLastChecksumAndDataLen(offsetInBlock, lastCrc);
          datanode.metrics.incrBytesWritten(len);
          // 清除操作系统缓存
          manageWriterOsCache(offsetInBlock);
        }
      } catch (IOException iex) {
        datanode.checkDiskErrorAsync();
        throw iex;
      }
    }

    // 如果是最后一个节点，或者是 syncBlock方式，则在数据写完磁盘以后在对ack进行处理
    if (responder != null && (syncBlock || shouldVerifyChecksum())) {
      ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock, offsetInBlock, Status.SUCCESS);
    }

    if (isReplaceBlock && (Time.monotonicNow() - lastResponseTime > responseInterval)) {
      BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder().setStatus(Status.IN_PROGRESS);
      response.build().writeDelimitedTo(replyOut);
      replyOut.flush();

      lastResponseTime = Time.monotonicNow();
    }

    // 节流器控制
    if (throttler != null) { // throttle I/O
      throttler.throttle(len); // 每次接受了一个64kb的packet之后，都会进行限流
      // 如果500ms内，接收的数据超过了5mb, 此时BlockReceiver线程就会wait，停止接收数据，起到了限流的作用
      // 可以保证进行balance的机器每秒传输的数据不会超过你指定的数量，避免balance任务打满带宽
    }
    
    return lastPacketInBlock?-1:len;
  }

  private static byte[] copyLastChunkChecksum(byte[] array, int size, int end) {
    return Arrays.copyOfRange(array, end - size, end);
  }

  private void manageWriterOsCache(long offsetInBlock) {
    try {
      if (outFd != null &&
          offsetInBlock > lastCacheManagementOffset + CACHE_DROP_LAG_BYTES) {
        long begin = Time.monotonicNow();
        //
        // For SYNC_FILE_RANGE_WRITE, we want to sync from
        // lastCacheManagementOffset to a position "two windows ago"
        //
        //                         <========= sync ===========>
        // +-----------------------O--------------------------X
        // start                  last                      curPos
        // of file                 
        //
        if (syncBehindWrites) {
          if (syncBehindWritesInBackground) {
            this.datanode.getFSDataset().submitBackgroundSyncFileRangeRequest(
                block, outFd, lastCacheManagementOffset,
                offsetInBlock - lastCacheManagementOffset,
                NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
          } else {
            NativeIO.POSIX.syncFileRangeIfPossible(outFd,
                lastCacheManagementOffset, offsetInBlock - lastCacheManagementOffset,
                NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
          }
        }
        //
        // For POSIX_FADV_DONTNEED, we want to drop from the beginning 
        // of the file to a position prior to the current position.
        //
        // <=== drop =====> 
        //                 <---W--->
        // +--------------+--------O--------------------------X
        // start        dropPos   last                      curPos
        // of file             
        //                     
        long dropPos = lastCacheManagementOffset - CACHE_DROP_LAG_BYTES;
        if (dropPos > 0 && dropCacheBehindWrites) {
          NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible( block.getBlockName(), outFd, 0, dropPos, NativeIO.POSIX.POSIX_FADV_DONTNEED);
        }
        lastCacheManagementOffset = offsetInBlock;
        long duration = Time.monotonicNow() - begin;
        if (duration > datanodeSlowLogThresholdMs) {
          LOG.warn("Slow manageWriterOsCache took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
        }
      }
    } catch (Throwable t) {
      LOG.warn("Error managing cache for writer of block " + block, t);
    }
  }
  
  public void sendOOB() throws IOException, InterruptedException {
    ((PacketResponder) responder.getRunnable()).sendOOBResponse(PipelineAck.getRestartOOBStatus());
  }

  /**
   * 1. First starts the PacketResponder thread, which receives and relays the
   *    ACK messages sent back by the downstream nodes.
   * 2. Then receiveBlock() loops on receivePacket(), accepting packets
   *    written by the upstream node and forwarding each to the downstream
   *    node.
   * 3. Once the whole block has been written successfully, receiveBlock()
   *    closes the PacketResponder thread.
   */
  void receiveBlock(
      DataOutputStream mirrOut, // output to next datanode
      DataInputStream mirrIn,   // input from next datanode
      DataOutputStream replyOut,  // output to previous datanode
      String mirrAddr, DataTransferThrottler throttlerArg,
      DatanodeInfo[] downstreams,
      boolean isReplaceBlock) throws IOException {

      syncOnClose = datanode.getDnConf().syncOnClose;
      boolean responderClosed = false;
      mirrorOut = mirrOut;
      mirrorAddr = mirrAddr;
      throttler = throttlerArg;

      this.replyOut = replyOut;
      this.isReplaceBlock = isReplaceBlock;

    try {
      if (isClient && !isTransfer) {
        // 启动PacketResponder线程处理确认包的接收和转发
        responder = new Daemon(datanode.threadGroup,  new PacketResponder(replyOut, mirrIn, downstreams)); // TODO
        responder.start(); // start thread to processes responses
      }

      // 循环调用 receivePacket() 接收并转发数据块中的所有数据包
      // 核心的接收packet的逻辑，封装在这里有一个receivePacket()方法，这里面都是网络的输入输出流，做了一个封装
      // 分布式系统通信,一般都是这几种，rpc, tcp, 流式，接口，http
      // 在这里，如果接收到了最后一个packet之后，dirFsyncOnBlockeceived = true
      while (receivePacket() >= 0) { // TODO 核心代码
        /* Receive until the last packet */
      }

      // 往下走，是不是就代表下面的代码都是在处理一个block洗完以后的事情

      if (responder != null) {
        // 完成数据块的写入操作后，结束PacketResponder线程
        ((PacketResponder)responder.getRunnable()).close();
        responderClosed = true;
      }

      if (isDatanode || isTransfer) {
        close();
        block.setNumBytes(replicaInfo.getNumBytes());

        if (stage == BlockConstructionStage.TRANSFER_RBW) {
          datanode.data.convertTemporaryToRbw(block);
        } else {
          datanode.data.finalizeBlock(block, this.dirFsyncOnBlockeceived); // TODO 核心代码
        }
        datanode.metrics.incrBlocksWritten();
      }

    } catch (IOException ioe) {
      replicaInfo.releaseAllBytesReserved();
      if (datanode.isRestarting()) {
        LOG.info("Shutting down for restart (" + block + ").");
      } else {
        LOG.info("Exception for " + block, ioe);
        throw ioe;
      }
    } finally {
      Thread.interrupted();

      if (!responderClosed) { // Data transfer was not complete.
        if (responder != null) {
          if (datanode.isRestarting() && isClient && !isTransfer) {
            File blockFile = ((ReplicaInPipeline)replicaInfo).getBlockFile();
            File restartMeta = new File(blockFile.getParent()  +  File.pathSeparator + "." + blockFile.getName() + ".restart");
            if (restartMeta.exists() && !restartMeta.delete()) {
              LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath());
            }
            FileWriter out = null;
            try {
              out = new FileWriter(restartMeta);
              out.write(Long.toString(Time.now() + restartBudget));
              out.flush();
            } catch (IOException ioe) {
            } finally {
              IOUtils.cleanup(LOG, out);
            }
            try {              
              Thread.sleep(1000);
            } catch (InterruptedException ie) {
            }
          }
          responder.interrupt();
        }
        IOUtils.closeStream(this);
        cleanupBlock();
      }
      if (responder != null) {
        try {
          responder.interrupt();
          long joinTimeout = datanode.getDnConf().getXceiverStopTimeout();
          joinTimeout = joinTimeout > 1  ? joinTimeout*8/10 : joinTimeout;
          responder.join(joinTimeout);
          if (responder.isAlive()) {
            String msg = "Join on responder thread " + responder + " timed out";
            LOG.warn(msg + "\n" + StringUtils.getStackTrace(responder));
            throw new IOException(msg);
          }
        } catch (InterruptedException e) {
          responder.interrupt();
          if (!datanode.isRestarting()) {
            throw new IOException("Interrupted receiveBlock");
          }
        }
        responder = null;
      }
    }
  }

  /**
   * Remove the partially-written replica, but only when this write was a
   * replication request from another datanode; client writes keep the
   * partial replica so pipeline recovery can resume it.
   */
  private void cleanupBlock() throws IOException {
    if (!isDatanode) {
      return;
    }
    datanode.data.unfinalizeBlock(block);
  }

  /**
   * Adjust the file pointer in the local meta file so that the last checksum
   * will be overwritten.
   */
  private void adjustCrcFilePosition() throws IOException {
    // Push any buffered bytes to disk before moving the channel position.
    for (OutputStream s : new OutputStream[] { out, checksumOut }) {
      if (s != null) {
        s.flush();
      }
    }
    // Rewind the meta file by one checksum so it can be rewritten.
    datanode.data.adjustCrcChannelPosition(block, streams, checksumSize);
  }

  /**
   * Convert a big-endian checksum byte array into a long: the first byte of
   * the array ends up in the most significant populated byte of the result.
   */
  static private long checksum2long(byte[] checksum) {
    long result = 0L;
    // Walk from the last byte (shift 0) toward the first, shifting by 8
    // more bits at each step.
    for (int idx = checksum.length - 1, shift = 0; idx >= 0; idx--, shift += 8) {
      result |= (checksum[idx] & 0xffL) << shift;
    }
    return result;
  }

  /**
   * Reads back the pre-existing data of a partial chunk from disk, computes
   * its checksum, and verifies it against the checksum stored in the meta
   * file. Used when a write resumes in the middle of a chunk.
   *
   * @param blkoff byte offset into the block where the write resumes
   * @param ckoff byte offset into the meta file of the stored checksum
   * @return a Checksum primed with the partial chunk's pre-existing data
   * @throws IOException if reading fails or the stored CRC does not match
   */
  private Checksum computePartialChunkCrc(long blkoff, long ckoff)
      throws IOException {

    // How far into the current chunk the write resumes; back blkoff up to
    // the chunk boundary.
    int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
    blkoff = blkoff - sizePartialChunk;
    if (LOG.isDebugEnabled()) {
      LOG.debug("computePartialChunkCrc for " + block
          + ": sizePartialChunk=" + sizePartialChunk
          + ", block offset=" + blkoff
          + ", metafile offset=" + ckoff);
    }

    // Read the partial chunk's data and its stored checksum from disk.
    final byte[] chunkData = new byte[sizePartialChunk];
    final byte[] storedCrcBytes = new byte[checksumSize];
    ReplicaInputStreams streamsIn = null;
    try {
      streamsIn = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
      IOUtils.readFully(streamsIn.getDataIn(), chunkData, 0, sizePartialChunk);
      // The meta file holds the checksum computed when the file was last
      // closed.
      IOUtils.readFully(streamsIn.getChecksumIn(), storedCrcBytes, 0,
          storedCrcBytes.length);
    } finally {
      IOUtils.closeStream(streamsIn);
    }

    // Recompute the checksum of the partial chunk from the data just read.
    final Checksum partialCrc = DataChecksum.newDataChecksum(
        diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
    partialCrc.update(chunkData, 0, sizePartialChunk);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Read in partial CRC chunk from disk for " + block);
    }

    // Paranoia: the recomputed value must equal the stored one.
    final long storedCrc = checksum2long(storedCrcBytes);
    if (partialCrc.getValue() != storedCrc) {
      throw new IOException("Partial CRC " + partialCrc.getValue() +
                   " does not match value computed the " +
                   " last time file was closed " +
                   storedCrc);
    }
    return partialCrc;
  }
  
  /**
   * Position of a PacketResponder in the write pipeline:
   * NON_PIPELINE — no pipeline (downstreams array is null);
   * LAST_IN_PIPELINE — no downstream datanode, acks originate here;
   * HAS_DOWNSTREAM_IN_PIPELINE — downstream acks must be read and relayed.
   */
  private static enum PacketResponderType {
    NON_PIPELINE, LAST_IN_PIPELINE, HAS_DOWNSTREAM_IN_PIPELINE
  }

  /**
   * Canned two-entry reply used when reading the downstream ack failed
   * (mirrorError): this node reports SUCCESS for itself and ERROR for the
   * mirror, so the client can pinpoint the failed node.
   */
  private static final Status[] MIRROR_ERROR_STATUS = {Status.SUCCESS, Status.ERROR};
  
  /**
   * Processes responses from downstream datanodes in the pipeline
   * and sends back replies to the originator.
   */
  class PacketResponder implements Runnable, Closeable {   
    /** queue for packets waiting for ack - synchronization using monitor lock */
    private final LinkedList<Packet> ackQueue = new LinkedList<Packet>(); 
    /** the thread that spawns this responder */
    private final Thread receiverThread = Thread.currentThread();
    /** is this responder running? - synchronization using monitor lock */
    private volatile boolean running = true;
    /** input from the next downstream datanode */
    private final DataInputStream downstreamIn;
    /** output to upstream datanode/client */
    private final DataOutputStream upstreamOut;
    /** The type of this responder */
    private final PacketResponderType type;
    /** for log and error messages */
    private final String myString; 
    /** true while an ack (regular or OOB) is being written upstream */
    private boolean sending = false;

    @Override
    public String toString() {
      return myString;
    }

    PacketResponder(final DataOutputStream upstreamOut, final DataInputStream downstreamIn, final DatanodeInfo[] downstreams) {
      this.downstreamIn = downstreamIn;
      this.upstreamOut = upstreamOut;

      this.type = downstreams == null? PacketResponderType.NON_PIPELINE
          : downstreams.length == 0? PacketResponderType.LAST_IN_PIPELINE
              : PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE;

      final StringBuilder b = new StringBuilder(getClass().getSimpleName()).append(": ").append(block).append(", type=").append(type);
      // BUGFIX: only dereference downstreams when there actually are
      // downstream nodes (== HAS_DOWNSTREAM_IN_PIPELINE). The inverted
      // check ('!=') dereferenced a null downstreams array for the
      // NON_PIPELINE case, throwing a NullPointerException.
      if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
        b.append(", downstreams=").append(downstreams.length)
            .append(":").append(Arrays.asList(downstreams));
      }
      this.myString = b.toString();
    }

    private boolean isRunning() {
      // When preparing for a restart, it should continue to run until interrupted by the receiver thread.
      return running && (datanode.shouldRun || datanode.isRestarting());
    }
    
    /**
     * Called by BlockReceiver after it finishes processing a packet, to hand
     * that packet over to this responder for acknowledgement.
     *
     * The packet is appended to ackQueue, which run() consumes — a classic
     * producer-consumer arrangement; notifyAll() wakes the consumer waiting
     * in waitForAckHead().
     */
    void enqueue(final long seqno, final boolean lastPacketInBlock, final long offsetInBlock, final Status ackStatus) {
      final Packet p = new Packet(seqno, lastPacketInBlock, offsetInBlock, System.nanoTime(), ackStatus);
      synchronized(ackQueue) {
        if (running) {
          ackQueue.addLast(p);
          ackQueue.notifyAll();
        }
      }
    }

    /**
     * Send an OOB response. If all acks have been sent already for the block
     * and the responder is about to close, the delivery is not guaranteed.
     * This is because the other end can close the connection independently.
     * An OOB coming from downstream will be automatically relayed upstream
     * by the responder. This method is used only by originating datanode.
     *
     * @param ackStatus the type of ack to be sent
     */
    void sendOOBResponse(final Status ackStatus) throws IOException,
        InterruptedException {
      if (!running) {
        LOG.info("Cannot send OOB response " + ackStatus + 
            ". Responder not running.");
        return;
      }

      // Serialize with any in-flight regular ack via the 'sending' flag.
      synchronized(this) {
        if (sending) {
          wait(PipelineAck.getOOBTimeout(ackStatus));
          // Didn't get my turn in time. Give up.
          if (sending) {
            throw new IOException("Could not send OOB response in time: " + ackStatus);
          }
        }
        sending = true;
      }

      LOG.info("Sending an out of band ack of type " + ackStatus);
      try {
        sendAckUpstreamUnprotected(null, PipelineAck.UNKOWN_SEQNO, 0L, 0L, ackStatus);
      } finally {
        // Let others send ack. Unless there are multiple OOB send
        // calls, there can be only one waiter, the responder thread.
        // In any case, only one needs to be notified.
        synchronized(this) {
          sending = false;
          notify();
        }
      }
    }
    
    /** Wait for a packet with given {@code seqno} to be enqueued to ackQueue */
    Packet waitForAckHead(long seqno) throws InterruptedException {
      synchronized(ackQueue) {
        while (isRunning() && ackQueue.size() == 0) {
          if (LOG.isDebugEnabled()) {
            LOG.debug(myString + ": seqno=" + seqno +
                      " waiting for local datanode to finish write.");
          }
          ackQueue.wait();
        }
        return isRunning() ? ackQueue.getFirst() : null;
      }
    }

    /**
     * wait for all pending packets to be acked. Then shutdown thread.
     */
    @Override
    public void close() {
      synchronized(ackQueue) {
        while (isRunning() && ackQueue.size() != 0) {
          try {
            ackQueue.wait();
          } catch (InterruptedException e) {
            running = false;
            Thread.currentThread().interrupt();
          }
        }
        if(LOG.isDebugEnabled()) {
          LOG.debug(myString + ": closing");
        }
        running = false;
        ackQueue.notifyAll();
      }

      // Also wake anyone blocked on the 'sending' monitor.
      synchronized(this) {
        running = false;
        notifyAll();
      }
    }

    /**
     * Main responder loop: for every packet of the block, read the downstream
     * ack (if any), match it with the head of ackQueue, and send a combined
     * ack upstream. Any exception sets running=false so isRunning() fails and
     * the thread exits; if the responder is blocked, it is interrupt()ed.
     */
    @Override
    public void run() {
      boolean lastPacketInBlock = false;
      final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
      while (isRunning() && !lastPacketInBlock) {
        long totalAckTimeNanos = 0;
        boolean isInterrupted = false;

        try {
          Packet pkt = null;  // packet currently being acked
          long expected = -2; // seqno expected at the head of ackQueue

          PipelineAck ack = new PipelineAck(); // ack read from downstream
          long seqno = PipelineAck.UNKOWN_SEQNO; // seqno carried by that ack
          long ackRecvNanoTime = 0;
          try {
            if (type != PacketResponderType.LAST_IN_PIPELINE && !mirrorError) {
              // Read one packet ack from the downstream node.
              ack.readFields(downstreamIn);
              ackRecvNanoTime = System.nanoTime();
              // An OOB status (e.g. a downstream node restarting) is relayed
              // upstream immediately without consuming a packet from
              // ackQueue; the client handles the restart.
              Status oobStatus = ack.getOOBStatus();

              if (oobStatus != null) {
                LOG.info("Relaying an out of band ack of type " + oobStatus);
                sendAckUpstream(ack, PipelineAck.UNKOWN_SEQNO, 0L, 0L, Status.SUCCESS);
                continue;
              }

              seqno = ack.getSeqno();
            }

            if (seqno != PipelineAck.UNKOWN_SEQNO || type == PacketResponderType.LAST_IN_PIPELINE) {
              // Take the packet waiting at the head of ackQueue.
              pkt = waitForAckHead(seqno);
              if (!isRunning()) {
                break;
              }
              expected = pkt.seqno;
              // The downstream ack must match the packet we are waiting on;
              // a mismatch means the pipeline is corrupt.
              if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE && seqno != expected) {
                throw new IOException(myString + "seqno: expected=" + expected + ", received=" + seqno);
              }
              if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
                totalAckTimeNanos = ackRecvNanoTime - pkt.ackEnqueueNanoTime;
                long ackTimeNanos = totalAckTimeNanos - ack.getDownstreamAckTimeNanos();
                // A negative value means timing skew; skip the metric.
                if (ackTimeNanos >= 0) {
                  datanode.metrics.addPacketAckRoundTripTimeNanos(ackTimeNanos);
                }
              }
              // The last packet of a block is an empty packet flagged as
              // such; it terminates this loop after being acked.
              lastPacketInBlock = pkt.lastPacketInBlock;
            }

          } catch (InterruptedException ine) {
            isInterrupted = true;
          } catch (IOException ioe) {
            if (Thread.interrupted()) {
              isInterrupted = true;
            } else if (ioe instanceof EOFException && !packetSentInTime()) {
              // The downstream EOF is likely caused by our own slow writes;
              // propagate instead of blaming the mirror.
              LOG.warn("The downstream error might be due to congestion in " + "upstream including this node. Propagating the error: ",  ioe);
              throw ioe;
            } else {
              // Reading from the downstream node failed; record it so the
              // next upstream ack carries MIRROR_ERROR_STATUS.
              mirrorError = true;
              LOG.info(myString, ioe);
            }
          }

          if (Thread.interrupted() || isInterrupted) {
            // Interrupted: stop this responder thread.
            LOG.info(myString + ": Thread is interrupted.");
            running = false;
            continue;
          }

          // On the last packet, finalize the block before acking: close all
          // IO/thread resources for this block, register the finalized
          // replica, and notify the namenode of the new block.
          if (lastPacketInBlock) {
            finalizeBlock(startTime);
          }

          // Build our own ack (this node's status plus the downstream
          // replies) and send it upstream; then drop the packet.
          sendAckUpstream(ack, expected, totalAckTimeNanos, (pkt != null ? pkt.offsetInBlock : 0),  (pkt != null ? pkt.ackStatus : Status.SUCCESS));
          if (pkt != null) {
            removeAckHead();
          }
        } catch (IOException e) {
          LOG.warn("IOException in BlockReceiver.run(): ", e);
          if (running) {
            // Check this node's disks for errors asynchronously.
            datanode.checkDiskErrorAsync();
            LOG.info(myString, e);
            // Stop the responder and interrupt the receiver thread.
            running = false;
            if (!Thread.interrupted()) {
              receiverThread.interrupt();
            }
          }
        } catch (Throwable e) {
          if (running) {
            LOG.info(myString, e);
            running = false;
            receiverThread.interrupt();
          }
        }
      }
      LOG.info(myString + " terminating");
    }
    
    /**
     * Finalize the block and close its files.
     *
     * Unlike reads, a completed block write must be reported to the namenode
     * so it can update the namespace: closeBlock() ultimately calls
     * BPOfferService.notifyNamenodeReceivedBlock(). Called once all packet
     * acks for the block have been processed successfully.
     */
    private void finalizeBlock(long startTime) throws IOException {
      // Close the BlockReceiver (streams, checksums, etc.).
      BlockReceiver.this.close();
      final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
      block.setNumBytes(replicaInfo.getNumBytes());
      // Move the replica to finalized state on disk, then tell the namenode.
      datanode.data.finalizeBlock(block);
      datanode.closeBlock( block, DataNode.EMPTY_DEL_HINT, replicaInfo.getStorageUuid());

      if (ClientTraceLog.isInfoEnabled() && isClient) {
        long offset = 0;
        DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block.getBlockPoolId());
        ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr, myAddr, block.getNumBytes(), "HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(), block, endTime - startTime));
      } else {
        LOG.info("Received " + block + " size " + block.getNumBytes() + " from " + inAddr);
      }
    }
    
    /**
     * Sends an ack upstream after serializing on the 'sending' flag, so a
     * regular ack never interleaves with an OOB ack on the same stream.
     * Delegates the actual write to sendAckUpstreamUnprotected(), which
     * handles the three cases: OOB relay, mirror error, and normal reply.
     */
    private void sendAckUpstream(PipelineAck ack, long seqno, long totalAckTimeNanos, long offsetInBlock, Status myStatus) throws IOException {
      try {
        // Wait for other sender to finish. Unless there is an OOB being sent, the responder won't have to wait.
        synchronized(this) {
          while(sending) {
            wait();
          }
          sending = true;
        }

        try {
          if (!running) {
            return;
          }
          sendAckUpstreamUnprotected(ack, seqno, totalAckTimeNanos, offsetInBlock, myStatus);
        } finally {
          synchronized(this) {
            sending = false;
            notify();
          }
        }
      } catch (InterruptedException ie) {
        // The responder was interrupted. Make it go down without interrupting the receiver(writer) thread.
        running = false;
      }
    }

    /**
     * Builds and writes the upstream ack. Three cases:
     *   1. ack == null (OOB/standalone reply): only myStatus is sent.
     *   2. mirrorError: reading the downstream ack failed earlier, so the
     *      canned MIRROR_ERROR_STATUS (this node OK, mirror errored) is sent.
     *   3. otherwise: myStatus followed by all downstream replies.
     * If a downstream node reported ERROR_CHECKSUM, the data *this* node
     * forwarded was corrupt, so an IOException is thrown to stop the writer
     * and responder. Likewise, after sending our own ERROR_CHECKSUM status
     * upstream, an IOException is thrown to shut down.
     */
    private void sendAckUpstreamUnprotected(PipelineAck ack, long seqno, long totalAckTimeNanos, long offsetInBlock, Status myStatus) throws IOException {
      Status[] replies = null;
      // Case 1: OOB/standalone reply — carry only this node's status.
      if (ack == null) {
        replies = new Status[1];
        replies[0] = myStatus;
      } else if (mirrorError) {
        // Case 2: reading from the downstream node failed earlier; report
        // the mirror as the errored node.
        replies = MIRROR_ERROR_STATUS;
      } else {
        // Case 3: this node's status followed by the downstream replies.
        short ackLen = type == PacketResponderType.LAST_IN_PIPELINE ? 0 : ack.getNumOfReplies();
        replies = new Status[1 + ackLen];
        replies[0] = myStatus;
        for (int i = 0; i < ackLen; i++) {
          replies[i + 1] = ack.getReply(i);
        }
        // If the immediate downstream reported a checksum error, the data
        // this node sent was corrupt: stop BlockReceiver and this responder.
        if (ackLen > 0 && replies[1] == Status.ERROR_CHECKSUM) {
          throw new IOException("Shutting down writer and responder " + "since the down streams reported the data sent by this " + "thread is corrupt");
        }
      }
      // Construct the new ack message for the upstream node.
      PipelineAck replyAck = new PipelineAck(seqno, replies, totalAckTimeNanos);
      if (replyAck.isSuccess() && offsetInBlock > replicaInfo.getBytesAcked()) {
        replicaInfo.setBytesAcked(offsetInBlock);
      }
      // Write the ack upstream and measure how long the send takes.
      long begin = Time.monotonicNow();
      replyAck.write(upstreamOut);
      upstreamOut.flush();

      long duration = Time.monotonicNow() - begin;
      if (duration > datanodeSlowLogThresholdMs) {
        LOG.warn("Slow PacketResponder send ack to upstream took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString + ", replyAck=" + replyAck);
      } else if (LOG.isDebugEnabled()) {
        LOG.debug(myString + ", replyAck=" + replyAck);
      }

      // This node itself detected a checksum error: shut down now that the
      // error response has been delivered upstream.
      if (myStatus == Status.ERROR_CHECKSUM) {
        throw new IOException("Shutting down writer and responder " + "due to a checksum error in received data. The error " + "response has been sent upstream.");
      }
    }
    
    /**
     * Remove a packet from the head of the ack queue
     * 
     * This should be called only when the ack queue is not empty
     */
    private void removeAckHead() {
      synchronized(ackQueue) {
        ackQueue.removeFirst();
        ackQueue.notifyAll();
      }
    }
  }
  
  /**
   * Immutable per-packet metadata cached by the datanode in the ackQueue
   * while the packet waits for its downstream acknowledgement.
   */
  private static class Packet {
    final long seqno;
    final boolean lastPacketInBlock;
    final long offsetInBlock;
    final long ackEnqueueNanoTime;
    final Status ackStatus;

    Packet(long seqno, boolean lastPacketInBlock, long offsetInBlock,
        long ackEnqueueNanoTime, Status ackStatus) {
      this.ackStatus = ackStatus;
      this.ackEnqueueNanoTime = ackEnqueueNanoTime;
      this.offsetInBlock = offsetInBlock;
      this.lastPacketInBlock = lastPacketInBlock;
      this.seqno = seqno;
    }

    @Override
    public String toString() {
      return new StringBuilder(getClass().getSimpleName())
          .append("(seqno=").append(seqno)
          .append(", lastPacketInBlock=").append(lastPacketInBlock)
          .append(", offsetInBlock=").append(offsetInBlock)
          .append(", ackEnqueueNanoTime=").append(ackEnqueueNanoTime)
          .append(", ackStatus=").append(ackStatus)
          .append(")")
          .toString();
    }
  }
}
