package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.Time.now;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CreateSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.LogSegmentOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetStoragePolicyOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.delegation.DelegationKey;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;

/**
 * FSEditLog maintains a log of changes (edits) made to the namespace —
 * the file-system metadata / directory tree.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FSEditLog implements LogsPurgeable {

  static final Log LOG = LogFactory.getLog(FSEditLog.class);

  /**
   * Lifecycle states of the edit log:
   * - UNINITIALIZED: initial state, before the journals are initialized.
   * - BETWEEN_LOG_SEGMENTS: the previous segment has been closed and a new
   *   one has not yet been opened; the log is ready for writing.
   * - IN_SEGMENT: a segment is open and the log is writable.
   * - OPEN_FOR_READING: the log is open for reading.
   * - CLOSED: the log has been closed.
   *
   * Non-HA case:
   *
   * FSEditLog starts in UNINITIALIZED or CLOSED (the state field defaults to
   * State.UNINITIALIZED at construction time). After initialization it moves
   * to BETWEEN_LOG_SEGMENTS, meaning the previous segment is closed and the
   * next has not started, i.e. the log is ready. When the log service is
   * opened, the state becomes IN_SEGMENT and edit log files can be written.
   *
   * HA case:
   *
   * FSEditLog likewise starts in UNINITIALIZED or CLOSED, but after
   * initialization it enters OPEN_FOR_READING rather than
   * BETWEEN_LOG_SEGMENTS (NameNodes currently start in Standby mode and one
   * of them is later promoted to Active via a DFSHAAdmin command).
   */
  private enum State {
    UNINITIALIZED,  // initial state
    BETWEEN_LOG_SEGMENTS, // previous segment closed, next not yet opened
    IN_SEGMENT,  // a segment is open for writing
    OPEN_FOR_READING, // open for reading (HA standby)
    CLOSED;  // closed
  }  
  private State state = State.UNINITIALIZED;
  
  // Set of journals this edit log writes to; built by initJournals().
  private JournalSet journalSet = null;
  // Aggregate output stream: EditLogOutputStream wraps the streams of the
  // underlying journals (e.g. a local-disk stream and, in HA setups, a
  // stream to the JournalNodes), so one logged op is fanned out to all of
  // them. NOTE(review): exact fan-out behavior lives in JournalSet/stream
  // classes outside this file — confirm there.
  private EditLogOutputStream editLogStream = null;

  // a monotonically increasing counter that represents transactionIds.
  private long txid = 0;

  // stores the last synced transactionId.
  private long synctxid = 0;

  // the first txid of the log that's currently open for writing.
  // If this value is N, we are currently writing to edits_inprogress_N
  private long curSegmentTxId = HdfsConstants.INVALID_TXID;

  // the time of printing the statistics to the log file.
  private long lastPrintTime;

  // is a sync currently running?
  private volatile boolean isSyncRunning;

  // is an automatic sync scheduled?
  private volatile boolean isAutoSyncScheduled = false;
  
  // these are statistics counters.
  private long numTransactions;        // number of transactions
  private long numTransactionsBatchedInSync;
  private long totalTimeTransactions;  // total time for all transactions
  private NameNodeMetrics metrics;

  private final NNStorage storage;
  private final Configuration conf;
  
  // Local directories that receive edit log files.
  private final List<URI> editsDirs;

  // Per-thread cache of reusable FSEditLogOp instances.
  private final ThreadLocal<OpInstanceCache> cache =
      new ThreadLocal<OpInstanceCache>() {
    @Override
    protected OpInstanceCache initialValue() {
      return new OpInstanceCache();
    }
  };
  
  /**
   * The edit directories that are shared between primary and secondary.
   */
  private final List<URI> sharedEditsDirs;

  /**
   * Take this lock when adding journals to or closing the JournalSet. Allows
   * us to ensure that the JournalSet isn't closed or updated underneath us
   * in selectInputStreams().
   */
  private final Object journalSetLock = new Object();

  /**
   * Holder for the transaction id assigned to a single client operation.
   * When a client RPC modifies the namespace, the NameNode starts a new
   * transaction in the edit log to record it; each transaction is tagged
   * with a unique transactionId.
   */
  private static class TransactionId {
    // The txid this thread most recently wrote (mutable on purpose — the
    // same holder object is reused across transactions).
    public long txid;

    TransactionId(long value) {
      this.txid = value;
    }
  }

  /**
   * Stores the most recent transactionId issued to the current thread.
   * Each thread has its own copy: inside the synchronized section a thread
   * increments the global txid and records the resulting value in its own
   * ThreadLocal copy, so later (outside the lock) it can still see which
   * txid it must sync up to, even if other threads have since advanced the
   * global counter.
   */
  private static final ThreadLocal<TransactionId> myTransactionId = new ThreadLocal<TransactionId>() {
    @Override
    protected synchronized TransactionId initialValue() {
      // Long.MAX_VALUE marks "no transaction written yet by this thread".
      return new TransactionId(Long.MAX_VALUE);
    }
  };

  /**
   * Constructor for FSEditLog. Underlying journals are constructed, but 
   * no streams are opened until open() is called.
   * 
   * @param conf The namenode configuration
   * @param storage Storage object used by namenode
   * @param editsDirs List of journals to use
   */
  FSEditLog(Configuration conf, NNStorage storage, List<URI> editsDirs) {
    isSyncRunning = false;
    this.conf = conf;
    this.storage = storage;
    metrics = NameNode.getNameNodeMetrics();
    lastPrintTime = now();
     
    // Local directories the NameNode writes edit log files to.
    this.editsDirs = Lists.newArrayList(editsDirs);
    // Shared edits directories configured for HA (e.g. a JournalNode
    // quorum), read from the configuration.
    this.sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
  }

  /**
   * Initialize the journals for writing. Transitions the FSEditLog from
   * UNINITIALIZED (or CLOSED) to BETWEEN_LOG_SEGMENTS.
   */
  public synchronized void initJournalsForWrite() {
    Preconditions.checkState(state == State.UNINITIALIZED || state == State.CLOSED, "Unexpected state: %s", state);

    // initJournals() builds the journalSet field (one JournalManager per
    // edits-directory URI). After this, the FSEditLog can fan edit log
    // writes out to every configured storage location via journalSet.
    initJournals(this.editsDirs);

    // Previous segment closed, next not yet opened: ready for writing.
    state = State.BETWEEN_LOG_SEGMENTS;
  }

  /**
   * Initialize the shared journals for reading; used in the HA case.
   * Transitions the FSEditLog from UNINITIALIZED to OPEN_FOR_READING.
   *
   * Like initJournalsForWrite(), this delegates to initJournals(), but with
   * a different set of storage locations: in HA, the edit log lives in the
   * shared edits directories that the Active and Standby NameNodes both
   * read.
   */
  public synchronized void initSharedJournalsForRead() {
    if (state == State.OPEN_FOR_READING) {
      LOG.warn("Initializing shared journals for READ, already open for READ", new Exception());
      return;
    }
    Preconditions.checkState(state == State.UNINITIALIZED || state == State.CLOSED);

    // HA: the journals are the shared directories (sharedEditsDirs).
    initJournals(this.sharedEditsDirs);
    state = State.OPEN_FOR_READING;
  }
  
  /**
   * Build the JournalSet from the given edits-directory URIs: a
   * FileJournalManager for local directories, and a manager created by
   * createJournal() for any other scheme.
   */
  private synchronized void initJournals(List<URI> dirs) {
    // dfs.namenode.edits.dir.minimum (default 1): minimum number of
    // redundant journals required by the JournalSet.
    int minimumRedundantJournals = conf.getInt( DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);

    synchronized(journalSetLock) {
      // Holds one JournalManager per configured storage location.
      journalSet = new JournalSet(minimumRedundantJournals);

      // Create the appropriate JournalManager for each URI.
      for (URI u : dirs) {
        boolean required = FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(u);
        // Local file system URI: FileJournalManager writes edit logs to
        // local disk.
        if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
          StorageDirectory sd = storage.getStorageDirectory(u);
          if (sd != null) {
            journalSet.add(new FileJournalManager(conf, sd, storage), required, sharedEditsDirs.contains(u));
          }
        }
        // Non-local URI: createJournal() constructs the matching manager
        // (e.g. a QuorumJournalManager that writes to JournalNodes).
        // NOTE(review): createJournal() is defined outside this chunk.
        else {
          journalSet.add(createJournal(u), required, sharedEditsDirs.contains(u));
        }
      }
    }
 
    if (journalSet.isEmpty()) {
      LOG.error("No edits directories configured!");
    } 
  }

  /**
   * Get the list of URIs the editlog is using for storage.
   *
   * @return an unmodifiable view of the URIs in use by the edit log
   */
  Collection<URI> getEditURIs() {
    // Return an unmodifiable view instead of the internal list so callers
    // cannot mutate the edit log's directory configuration.
    return java.util.Collections.unmodifiableList(editsDirs);
  }

  /**
   * Initialize the edit log output stream and open the first log segment.
   * In the non-HA case this transitions the log from BETWEEN_LOG_SEGMENTS
   * to IN_SEGMENT.
   *
   * @throws IOException if the journals cannot be queried or opened
   */
  synchronized void openForWrite() throws IOException {
    Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS, "Bad state: %s", state);

    // The new segment starts at the last written txid + 1 (e.g. if the
    // latest recorded txid is 31, the segment starts at 32).
    long segmentTxId = getLastWrittenTxId() + 1;
    List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
    // Sanity check: ask the journals whether any existing log already
    // contains segmentTxId. If none does, the chosen txid is valid and a
    // new segment may start there. If one does, two segments would overlap
    // in txid range — a fatal inconsistency.
    journalSet.selectInputStreams(streams, segmentTxId, true);
    if (!streams.isEmpty()) {
      String error = String.format("Cannot start writing at txid %s " + "when there is a stream available for read: %s", segmentTxId, streams.get(0));
      IOUtils.cleanup(LOG, streams.toArray(new EditLogInputStream[0]));
      throw new IllegalStateException(error);
    }

    // Open the new segment; this moves the state to IN_SEGMENT.
    startLogSegment(segmentTxId, true);
    assert state == State.IN_SEGMENT : "Bad state: " + state;
  }
  
  /**
   * @return true if the log is currently open in write mode, regardless
   * of whether it actually has an open segment.
   */
  synchronized boolean isOpenForWrite() {
    switch (state) {
      case IN_SEGMENT:
      case BETWEEN_LOG_SEGMENTS:
        return true;
      default:
        return false;
    }
  }
  
  /**
   * @return true if the log is open in write mode and has a segment open
   * ready to take edits.
   */
  synchronized boolean isSegmentOpen() {
    final boolean segmentOpen = (state == State.IN_SEGMENT);
    return segmentOpen;
  }

  /**
   * @return true if the log is open in read mode.
   */
  public synchronized boolean isOpenForRead() {
    final boolean readable = (state == State.OPEN_FOR_READING);
    return readable;
  }

  /**
   * Shut down the edit log: transitions IN_SEGMENT to CLOSED.
   * First waits for any in-flight sync to finish, then ends the current log
   * segment; finally closes the journalSet and marks the state machine
   * CLOSED.
   */
  synchronized void close() {
    if (state == State.CLOSED) {
      LOG.debug("Closing log when already closed");
      return;
    }

    try {
      if (state == State.IN_SEGMENT) {
        assert editLogStream != null;
        // Let any running sync complete first.
        waitForSyncToFinish();
        // End the segment currently being written.
        endCurrentLogSegment(true);
      }
    } finally {
      // Close the journals even if ending the segment failed.
      if (journalSet != null && !journalSet.isEmpty()) {
        try {
          synchronized(journalSetLock) {
            journalSet.close();
          }
        } catch (IOException ioe) {
          LOG.warn("Error closing journalSet", ioe);
        }
      }
      // Move the state machine to CLOSED.
      state = State.CLOSED;
    }
  }


  /**
   * Format all configured journals which are not file-based.
   *
   * File-based journals are skipped, since they are formatted by the
   * Storage format code.
   */
  synchronized void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException {
    Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
        "Bad state: %s", state);

    for (JournalManager manager : journalSet.getJournalManagers()) {
      if (manager instanceof FileJournalManager) {
        continue; // formatted by the Storage format code instead
      }
      manager.format(nsInfo);
    }
  }
  
  /**
   * @return the non-file journals as FormatConfirmables. The
   * FileJournalManagers are confirmed separately, since they are also
   * StorageDirectories.
   */
  synchronized List<FormatConfirmable> getFormatConfirmables() {
    Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
        "Bad state: %s", state);

    final List<FormatConfirmable> confirmables = Lists.newArrayList();
    for (final JournalManager manager : journalSet.getJournalManagers()) {
      if (manager instanceof FileJournalManager) {
        continue; // confirmed via its StorageDirectory instead
      }
      confirmables.add(manager);
    }
    return confirmables;
  }

  /**
   * Write an operation to the edit log. Essentially all the log*() methods
   * (logDelete(), logCloseFile(), ...) funnel into this method, passing an
   * FSEditLogOp that identifies the operation type and its payload.
   *
   * Inside one synchronized block this method:
   *   1. begins a new transaction (assigning a unique, increasing txid),
   *   2. writes the op through the edit log output stream,
   *   3. ends the transaction (updates statistics), and
   *   4. decides whether the in-memory buffer must be forced to disk.
   *
   * Holding the lock across beginTransaction(), editLogStream.write() and
   * endTransaction() guarantees that concurrent log*() callers cannot
   * interleave their records, and that the txids handed out are unique and
   * strictly increasing.
   */
  void logEdit(final FSEditLogOp op) {
    // FSEditLog is a NameNode-global singleton; this synchronized block
    // serializes all writers. At any moment only one thread can be inside,
    // appending its op and taking the next txid.
    synchronized (this) {
      assert isOpenForWrite() : "bad state: " + state;

      // If an automatic sync has been scheduled (the current buffer filled
      // up), every writer arriving here loops in wait() until
      // isAutoSyncScheduled is cleared, so nothing more is appended to a
      // buffer that is about to be swapped out for flushing.
      waitIfAutoSyncScheduled();

      // Begin a new transaction: increments the global txid and stores it
      // in this thread's ThreadLocal. The enclosing synchronized block is
      // what makes the increment safe — only one thread runs this at a
      // time.
      long start = beginTransaction();
      // Still inside the lock, so txid cannot have been advanced by
      // another thread since beginTransaction().
      op.setTransactionId(txid);

      try {
        // Append the op via EditLogOutputStream. The stream fans the write
        // out to every journal (local disk and, in HA, the JournalNodes,
        // from which the standby NameNode later tails edits). At this
        // point the data only lands in one half of the double buffer.
        editLogStream.write(op);
      } catch (IOException ex) {
        // NOTE(review): the exception is silently swallowed here —
        // presumably journal failures are tracked inside the stream /
        // JournalSet; confirm before relying on it.
      }

      // End the transaction (timing statistics only).
      endTransaction(start);

      // If the current buffer is not full yet, return now — the data will
      // be flushed later, batched together with other transactions.
      if (!shouldForceSync()) {
        return;
      }

      // The buffer is full: schedule an automatic sync. Writers entering
      // after us will block in waitIfAutoSyncScheduled() until the buffers
      // have been swapped.
      isAutoSyncScheduled = true;
    }

    /**
     * logSync() is deliberately called OUTSIDE the synchronized block:
     * flushing the edit log to disk/network is expensive, and keeping it
     * out of the lock avoids stalling other log*() callers. Thanks to the
     * output stream's double buffer, appending new records and flushing
     * the previous buffer can proceed concurrently, which greatly improves
     * NameNode throughput.
     */

    // Force the buffered edits to persistent storage (disk and/or
    // JournalNodes) in one batched sync.
    logSync();
  }

  /**
   * Block until any scheduled automatic sync has been set up (i.e. until
   * {@code isAutoSyncScheduled} is cleared by doneWithAutoSyncScheduling()),
   * so writers do not append to a buffer that is about to be swapped.
   *
   * If the thread is interrupted while waiting, the wait is abandoned and
   * the interrupt status is restored for callers up the stack.
   */
  synchronized void waitIfAutoSyncScheduled() {
    try {
      while (isAutoSyncScheduled) {
        this.wait(1000);
      }
    } catch (InterruptedException e) {
      // Fix: previously the interrupt was silently swallowed. Restore the
      // thread's interrupt status so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
  }
  
  /**
   * Signal that an automatic sync scheduling is done, if one is scheduled;
   * wakes up writers blocked in waitIfAutoSyncScheduled().
   */
  synchronized void doneWithAutoSyncScheduling() {
    if (!isAutoSyncScheduled) {
      return;
    }
    isAutoSyncScheduled = false;
    notifyAll();
  }
  
  /**
   * Check if the buffered edits should be automatically synced to the
   * persistent store.
   *
   * @return true if any of the edit streams says that it should sync
   */
  private boolean shouldForceSync() {
    final boolean forceSync = editLogStream.shouldForceSync();
    return forceSync;
  }

  /**
   * Start a new transaction: increment the global FSEditLog.txid by one and
   * use it as the current operation's transactionId. Because txid is a
   * single global counter mutated only under the FSEditLog lock, the ids
   * handed out are unique and strictly increasing.
   *
   * The freshly assigned txid is also stored in the calling thread's
   * ThreadLocal (myTransactionId), so the thread can later determine — in
   * logSync(), outside the lock — exactly which transaction it must sync,
   * even though many threads call log*() concurrently and keep advancing
   * the global counter.
   *
   * @return the current time, consumed by endTransaction() for timing stats
   */
  private long beginTransaction() {
    // Caller must hold the FSEditLog lock.
    assert Thread.holdsLock(this);
    // Plain increment is sufficient (no AtomicLong needed): the caller's
    // synchronized block already guarantees exclusive access, which is what
    // keeps the sequence globally unique and increasing.
    txid++;

    // e.g. thread 1 increments to 76; thread 2 may later increment to 77 —
    // each remembers its own value below.

    // Record this thread's txid in its private ThreadLocal copy. Later
    // increments of the global counter by other threads do not affect it;
    // the thread will always read back its own value (e.g. 76).
    TransactionId id = myTransactionId.get();
    id.txid = txid;

    return now();
  }

  /**
   * End a transaction started by beginTransaction(); this only updates the
   * transaction-count and timing statistics.
   *
   * @param start transaction start time, as returned by beginTransaction()
   */
  private void endTransaction(long start) {
    assert Thread.holdsLock(this);

    final long elapsed = now() - start;
    numTransactions++;
    totalTimeTransactions += elapsed;
    if (metrics != null) {
      metrics.addTransaction(elapsed);
    }
  }

  /**
   * Return the transaction ID of the last transaction written to the log.
   *
   * @return the most recently assigned txid
   */
  public synchronized long getLastWrittenTxId() {
    return txid;
  }
  
  /**
   * @return the first transaction ID in the current log segment
   * @throws IllegalStateException if no segment is currently open
   */
  synchronized long getCurSegmentTxId() {
    Preconditions.checkState(isSegmentOpen(),
        "Bad state: %s", state);
    return curSegmentTxId;
  }
  
  /**
   * Set the transaction ID to use for the next transaction written.
   * The id may never move backwards: nextTxId must be at least the current
   * txid, which in turn must be at least the last synced txid.
   */
  synchronized void setNextTxId(long nextTxId) {
    Preconditions.checkArgument(synctxid <= txid &&
       nextTxId >= txid,
       "May not decrease txid." +
      " synctxid=%s txid=%s nextTxId=%s",
      synctxid, txid, nextTxId);
      
    // beginTransaction() pre-increments, so store nextTxId - 1.
    txid = nextTxId - 1;
  }
    
  /**
   * Blocks until all ongoing edits have been synced to disk.
   * This differs from logSync in that it waits for edits that have been
   * written by other threads, not just edits from the calling thread.
   *
   * NOTE: this should be done while holding the FSNamesystem lock, or
   * else more operations can start writing while this is in progress.
   */
  void logSyncAll() {
    // Record the most recent transaction ID as our own id, so logSync()
    // will wait until everything written so far is durable.
    synchronized (this) {
      TransactionId id = myTransactionId.get();
      id.txid = txid;
    }
    // Then make sure we're synced up to this point
    logSync();
  }
  
  /**
   * Sync this thread's edits to persistent storage. Ops written via
   * logEdit() first land in the output stream's in-memory buffer; logSync()
   * is what actually pushes them to the journals.
   *
   * Sync strategy:
   * - Every edit is serialized into the buffer under the FSEditLog lock and
   *   carries a unique, increasing transactionId.
   * - A syncing thread reads its own target txid from the ThreadLocal
   *   myTransactionId and compares it with synctxid (the highest txid
   *   already synced to the journals). If its txid is larger, the journals
   *   are stale; if additionally no other thread is syncing, this thread
   *   performs the sync. Thanks to the double-buffered output stream, the
   *   sync does not block concurrent logging.
   * - The volatile boolean isSyncRunning marks that a sync is in progress.
   *
   * The method is split into three separately locked parts, maximizing
   * concurrency:
   * 1. Under the lock: decide whether our edits are already synced; if not,
   *    swap the double buffers (setReadyToFlush) and set isSyncRunning.
   * 2. Without the lock: flush() the ready buffer to the journals. No lock
   *    is needed because after the swap no writer touches that buffer.
   * 3. Under the lock: publish synctxid, reset isSyncRunning, and notify
   *    waiting threads.
   */
  public void logSync() {
    long syncStart = 0;
    long mytxid = myTransactionId.get().txid;
    
    boolean sync = false;
    try {
      EditLogOutputStream logStream = null;
      // Without the isAutoSyncScheduled gate in logEdit(), two problems
      // would arise: writers could keep filling a buffer that is already
      // full, and many threads could race here to flush simultaneously.

      // The thread that gets the lock first prepares the flush; the others
      // enter one by one and park in the wait loop below.
      synchronized (this) {
        try {
          // Part 1: print statistics (rate-limited to once a minute).
          printStatistics(false);

          // While our txid is not yet covered and another thread is
          // actively syncing, wait — only one sync may run at a time
          // (isSyncRunning == true means a flush is in progress). Many
          // threads can be parked here.
          while (mytxid > synctxid && isSyncRunning) {
            try {
              wait(1000);
            } catch (InterruptedException ie) {
              // Swallowed on purpose: the loop re-checks its condition.
              // NOTE(review): the interrupt status is not restored here.
            }
          }

          // If our txid is already <= synctxid, another thread's sync
          // covered our edits; just count it as batched and return.
          if (mytxid <= synctxid) {
            numTransactionsBatchedInSync++;
            if (metrics != null) { metrics.incrTransactionsBatchedInSync(); }
            return;
          }

          // We will perform the sync, up to the current global txid.
          syncStart = txid;
          isSyncRunning = true;
          sync = true;
  
          try {
            if (journalSet.isEmpty()) {
              throw new IOException("No journals available to flush");
            }
            // Swap the two buffers so the filled one can be flushed while
            // writers keep appending to the other.
            editLogStream.setReadyToFlush();
          } catch (IOException e) {
            final String msg = "Could not sync enough journals to persistent storage " + "due to " + e.getMessage() + ". " + "Unsynced transactions: " + (txid - synctxid);
            LOG.fatal(msg, new Exception());
            synchronized(journalSetLock) {
              IOUtils.cleanup(LOG, journalSet);
            }
            terminate(1, msg);
          }
        } finally {
          // Always clear isAutoSyncScheduled (and notify) so writers
          // blocked in waitIfAutoSyncScheduled() are not stuck forever.
          doneWithAutoSyncScheduling();
        }
        logStream = editLogStream;
      }

      // Lock released.

      // Part 2: flush the swapped buffer to the edit log files, outside
      // the lock.
      long start = now();
      try {
        if (logStream != null) {
          // The expensive step (possibly tens of ms): flush() pushes the
          // buffered data through every underlying stream — to local disk
          // and/or over the network to the JournalNodes. Doing this
          // unlocked keeps logging and flushing concurrent; with a lock it
          // would all be serialized.
          logStream.flush();
        }
      } catch (IOException ex) {
        synchronized (this) {
          // Could not persist an edit to enough journals (e.g. a majority
          // of the JournalNodes): the edit is lost, which is fatal.
          final String msg = "Could not sync enough journals to persistent storage. " + "Unsynced transactions: " + (txid - synctxid);
          LOG.fatal(msg, new Exception());
          synchronized(journalSetLock) {
            IOUtils.cleanup(LOG, journalSet);
          }
          // Terminate the NameNode.
          terminate(1, msg);
        }
      }
      long elapsed = now() - start;
  
      if (metrics != null) {
        metrics.addSync(elapsed);
      }
      
    } finally {
      // Part 3: publish the result and reset the flags.
      synchronized (this) {
        if (sync) {
          // Everything up to syncStart is now durable.
          synctxid = syncStart;
          // Allow the next sync to proceed.
          isSyncRunning = false;
        }
        // Wake any threads waiting on this sync.
        this.notifyAll();
     }
    }
  }

  /**
   * Log transaction and sync statistics, at most once per minute unless
   * {@code force} is true.
   */
  private void printStatistics(boolean force) {
    final long currentTime = now();
    if (!force && lastPrintTime + 60000 > currentTime) {
      return;
    }
    lastPrintTime = currentTime;
    final StringBuilder stats = new StringBuilder()
        .append("Number of transactions: ")
        .append(numTransactions)
        .append(" Total time for transactions(ms): ")
        .append(totalTimeTransactions)
        .append(" Number of transactions batched in Syncs: ")
        .append(numTransactionsBatchedInSync)
        .append(" Number of syncs: ")
        .append(editLogStream.getNumSync())
        .append(" SyncTimes(ms): ")
        .append(journalSet.getSyncTimes());
    LOG.info(stats);
  }

  /** Record the RPC IDs on the op if necessary. */
  private void logRpcIds(FSEditLogOp op, boolean toLogRpcIds) {
    if (!toLogRpcIds) {
      return;
    }
    op.setRpcClientId(Server.getClientId());
    op.setRpcCallId(Server.getCallId());
  }
  
  /** 
   * Add open lease record to edit log. 
   * Records the block locations of the last block.
   */
  public void logOpenFile(String path, INodeFile newNode, boolean overwrite,
      boolean toLogRpcIds) {
    Preconditions.checkArgument(newNode.isUnderConstruction());
    final PermissionStatus permStatus = newNode.getPermissionStatus();
    // Build the ADD op from the file's current state.
    final AddOp addOp = AddOp.getInstance(cache.get())
      .reset()
      .setInodeId(newNode.getId())
      .setPath(path)
      .setReplication(newNode.getFileReplication())
      .setModificationTime(newNode.getModificationTime())
      .setAccessTime(newNode.getAccessTime())
      .setBlockSize(newNode.getPreferredBlockSize())
      .setBlocks(newNode.getBlocks())
      .setPermissionStatus(permStatus)
      .setClientName(newNode.getFileUnderConstructionFeature().getClientName())
      .setClientMachine(
          newNode.getFileUnderConstructionFeature().getClientMachine())
      .setOverwrite(overwrite)
      .setStoragePolicyId(newNode.getLocalStoragePolicyID());

    // Include ACLs when the inode has them.
    final AclFeature aclFeature = newNode.getAclFeature();
    if (aclFeature != null) {
      addOp.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
    }

    // Include extended attributes when the inode has them.
    final XAttrFeature xattrFeature = newNode.getXAttrFeature();
    if (xattrFeature != null) {
      addOp.setXAttrs(xattrFeature.getXAttrs());
    }

    logRpcIds(addOp, toLogRpcIds);
    logEdit(addOp);
  }

  /** 
   * Add close lease record to edit log.
   */
  public void logCloseFile(String path, INodeFile newNode) {
    // Snapshot the file's final state into a CLOSE op.
    final CloseOp closeOp = CloseOp.getInstance(cache.get())
      .setPath(path)
      .setReplication(newNode.getFileReplication())
      .setModificationTime(newNode.getModificationTime())
      .setAccessTime(newNode.getAccessTime())
      .setBlockSize(newNode.getPreferredBlockSize())
      .setBlocks(newNode.getBlocks())
      .setPermissionStatus(newNode.getPermissionStatus());

    logEdit(closeOp);
  }
  
  /** Record the addition of a new block to a file under construction. */
  public void logAddBlock(String path, INodeFile file) {
    Preconditions.checkArgument(file.isUnderConstruction());
    final BlockInfo[] fileBlocks = file.getBlocks();
    Preconditions.checkState(fileBlocks != null && fileBlocks.length > 0);
    final int count = fileBlocks.length;
    // Penultimate block, or null when the file has only a single block.
    final BlockInfo penultimate = count > 1 ? fileBlocks[count - 2] : null;
    final BlockInfo newest = fileBlocks[count - 1];
    final AddBlockOp blockOp = AddBlockOp.getInstance(cache.get()).setPath(path)
        .setPenultimateBlock(penultimate).setLastBlock(newest);
    logEdit(blockOp);
  }
  
  /** Record an update to the block list of a file under construction. */
  public void logUpdateBlocks(String path, INodeFile file, boolean toLogRpcIds) {
    Preconditions.checkArgument(file.isUnderConstruction());
    final UpdateBlocksOp updateOp = UpdateBlocksOp.getInstance(cache.get())
      .setPath(path)
      .setBlocks(file.getBlocks());
    logRpcIds(updateOp, toLogRpcIds);
    logEdit(updateOp);
  }
  
  /** 
   * Add a mkdir (directory creation) record to the edit log.
   */
  public void logMkDir(String path, INode newNode) {
    PermissionStatus permissions = newNode.getPermissionStatus();
    // Builder pattern: assemble an op object describing the mkdir.
    MkdirOp op = MkdirOp.getInstance(cache.get())
      .reset()
      .setInodeId(newNode.getId())
      .setPath(path)
      .setTimestamp(newNode.getModificationTime())
      .setPermissionStatus(permissions);

    // Attach the directory's ACL entries, if any.
    AclFeature f = newNode.getAclFeature();
    if (f != null) {
      op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
    }

    // Attach the directory's extended attributes, if any.
    XAttrFeature x = newNode.getXAttrFeature();
    if (x != null) {
      op.setXAttrs(x.getXAttrs());
    }
    // Once the op is fully populated, append it to the edit log.
    logEdit(op);
  }
  
  /** 
   * Add an (old-style) rename record to the edit log.
   * TODO: use String parameters until just before writing to disk
   */
  void logRename(String src, String dst, long timestamp, boolean toLogRpcIds) {
    final RenameOldOp renameOp = RenameOldOp.getInstance(cache.get());
    renameOp.setSource(src);
    renameOp.setDestination(dst);
    renameOp.setTimestamp(timestamp);
    logRpcIds(renameOp, toLogRpcIds);
    logEdit(renameOp);
  }
  
  /** 
   * Add a rename record (with rename options) to the edit log.
   */
  void logRename(String src, String dst, long timestamp, boolean toLogRpcIds,
      Options.Rename... options) {
    final RenameOp renameOp = RenameOp.getInstance(cache.get());
    renameOp.setSource(src);
    renameOp.setDestination(dst);
    renameOp.setTimestamp(timestamp);
    renameOp.setOptions(options);
    logRpcIds(renameOp, toLogRpcIds);
    logEdit(renameOp);
  }
  
  /** 
   * Record a replication-factor change for {@code src} in the edit log.
   */
  void logSetReplication(String src, short replication) {
    final SetReplicationOp op = SetReplicationOp.getInstance(cache.get());
    op.setPath(src);
    op.setReplication(replication);
    logEdit(op);
  }

  /** 
   * Record a storage-policy-id change for {@code src} in the edit log.
   */
  void logSetStoragePolicy(String src, byte policyId) {
    final SetStoragePolicyOp op = SetStoragePolicyOp.getInstance(cache.get());
    op.setPath(src);
    op.setPolicyId(policyId);
    logEdit(op);
  }

  /** Record a quota change for a directory in the edit log.
   * 
   * @param src the string representation of the path to a directory
   * @param nsQuota namespace quota
   * @param dsQuota diskspace quota
   */
  void logSetQuota(String src, long nsQuota, long dsQuota) {
    final SetQuotaOp quotaOp = SetQuotaOp.getInstance(cache.get());
    quotaOp.setSource(src);
    quotaOp.setNSQuota(nsQuota);
    quotaOp.setDSQuota(dsQuota);
    logEdit(quotaOp);
  }

  /** Record a permissions change in the edit log. */
  void logSetPermissions(String src, FsPermission permissions) {
    final SetPermissionsOp permOp = SetPermissionsOp.getInstance(cache.get());
    permOp.setSource(src);
    permOp.setPermissions(permissions);
    logEdit(permOp);
  }

  /** Record an owner/group change in the edit log. */
  void logSetOwner(String src, String username, String groupname) {
    final SetOwnerOp ownerOp = SetOwnerOp.getInstance(cache.get());
    ownerOp.setSource(src);
    ownerOp.setUser(username);
    ownerOp.setGroup(groupname);
    logEdit(ownerOp);
  }
  
  /**
   * Record a concat(trg, srcs...) operation in the edit log.
   */
  void logConcat(String trg, String[] srcs, long timestamp, boolean toLogRpcIds) {
    final ConcatDeleteOp concatOp = ConcatDeleteOp.getInstance(cache.get());
    concatOp.setTarget(trg);
    concatOp.setSources(srcs);
    concatOp.setTimestamp(timestamp);
    logRpcIds(concatOp, toLogRpcIds);
    logEdit(concatOp);
  }
  
  /** 
   * Add a file/directory deletion record to the edit log.
   *
   * Builds a DeleteOp (an FSEditLogOp subclass that carries the information
   * from the ClientProtocol.delete() call), attaches the RPC call identifiers
   * via logRpcIds(), and finally appends the op to the edit log via logEdit().
   */
  void logDelete(String src, long timestamp, boolean toLogRpcIds) {
    // Build the DeleteOp describing this deletion.
    DeleteOp op = DeleteOp.getInstance(cache.get())
      .setPath(src)
      .setTimestamp(timestamp);

    // Attach the RPC call identifiers, if requested.
    logRpcIds(op, toLogRpcIds);
    // Append the delete operation to the edit log.
    logEdit(op);
  }

  /**
   * Record a new legacy (V1) block generation stamp in the edit log.
   */
  void logGenerationStampV1(long genstamp) {
    final SetGenstampV1Op op = SetGenstampV1Op.getInstance(cache.get());
    op.setGenerationStamp(genstamp);
    logEdit(op);
  }

  /**
   * Record a new (V2) block generation stamp in the edit log.
   */
  void logGenerationStampV2(long genstamp) {
    final SetGenstampV2Op op = SetGenstampV2Op.getInstance(cache.get());
    op.setGenerationStamp(genstamp);
    logEdit(op);
  }

  /**
   * Record a newly allocated block ID in the edit log.
   */
  void logAllocateBlockId(long blockId) {
    final AllocateBlockIdOp op = AllocateBlockIdOp.getInstance(cache.get());
    op.setBlockId(blockId);
    logEdit(op);
  }

  /** 
   * Record a modification/access time change in the edit log.
   */
  void logTimes(String src, long mtime, long atime) {
    final TimesOp timesOp = TimesOp.getInstance(cache.get());
    timesOp.setPath(src);
    timesOp.setModificationTime(mtime);
    timesOp.setAccessTime(atime);
    logEdit(timesOp);
  }

  /** 
   * Record the creation of a symlink in the edit log.
   */
  void logSymlink(String path, String value, long mtime, long atime,
      INodeSymlink node, boolean toLogRpcIds) {
    final SymlinkOp symlinkOp = SymlinkOp.getInstance(cache.get());
    symlinkOp.setId(node.getId());
    symlinkOp.setPath(path);
    symlinkOp.setValue(value);
    symlinkOp.setModificationTime(mtime);
    symlinkOp.setAccessTime(atime);
    symlinkOp.setPermissionStatus(node.getPermissionStatus());
    logRpcIds(symlinkOp, toLogRpcIds);
    logEdit(symlinkOp);
  }
  
  /**
   * Record the issuing of a delegation token in the edit log.
   * @param id DelegationTokenIdentifier
   * @param expiryTime of the token
   */
  void logGetDelegationToken(DelegationTokenIdentifier id,
      long expiryTime) {
    final GetDelegationTokenOp tokenOp = GetDelegationTokenOp.getInstance(cache.get());
    tokenOp.setDelegationTokenIdentifier(id);
    tokenOp.setExpiryTime(expiryTime);
    logEdit(tokenOp);
  }
  
  /** Record the renewal of a delegation token in the edit log. */
  void logRenewDelegationToken(DelegationTokenIdentifier id,
      long expiryTime) {
    final RenewDelegationTokenOp renewOp = RenewDelegationTokenOp.getInstance(cache.get());
    renewOp.setDelegationTokenIdentifier(id);
    renewOp.setExpiryTime(expiryTime);
    logEdit(renewOp);
  }
  
  /** Record the cancellation of a delegation token in the edit log. */
  void logCancelDelegationToken(DelegationTokenIdentifier id) {
    final CancelDelegationTokenOp cancelOp = CancelDelegationTokenOp.getInstance(cache.get());
    cancelOp.setDelegationTokenIdentifier(id);
    logEdit(cancelOp);
  }
  
  /** Record an update to the delegation-token master key in the edit log. */
  void logUpdateMasterKey(DelegationKey key) {
    final UpdateMasterKeyOp keyOp = UpdateMasterKeyOp.getInstance(cache.get());
    keyOp.setDelegationKey(key);
    logEdit(keyOp);
  }

  /** Record the reassignment of a lease to a new holder in the edit log. */
  void logReassignLease(String leaseHolder, String src, String newHolder) {
    final ReassignLeaseOp leaseOp = ReassignLeaseOp.getInstance(cache.get());
    leaseOp.setLeaseHolder(leaseHolder);
    leaseOp.setPath(src);
    leaseOp.setNewHolder(newHolder);
    logEdit(leaseOp);
  }
  
  /** Record the creation of a snapshot in the edit log. */
  void logCreateSnapshot(String snapRoot, String snapName, boolean toLogRpcIds) {
    final CreateSnapshotOp snapOp = CreateSnapshotOp.getInstance(cache.get());
    snapOp.setSnapshotRoot(snapRoot);
    snapOp.setSnapshotName(snapName);
    logRpcIds(snapOp, toLogRpcIds);
    logEdit(snapOp);
  }
  
  /** Record the deletion of a snapshot in the edit log. */
  void logDeleteSnapshot(String snapRoot, String snapName, boolean toLogRpcIds) {
    final DeleteSnapshotOp snapOp = DeleteSnapshotOp.getInstance(cache.get());
    snapOp.setSnapshotRoot(snapRoot);
    snapOp.setSnapshotName(snapName);
    logRpcIds(snapOp, toLogRpcIds);
    logEdit(snapOp);
  }
  
  /** Record the renaming of a snapshot in the edit log. */
  void logRenameSnapshot(String path, String snapOldName, String snapNewName,
      boolean toLogRpcIds) {
    final RenameSnapshotOp snapOp = RenameSnapshotOp.getInstance(cache.get());
    snapOp.setSnapshotRoot(path);
    snapOp.setSnapshotOldName(snapOldName);
    snapOp.setSnapshotNewName(snapNewName);
    logRpcIds(snapOp, toLogRpcIds);
    logEdit(snapOp);
  }
  
  /** Record that snapshots were enabled on a directory in the edit log. */
  void logAllowSnapshot(String path) {
    final AllowSnapshotOp allowOp = AllowSnapshotOp.getInstance(cache.get());
    allowOp.setSnapshotRoot(path);
    logEdit(allowOp);
  }

  /** Record that snapshots were disabled on a directory in the edit log. */
  void logDisallowSnapshot(String path) {
    final DisallowSnapshotOp disallowOp = DisallowSnapshotOp.getInstance(cache.get());
    disallowOp.setSnapshotRoot(path);
    logEdit(disallowOp);
  }

  /**
   * Record the addition of a cache directive in the edit log.
   */
  void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
      boolean toLogRpcIds) {
    final AddCacheDirectiveInfoOp directiveOp =
        AddCacheDirectiveInfoOp.getInstance(cache.get());
    directiveOp.setDirective(directive);
    logRpcIds(directiveOp, toLogRpcIds);
    logEdit(directiveOp);
  }

  /** Record the modification of a cache directive in the edit log. */
  void logModifyCacheDirectiveInfo(
      CacheDirectiveInfo directive, boolean toLogRpcIds) {
    final ModifyCacheDirectiveInfoOp directiveOp =
        ModifyCacheDirectiveInfoOp.getInstance(cache.get());
    directiveOp.setDirective(directive);
    logRpcIds(directiveOp, toLogRpcIds);
    logEdit(directiveOp);
  }

  /** Record the removal of a cache directive in the edit log. */
  void logRemoveCacheDirectiveInfo(Long id, boolean toLogRpcIds) {
    final RemoveCacheDirectiveInfoOp removeOp =
        RemoveCacheDirectiveInfoOp.getInstance(cache.get());
    removeOp.setId(id);
    logRpcIds(removeOp, toLogRpcIds);
    logEdit(removeOp);
  }

  /** Record the addition of a cache pool in the edit log. */
  void logAddCachePool(CachePoolInfo pool, boolean toLogRpcIds) {
    final AddCachePoolOp poolOp = AddCachePoolOp.getInstance(cache.get());
    poolOp.setPool(pool);
    logRpcIds(poolOp, toLogRpcIds);
    logEdit(poolOp);
  }

  /** Record the modification of a cache pool in the edit log. */
  void logModifyCachePool(CachePoolInfo info, boolean toLogRpcIds) {
    final ModifyCachePoolOp poolOp = ModifyCachePoolOp.getInstance(cache.get());
    poolOp.setInfo(info);
    logRpcIds(poolOp, toLogRpcIds);
    logEdit(poolOp);
  }

  /** Record the removal of a cache pool in the edit log. */
  void logRemoveCachePool(String poolName, boolean toLogRpcIds) {
    final RemoveCachePoolOp poolOp = RemoveCachePoolOp.getInstance(cache.get());
    poolOp.setPoolName(poolName);
    logRpcIds(poolOp, toLogRpcIds);
    logEdit(poolOp);
  }

  /** Record the start of a rolling upgrade in the edit log. */
  void logStartRollingUpgrade(long startTime) {
    final RollingUpgradeOp startOp = RollingUpgradeOp.getStartInstance(cache.get());
    startOp.setTime(startTime);
    logEdit(startOp);
  }

  /** Record the finalization of a rolling upgrade in the edit log. */
  void logFinalizeRollingUpgrade(long finalizeTime) {
    final RollingUpgradeOp finalizeOp = RollingUpgradeOp.getFinalizeInstance(cache.get());
    finalizeOp.setTime(finalizeTime);
    logEdit(finalizeOp);
  }

  /** Record a set-ACL operation in the edit log. */
  void logSetAcl(String src, List<AclEntry> entries) {
    final SetAclOp aclOp = SetAclOp.getInstance();
    aclOp.src = src;
    aclOp.aclEntries = entries;
    logEdit(aclOp);
  }
  
  /** Record a set-xattrs operation in the edit log. */
  void logSetXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
    final SetXAttrOp xattrOp = SetXAttrOp.getInstance();
    xattrOp.src = src;
    xattrOp.xAttrs = xAttrs;
    logRpcIds(xattrOp, toLogRpcIds);
    logEdit(xattrOp);
  }
  
  /** Record a remove-xattrs operation in the edit log. */
  void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
    final RemoveXAttrOp xattrOp = RemoveXAttrOp.getInstance();
    xattrOp.src = src;
    xattrOp.xAttrs = xAttrs;
    logRpcIds(xattrOp, toLogRpcIds);
    logEdit(xattrOp);
  }

  /**
   * Get all the journals this edit log is currently operating on.
   * @return the list of journal-and-stream pairs held by the journal set
   */
  synchronized List<JournalAndStream> getJournals() {
    return journalSet.getAllJournalStreams();
  }
  
  /**
   * Used only by tests.
   * @return the underlying journal set
   */
  @VisibleForTesting
  synchronized public JournalSet getJournalSet() {
    return journalSet;
  }
  
  // Test hook: replace the journal set with a mock/stub implementation.
  @VisibleForTesting
  synchronized void setJournalSetForTesting(JournalSet js) {
    this.journalSet = js;
  }
  
  /**
   * Used only by tests.
   * Replaces the NameNode metrics instance used by this edit log.
   */
  @VisibleForTesting
  void setMetricsForTests(NameNodeMetrics metrics) {
    this.metrics = metrics;
  }
  
  /**
   * Return a manifest of what finalized edit logs are available,
   * starting from the given transaction id.
   * @param fromTxId first transaction the manifest should cover
   */
  public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId)
      throws IOException {
    return journalSet.getEditLogManifest(fromTxId);
  }
 
  /**
   * Finalizes the current edit log and opens a new log segment.
   * @return the transaction id of the BEGIN_LOG_SEGMENT transaction
   * in the new log.
   */
  synchronized long rollEditLog() throws IOException {
    LOG.info("Rolling edit logs");
    // Finalize the in-progress segment (writes the END_LOG_SEGMENT marker).
    endCurrentLogSegment(true);
    
    // The new segment starts right after the last transaction written.
    long nextTxId = getLastWrittenTxId() + 1;
    startLogSegment(nextTxId, true);
    
    assert curSegmentTxId == nextTxId;
    return nextTxId;
  }
  
  /**
   * Starts a new log segment beginning at {@code segmentTxId}.
   *
   * Edit logs are stored as segments partitioned by transaction id. This
   * method asks {@code journalSet} to open an output stream on every
   * configured edit-log storage location; the combined stream object is
   * saved in {@code editLogStream}, and all subsequent writes performed by
   * FSEditLog go through that stream (each call on it is forwarded to the
   * per-location output streams held by the journal set). Finally,
   * {@code curSegmentTxId} is set to the given segment txid and the state
   * machine moves to IN_SEGMENT.
   *
   * @param segmentTxId first transaction id of the new segment; must be
   *                    exactly the next expected txid
   * @param writeHeaderTxn whether to log (and sync) an OP_START_LOG_SEGMENT
   *                       marker as the segment's first transaction
   * @throws IOException if too few journals could start the segment
   */
  synchronized void startLogSegment(final long segmentTxId, boolean writeHeaderTxn) throws IOException {
    // Sanity-check the state machine and the requested txid.
    LOG.info("Starting log segment at " + segmentTxId);
    Preconditions.checkArgument(segmentTxId > 0, "Bad txid: %s", segmentTxId);
    Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS, "Bad state: %s", state);
    Preconditions.checkState(segmentTxId > curSegmentTxId, "Cannot start writing to log segment " + segmentTxId + " when previous log segment started at " + curSegmentTxId);
    Preconditions.checkArgument(segmentTxId == txid + 1, "Cannot start log segment at txid %s when next expected " +  "txid is %s", segmentTxId, txid + 1);
    
    numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;

    storage.attemptRestoreRemovedStorage();

    // Initialize editLogStream: open an output stream on each configured
    // journal (e.g. local FileJournalManager directories and, if configured,
    // a QuorumJournalManager), wrapped as a single combined stream.
    try {
      editLogStream = journalSet.startLogSegment(segmentTxId, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    } catch (IOException ex) {
      throw new IOException("Unable to start log segment " + segmentTxId + ": too few journals successfully started.", ex);
    }

    // The segment now being written starts at segmentTxId.
    curSegmentTxId = segmentTxId;
    state = State.IN_SEGMENT;

    if (writeHeaderTxn) {
      logEdit(LogSegmentOp.getInstance(cache.get(), FSEditLogOpCodes.OP_START_LOG_SEGMENT));
      logSync();
    }
  }

  /**
   * Finalizes (closes) the log segment that is currently being written.
   *
   * Optionally logs an OP_END_LOG_SEGMENT marker, then calls
   * journalSet.finalizeLogSegment() to finalize the transactions from
   * {@code curSegmentTxId} through the last written txid on all journals
   * (producing a finalized edits file such as edits_0032-0034), and finally
   * moves the state machine to BETWEEN_LOG_SEGMENTS.
   *
   * @param writeEndTxn whether to log (and sync) the OP_END_LOG_SEGMENT
   *                    marker before finalizing
   */
  public synchronized void endCurrentLogSegment(boolean writeEndTxn) {
    LOG.info("Ending log segment " + curSegmentTxId);
    Preconditions.checkState(isSegmentOpen(), "Bad state: %s", state);
    
    if (writeEndTxn) {
      logEdit(LogSegmentOp.getInstance(cache.get(),  FSEditLogOpCodes.OP_END_LOG_SEGMENT));
      logSync();
    }

    printStatistics(true);

    // Last transaction id that has been written to this segment.
    final long lastTxId = getLastWrittenTxId();
    
    try {
      // Finalize the range curSegmentTxId..lastTxId on all journals.
      journalSet.finalizeLogSegment(curSegmentTxId, lastTxId);
      editLogStream = null;
    } catch (IOException e) {
      // All journals have failed; as elsewhere in this class, the failure
      // is handled in logSync rather than propagated here.
    }

    // Back to the between-segments state.
    state = State.BETWEEN_LOG_SEGMENTS;
  }
  
  /**
   * Abort all current logs. Called from the backup node.
   */
  synchronized void abortCurrentLogSegment() {
    // abort can be called at any time, so the stream may already be gone.
    if (editLogStream == null) {
      return;
    }
    try {
      editLogStream.abort();
      editLogStream = null;
      state = State.BETWEEN_LOG_SEGMENTS;
    } catch (IOException e) {
      LOG.warn("All journals failed to abort", e);
    }
  }

  /**
   * Archive any log files that are older than the given txid.
   * 
   * If the edit log is not open for write, then this call returns with no effect.
   */
  @Override
  public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
    // Should not purge logs unless they are open for write.
    // This prevents the SBN from purging logs on shared storage, for example.
    if (!isOpenForWrite()) {
      return;
    }
    
    // Purging past the start of the current segment would be a bug.
    assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
      minTxIdToKeep <= curSegmentTxId :
      "cannot purge logs older than txid " + minTxIdToKeep +
      " when current segment starts at " + curSegmentTxId;
    // A threshold of 0 means there is nothing to purge.
    if (minTxIdToKeep == 0) {
      return;
    }
    
    // This could be improved to not need synchronization. But currently,
    // journalSet is not threadsafe, so we need to synchronize this method.
    try {
      journalSet.purgeLogsOlderThan(minTxIdToKeep);
    } catch (IOException ex) {
      //All journals have failed, it will be handled in logSync.
    }
  }

  
  /**
   * The actual sync activity happens while not synchronized on this object.
   * Thus, synchronized activities that require that they are not concurrent
   * with file operations should wait for any running sync to finish.
   */
  synchronized void waitForSyncToFinish() {
    // Re-check the flag every second until the in-flight sync completes.
    while (isSyncRunning) {
      try {
        wait(1000);
      } catch (InterruptedException ie) {}
      // NOTE(review): the interrupt is swallowed and not re-asserted;
      // confirm callers do not rely on interruptibility here.
    }
  }

  /**
   * Return the txid of the last synced transaction.
   */
  public synchronized long getSyncTxId() {
    return synctxid;
  }


  // Sets the initial capacity of the flush buffer on all journals.
  synchronized void setOutputBufferCapacity(int size) {
    journalSet.setOutputBufferCapacity(size);
  }

  /**
   * Create (or find if already exists) an edit output stream, which
   * streams journal records (edits) to the specified backup node.<br>
   * 
   * The new BackupNode will start receiving edits the next time this
   * NameNode's logs roll.
   * 
   * @param bnReg the backup node registration information.
   * @param nnReg this (active) name-node registration.
   * @throws IOException
   */
  synchronized void registerBackupNode(
      NamenodeRegistration bnReg, // backup node
      NamenodeRegistration nnReg) // active name-node
  throws IOException {
    // Checkpoint nodes do not stream edits.
    if (bnReg.isRole(NamenodeRole.CHECKPOINT)) {
      return;
    }
    
    // Nothing to do if this backup node is already registered.
    if (findBackupJournal(bnReg) != null) {
      LOG.info("Backup node " + bnReg + " re-registers");
      return;
    }
    
    LOG.info("Registering new backup node: " + bnReg);
    final BackupJournalManager manager = new BackupJournalManager(bnReg, nnReg);
    synchronized(journalSetLock) {
      journalSet.add(manager, false);
    }
  }
  
  /** Remove the backup journal associated with the given registration, if any. */
  synchronized void releaseBackupStream(NamenodeRegistration registration)
      throws IOException {
    final BackupJournalManager manager = this.findBackupJournal(registration);
    if (manager == null) {
      return;
    }
    LOG.info("Removing backup journal " + manager);
    synchronized(journalSetLock) {
      journalSet.remove(manager);
    }
  }
  
  /**
   * Find the BackupJournalManager associated with this BackupNode.
   * 
   * @return null if it cannot be found
   */
  private synchronized BackupJournalManager findBackupJournal(
      NamenodeRegistration bnReg) {
    for (JournalManager manager : journalSet.getJournalManagers()) {
      if (!(manager instanceof BackupJournalManager)) {
        continue;
      }
      BackupJournalManager candidate = (BackupJournalManager) manager;
      if (candidate.matchesRegistration(bnReg)) {
        return candidate;
      }
    }
    return null;
  }

  /**
   * Write an operation to the edit log. Do not sync to persistent
   * store yet.
   *
   * @param length number of bytes of {@code data} to write
   * @param data pre-serialized operation bytes
   */   
  synchronized void logEdit(final int length, final byte[] data) {
    // Assigns the next txid and starts timing the transaction.
    long start = beginTransaction();

    try {
      editLogStream.writeRaw(data, 0, length);
    } catch (IOException ex) {
      // All journals have failed, it will be handled in logSync.
    }
    endTransaction(start);
  }

  /**
   * Run recovery on all journals to recover any unclosed segments.
   * Must be called between log segments (e.g. before starting a new one).
   */
  synchronized void recoverUnclosedStreams() {
    Preconditions.checkState(
        state == State.BETWEEN_LOG_SEGMENTS,
        "May not recover segments - wrong state: %s", state);
    try {
      journalSet.recoverUnfinalizedSegments();
    } catch (IOException ex) {
      // All journals have failed, it is handled in logSync.
      // TODO: are we sure this is OK?
    }
  }

  /**
   * @return the creation time of the shared journal
   * @throws IOException if no shared journal is configured
   */
  public long getSharedLogCTime() throws IOException {
    for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
      if (journal.isShared()) {
        return journal.getManager().getJournalCTime();
      }
    }
    throw new IOException("No shared log found.");
  }

  /** Run the pre-upgrade step on every shared journal. */
  public synchronized void doPreUpgradeOfSharedLog() throws IOException {
    for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
      if (journal.isShared()) {
        journal.getManager().doPreUpgrade();
      }
    }
  }

  /** Run the upgrade step on every shared journal. */
  public synchronized void doUpgradeOfSharedLog() throws IOException {
    for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
      if (journal.isShared()) {
        journal.getManager().doUpgrade(storage);
      }
    }
  }

  /** Finalize the upgrade on every shared journal. */
  public synchronized void doFinalizeOfSharedLog() throws IOException {
    for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
      if (journal.isShared()) {
        journal.getManager().doFinalize();
      }
    }
  }

  /**
   * Ask the shared journal whether a rollback to the previous storage
   * state is possible.
   * @throws IOException if no shared journal is configured
   */
  public synchronized boolean canRollBackSharedLog(StorageInfo prevStorage,
      int targetLayoutVersion) throws IOException {
    for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
      if (journal.isShared()) {
        return journal.getManager().canRollBack(storage, prevStorage,
            targetLayoutVersion);
      }
    }
    throw new IOException("No shared log found.");
  }

  /** Roll back every shared journal. */
  public synchronized void doRollback() throws IOException {
    for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
      if (journal.isShared()) {
        journal.getManager().doRollback();
      }
    }
  }

  /** Discard segments from the given marker txid onward on all journals. */
  public synchronized void discardSegments(long markerTxid)
      throws IOException {
    for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
      journal.getManager().discardSegments(markerTxid);
    }
  }

  // Delegates input-stream selection to the journal set.
  @Override
  public void selectInputStreams(Collection<EditLogInputStream> streams,
      long fromTxId, boolean inProgressOk) throws IOException {
    journalSet.selectInputStreams(streams, fromTxId, inProgressOk);
  }

  // Convenience overload: no recovery context, in-progress streams allowed.
  public Collection<EditLogInputStream> selectInputStreams(
      long fromTxId, long toAtLeastTxId) throws IOException {
    return selectInputStreams(fromTxId, toAtLeastTxId, null, true);
  }
  
  /**
   * Select a list of input streams.
   * 
   * @param fromTxId first transaction in the selected streams
   * @param toAtLeastTxId the selected streams must contain this transaction
   * @param recovery recovery context; when non-null, a transaction gap is
   *                 logged instead of thrown so loading can continue
   * @param inProgressOk set to true if in-progress streams are OK
   * @throws IOException if the streams cannot cover the requested range and
   *                     recovery mode is not enabled
   */
  public Collection<EditLogInputStream> selectInputStreams(long fromTxId,
      long toAtLeastTxId, MetaRecoveryContext recovery, boolean inProgressOk)
      throws IOException {

    List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
    // journalSet is not threadsafe; guard access with journalSetLock.
    synchronized(journalSetLock) {
      Preconditions.checkState(journalSet.isOpen(), "Cannot call " +
          "selectInputStreams() on closed FSEditLog");
      selectInputStreams(streams, fromTxId, inProgressOk);
    }

    try {
      checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk);
    } catch (IOException e) {
      if (recovery != null) {
        // If recovery mode is enabled, continue loading even if we know we
        // can't load up to toAtLeastTxId.
        // Log with a context message so the stack trace is preserved,
        // instead of passing the throwable as the message object.
        LOG.error("Exception while selecting input streams", e);
      } else {
        closeAllStreams(streams);
        throw e;
      }
    }
    return streams;
  }
  
  /**
   * Check for gaps in the edit log input stream list.
   * Note: we're assuming that the list is sorted and that txid ranges don't
   * overlap.  This could be done better and with more generality with an
   * interval tree.
   *
   * @throws IOException if the streams do not continuously cover
   *         fromTxId..toAtLeastTxId
   */
  private void checkForGaps(List<EditLogInputStream> streams, long fromTxId, long toAtLeastTxId, boolean inProgressOk) throws IOException {
    Iterator<EditLogInputStream> iter = streams.iterator();
    // txId is the next transaction we still need coverage for.
    long txId = fromTxId;
    while (true) {
      // Required range fully covered: success.
      if (txId > toAtLeastTxId) {
        return;
      }
      // Ran out of streams before covering the range: gap.
      if (!iter.hasNext()) {
        break;
      }
      EditLogInputStream elis = iter.next();
      // Stream starts beyond the next needed txid: gap.
      if (elis.getFirstTxId() > txId) {
        break;
      }
      long next = elis.getLastTxId();
      if (next == HdfsConstants.INVALID_TXID) {
        if (!inProgressOk) {
          throw new RuntimeException("inProgressOk = false, but " + "selectInputStreams returned an in-progress edit " + "log input stream (" + elis + ")");
        }
        // We don't know where the in-progress stream ends.
        // It could certainly go all the way up to toAtLeastTxId.
        return;
      }
      txId = next + 1;
    }
    throw new IOException(String.format("Gap in transactions. Expected to "
        + "be able to read up until at least txid %d but unable to find any "
        + "edit logs containing txid %d", toAtLeastTxId, txId));
  }

  /** 
   * Close all the streams in a collection, ignoring individual failures.
   * @param streams The list of streams to close
   */
  static void closeAllStreams(Iterable<EditLogInputStream> streams) {
    for (EditLogInputStream s : streams) {
      IOUtils.closeStream(s);
    }
  }

  /**
   * Retrieve the implementation class for a Journal scheme.
   * @param conf The configuration to retrieve the information from
   * @param uriScheme The uri scheme to look up.
   * @return the class of the journal implementation
   * @throws IllegalArgumentException if no class is configured for uri
   */
  static Class<? extends JournalManager> getJournalClass(Configuration conf,
                               String uriScheme) {
    // Config key is the plugin prefix suffixed with the URI scheme.
    final String key =
        DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + "." + uriScheme;
    final Class<? extends JournalManager> clazz;
    try {
      clazz = conf.getClass(key, null, JournalManager.class);
    } catch (RuntimeException re) {
      throw new IllegalArgumentException(
          "Invalid class specified for " + uriScheme, re);
    }

    if (clazz == null) {
      LOG.warn("No class configured for " +uriScheme
               + ", " + key + " is empty");
      throw new IllegalArgumentException(
          "No class configured for " + uriScheme);
    }
    return clazz;
  }

  /**
   * Construct a custom journal manager.
   * The class to construct is taken from the configuration.
   * @param uri Uri to construct
   * @return The constructed journal manager
   * @throws IllegalArgumentException if no class is configured for uri
   */
  private JournalManager createJournal(URI uri) {
    final Class<? extends JournalManager> clazz =
        getJournalClass(conf, uri.getScheme());

    try {
      // The implementation must expose a (Configuration, URI, NamespaceInfo)
      // constructor.
      final Constructor<? extends JournalManager> ctor = clazz.getConstructor(
          Configuration.class, URI.class, NamespaceInfo.class);
      return ctor.newInstance(conf, uri, storage.getNamespaceInfo());
    } catch (Exception e) {
      throw new IllegalArgumentException("Unable to construct journal, " + uri, e);
    }
  }

}
