package org.apache.hadoop.hdfs.protocol;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.AtMostOnce;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;

/**
 * ClientProtocol defines all operations that are initiated by a client and
 * answered by the Namenode. The interface is large (80+ methods); its methods
 * fall into the following categories.
 * - HDFS file read operations:
 *       getBlockLocations() and reportBadBlocks().
 * - HDFS file write and append operations.
 *       Methods a client must call on the normal write path: create(), append(), addBlock(), complete().
 *       Methods used to recover when a failure occurs during the write:
 *           abandonBlock() handles a datanode failing while the client is setting up the data pipeline.
 *           If the client has already established the pipeline and a datanode holding a replica of the
 *           block fails while the block is being written, recovery proceeds as follows:
 *           1. The client first calls getAdditionalDatanode() to ask the Namenode for a new datanode to replace the failed one.
 *           2. The client then calls updateBlockForPipeline() to have the Namenode assign a new generation stamp (timestamp) to the block, so the incomplete replica on the failed node becomes stale and will be deleted during a subsequent block report.
 *           3. The client can then build a new data pipeline using the new generation stamp and resume writing the block.
 *           4. Once the new pipeline is established, the client calls updatePipeline() to update the block's pipeline information on the Namenode.
 *           This completes the recovery procedure.
 * - Namespace management operations.
 * - System administration and management operations.
 * - Snapshot operations.
 * - Caching operations.
 * - Other operations.
 *
 * The read, write/append and namespace-management operations all have counterparts in the FileSystem class; they support the Hadoop file-system implementations.
 * The administration operations are issued by the DFSAdmin tool class, whose methods let administrators configure and manage HDFS.
 * Snapshots and caching are features introduced in Hadoop 2.X; ClientProtocol contains the corresponding methods to support both.
 * ClientProtocol also contains security, XAttr and similar methods, which are not covered in detail here.
 *
 * Two implementations:
 * - ClientNamenodeProtocolTranslatorPB: one of the most important classes on the RPC client side; it wraps client request parameters into serializable protobuf messages and sends them through a proxy class (implementing the ClientNamenodeProtocolPB interface).
 * - NameNodeRpcServer: the class on the Namenode side that answers ClientProtocol calls; it performs the HDFS operation and returns the result.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
@KerberosInfo(serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
@TokenInfo(DelegationTokenSelector.class)
public interface ClientProtocol {

  /**
   * Until version 69, this class ClientProtocol served as both
   * the client interface to the NN AND the RPC protocol used to 
   * communicate with the NN.
   * 
   * This class is used by both the DFSClient and the 
   * NN server side to insulate from the protocol serialization.
   * 
   * If you are adding/changing this interface then you need to 
   * change both this class and ALSO related protocol buffer
   * wire protocol definition in ClientNamenodeProtocol.proto.
   * 
   * For more details on protocol buffer wire protocol, please see 
   * .../org/apache/hadoop/hdfs/protocolPB/overview.html
   * 
   * The log of historical changes can be retrieved from the svn).
   * 69: Eliminate overloaded method names.
   * 
   * 69L is the last version id when this class was used for protocols
   *  serialization. DO not update this version any further. 
   */
  public static final long versionID = 69L;
  
  ///////////////////////////////////////
  // File contents
  ///////////////////////////////////////
  /**
   * 客户端会调用ClientProtocol.getBlockLocations()方法 获取HDFS文件指定范围内所有数据块的位置信息。
   * 这个方法的参数是HDFS文件的文件名以及读取范围，返回值是文件指定范围内所有数据块的文件名以及它们的位置信息， 使用LocatedBlocks对象封装。
   * 每个数据块的位置信息指的是存储这个数据块副本的所有Datanode的信息， 这些Datanode会以与 当前客户端的距离远近排序。
   * 客户端读取数据时，会首先调用getBlockLocations()方法获 取HDFS文件的所有数据块的位置信息，然后客户端会根据这些位置信息从数据节点读取 数据块.
   */
  @Idempotent
  public LocatedBlocks getBlockLocations(String src, long offset, long length)  throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * Get server default values for a number of configuration params.
   * @return a set of server default configuration values
   * @throws IOException
   */
  @Idempotent
  public FsServerDefaults getServerDefaults() throws IOException;

  /**
   * create()方法用于在HDFS的文件系统目录树中创建一个新的空文件，创建的路径由src 参数指定。
   * 这个空文件创建后对于其他的客户端是“可读”的，但是这些客户端不能删除、重命名或者移动这个文件，直到这个文件被关闭或者租约过期。
   *
   * 客户端写一个新的文件 时，会首先调用create()方法在文件系统目录树中创建一个空文件，
   * 然后调用addBlock()方 法获取存储文件数据的数据块的位置信息，
   * 最后客户端就可以根据位置信息建立数据流管 道，向数据节点写入数据了.
   */
  @AtMostOnce
  public HdfsFileStatus create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag,
      boolean createParent, short replication, long blockSize,  CryptoProtocolVersion[] supportedVersions) throws IOException;

  /**
   * append()方法用于打开一个已有的文件，
   * 如果这个文件的最后一个数据块没有写满， 则返回这个数据块的位置信息(使用LocatedBlock对象封装);
   * 如果这个文件的最后一个 数据块正好写满，则创建一个新的数据块并添加到这个文件中，
   * 然后返回这个新添加的数 据块的位置信息。
   *
   * 客户端追加写一个已有文件时，会先调用append()方法获取最后一个可 写数据块的位置信息，
   * 然后建立数据流管道，并向数据节点写入追加的数据。
   * 如果客户端 将这个数据块写满，与create()方法一样，客户端会调用addBlock()方法获取新的数据块。
   */
  @AtMostOnce
  public LocatedBlock append(String src, String clientName) throws  IOException;

  /**
   * Set replication for an existing file.
   * <p>
   * The NameNode sets replication to the new value and returns.
   * The actual block replication is not expected to be performed during  
   * this method call. The blocks will be populated or removed in the 
   * background as the result of the routine block maintenance procedures.
   * 
   * @param src file name
   * @param replication new replication
   * 
   * @return true if successful;
   *         false if file does not exist or is a directory
   *
   * @throws AccessControlException If access is denied
   * @throws DSQuotaExceededException If replication violates disk space 
   *           quota restriction
   * @throws FileNotFoundException If file <code>src</code> is not found
   * @throws SafeModeException not allowed in safemode
   * @throws UnresolvedLinkException if <code>src</code> contains a symlink
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public boolean setReplication(String src, short replication)
      throws AccessControlException, DSQuotaExceededException,
      FileNotFoundException, SafeModeException, UnresolvedLinkException,
      SnapshotAccessControlException, IOException;

  /**
   * Get all the available block storage policies.
   * @return All the in-use block storage policies currently.
   */
  @Idempotent
  public BlockStoragePolicy[] getStoragePolicies() throws IOException;

  /**
   * Set the storage policy for a file/directory
   * @param src Path of an existing file/directory. 
   * @param policyName The name of the storage policy
   * @throws SnapshotAccessControlException If access is denied
   * @throws UnresolvedLinkException if <code>src</code> contains a symlink
   * @throws FileNotFoundException If file/dir <code>src</code> is not found
   * @throws QuotaExceededException If changes violate the quota restriction
   */
  @Idempotent
  public void setStoragePolicy(String src, String policyName)
      throws SnapshotAccessControlException, UnresolvedLinkException,
      FileNotFoundException, QuotaExceededException, IOException;

  /**
   * Set permissions for an existing file/directory.
   * 
   * @throws AccessControlException If access is denied
   * @throws FileNotFoundException If file <code>src</code> is not found
   * @throws SafeModeException not allowed in safemode
   * @throws UnresolvedLinkException If <code>src</code> contains a symlink
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public void setPermission(String src, FsPermission permission)
      throws AccessControlException, FileNotFoundException, SafeModeException,
      UnresolvedLinkException, SnapshotAccessControlException, IOException;

  /**
   * Set Owner of a path (i.e. a file or a directory).
   * The parameters username and groupname cannot both be null.
   * @param src file path
   * @param username If it is null, the original username remains unchanged.
   * @param groupname If it is null, the original groupname remains unchanged.
   *
   * @throws AccessControlException If access is denied
   * @throws FileNotFoundException If file <code>src</code> is not found
   * @throws SafeModeException not allowed in safemode
   * @throws UnresolvedLinkException If <code>src</code> contains a symlink
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public void setOwner(String src, String username, String groupname)
      throws AccessControlException, FileNotFoundException, SafeModeException,
      UnresolvedLinkException, SnapshotAccessControlException, IOException;

  /**
   * abandonBlock()方法用于处理客户端建立数据流管道时数据节点出现故障的情况。
   *
   * 客户端调用abandonBlock()方法放弃一个新申请的数据块。
   *
   * 问题1: 创建数据块失败 ???????
   *
   * 当客户端获取了一个新申请的数据块，发现无法建立到存储这个数据块副本的某些数据节点的连接时，
   * 会调用abandonBlock()方法通知名字节点放弃这个数据块，
   * 之后客户端会再次调 用addBlock()方法获取新的数据块，
   * 并在传入参数时将无法连接的数据节点放入 excludeNodes参数列表中，
   * 以避免Namenode将数据块的副本分配到该节点上，
   * 造成客户端 再次无法连接这个节点的情况。
   *
   * 问题2:  如果客户端已经成功建立了数据流管道， 在客户端写某个数据块时，存储这个数据块副本的某个数据节点出现了错误该如何处理呢???
   *
   * 客户端首先会调用getAdditionalDatanode()方法向Namenode申请一个新的Datanode来替代出现故障的 Datanode。
   * 然后客户端会调用updateBlockForPipeline()方法向Namenode申请为这个数据块 分配新的时间戳，
   * 这样故障节点上的没能写完整的数据块的时间戳就会过期，在后续的块 汇报操作中会被删除。
   * 最后客户端就可以使用新的时间戳建立新的数据流管道，来执行对 数据块的写操作了。
   * 数据流管道建立成功后，客户端还需要调用updatePipeline()方法更新
   * Namenode中当前数据块的数据流管道信息。
   * 至此，一个完整的恢复操作结束。
   *
   * 问题3: 在写数据的过程中，Client节点也有可能在任 意时刻发生故障 ???
   *
   * 对于任意一个Client打开的文件都需要Client定期调用ClientProtocol.renewLease()
   * 方法更新租约(关于租约请参考第3章中租约相关小节)。
   * 如果Namenode长时间没有收到Client的租约更新消息，
   * 就会认为Client发生故障，这时就 会触发一次租约恢复操作，
   * 关闭文件并且同步所有数据节点上这个文件数据块的状态，确 保HDFS系统中这个文件是正确且一致保存的。
   */
  @Idempotent
  public void abandonBlock(ExtendedBlock b, long fileId, String src, String holder) throws  IOException;

  /**
   * 客户端调用addBlock()方法向指定文件添加一个新的数据块，并获取存储这个数据块副本的所有数据节点的位置信息（使用LocatedBlock对象封装）。
   * 要特别注意的是，调用addBlock()方法时还要传入上一个数据块的引用。Namenode在分配新的数据块时，会顺便提交上一个数据块
   * @param previous 上一个数据块的引用。
   * @param excludeNodes 数据节点的黑名单，保存了客户端无法连接的一些数据节点，建议Namenode在分配保存数据块副本的数据节点时不要考虑这些节点。
   * @param favoredNodes 客户端所希望的保存数据块副本的数据节点的列表。
   *                     客户端调用addBlock()方法获取新的数据块的位置信息后，会建立到这些数据节点的数据流管道，并通过数据流管道将数据写入数据节点。
   */
  @Idempotent
  public LocatedBlock addBlock(String src, String clientName, ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,  String[] favoredNodes) throws  IOException;

  /** 
   * Get a datanode for an existing pipeline.
   * 
   * @param src the file being written
   * @param fileId the ID of the file being written
   * @param blk the block being written
   * @param existings the existing nodes in the pipeline
   * @param excludes the excluded nodes
   * @param numAdditionalNodes number of additional datanodes
   * @param clientName the name of the client
   * 
   * @return the located block.
   * 
   * @throws AccessControlException If access is denied
   * @throws FileNotFoundException If file <code>src</code> is not found
   * @throws SafeModeException create not allowed in safemode
   * @throws UnresolvedLinkException If <code>src</code> contains a symlink
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public LocatedBlock getAdditionalDatanode(final String src,
      final long fileId, final ExtendedBlock blk,
      final DatanodeInfo[] existings,
      final String[] existingStorageIDs,
      final DatanodeInfo[] excludes,
      final int numAdditionalNodes, final String clientName
      ) throws AccessControlException, FileNotFoundException,
          SafeModeException, UnresolvedLinkException, IOException;

  /**
   * 当客户端完成了整个文件的写入操作后，会调用complete()方法通知Namenode。
   * 这个操作会提交新写入HDFS文件的所有数据块，
   * 当这些数据块的副本数量满足系统配置的最小副本系数（默认值为1），也就是该文件的所有数据块至少有一个有效副本时，complete()方法会返回true，这时Namenode中文件的状态也会从构建中状态转换为正常状态；
   * 否则，complete()会返回false，客户端就需要重复调用complete()操作，直至该方法返回true。
   */
  @Idempotent
  public boolean complete(String src, String clientName, ExtendedBlock last, long fileId) throws  IOException;

  /**
   * 客户端会调用ClientProtocol.reportBadBlocks()方法向Namenode汇报错误的数据块。
   * 当客户端从数据节点读取数据块且发现数据块的校验和并不正确时，就会调用这个方法向Namenode汇报这个错误的数据块信息。
   */
  @Idempotent
  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;

  ///////////////////////////////////////
  // Namespace management
  ///////////////////////////////////////
  /**
   * Rename an item in the file system namespace.
   * @param src existing file or directory name.
   * @param dst new name.
   * @return true if successful, or false if the old name does not exist or if the new name already belongs to the namespace.
   * 
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException an I/O error occurred 
   */
  @AtMostOnce
  public boolean rename(String src, String dst)  throws UnresolvedLinkException, SnapshotAccessControlException, IOException;

  /**
   * Moves blocks from srcs to trg and delete srcs
   * 
   * @param trg existing file
   * @param srcs - list of existing files (same block size, same replication)
   * @throws IOException if some arguments are invalid
   * @throws UnresolvedLinkException if <code>trg</code> or <code>srcs</code> contains a symlink
   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
  @AtMostOnce
  public void concat(String trg, String[] srcs)  throws IOException, UnresolvedLinkException, SnapshotAccessControlException;

  /**
   * 更改文件/目录名字
   *
   * <li>Fails if src is a file and dst is a directory.
   * <li>Fails if src is a directory and dst is a file.
   * <li>Fails if the parent of dst does not exist or is a file.
   *
   * Without OVERWRITE option, rename fails if the dst already exists.
   * With OVERWRITE option, rename overwrites the dst, if it is a file 
   * or an empty directory. Rename fails if dst is a non-empty directory.
   *
   * This implementation of rename is atomic.
   *
   * @param src existing file or directory name.
   * @param dst new name.
   * @param options Rename options
   * 
   * @throws AccessControlException If access is denied
   * @throws DSQuotaExceededException If rename violates disk space  quota restriction
   * @throws FileAlreadyExistsException If <code>dst</code> already exists and <code>options</options> has {@link Rename#OVERWRITE} option false.
   * @throws FileNotFoundException If <code>src</code> does not exist
   * @throws NSQuotaExceededException If rename violates namespace  quota restriction
   * @throws ParentNotDirectoryException If parent of <code>dst</code>  is not a directory
   * @throws SafeModeException rename not allowed in safemode
   * @throws UnresolvedLinkException If <code>src</code> or  <code>dst</code> contains a symlink
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred
   */
  @AtMostOnce
  public void rename2(String src, String dst, Options.Rename... options)
      throws AccessControlException, DSQuotaExceededException,
      FileAlreadyExistsException, FileNotFoundException,
      NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
      UnresolvedLinkException, SnapshotAccessControlException, IOException;
  
  /**
   * Delete the given file or directory from the file system.
   * <p>
   * same as delete but provides a way to avoid accidentally 
   * deleting non empty directories programmatically. 
   * @param src existing name
   * @param recursive if true deletes a non empty directory recursively,
   * else throws an exception.
   * @return true only if the existing file or directory was actually removed 
   * from the file system.
   * 
   * @throws AccessControlException If access is denied
   * @throws FileNotFoundException If file <code>src</code> is not found
   * @throws SafeModeException create not allowed in safemode
   * @throws UnresolvedLinkException If <code>src</code> contains a symlink
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred
   */
  @AtMostOnce
  public boolean delete(String src, boolean recursive)
      throws AccessControlException, FileNotFoundException, SafeModeException,
      UnresolvedLinkException, SnapshotAccessControlException, IOException;
  
  /**
   * Create a directory (or hierarchy of directories) with the given
   * name and permission.
   *
   * @param src The path of the directory being created
   * @param masked The masked permission of the directory being created
   * @param createParent create missing parent directory if true
   *
   * @return True if the operation success.
   *
   * @throws AccessControlException If access is denied
   * @throws FileAlreadyExistsException If <code>src</code> already exists
   * @throws FileNotFoundException If parent of <code>src</code> does not exist
   *           and <code>createParent</code> is false
   * @throws NSQuotaExceededException If file creation violates quota restriction
   * @throws ParentNotDirectoryException If parent of <code>src</code> 
   *           is not a directory
   * @throws SafeModeException create not allowed in safemode
   * @throws UnresolvedLinkException If <code>src</code> contains a symlink
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred.
   *
   * RunTimeExceptions:
   * @throws InvalidPathException If <code>src</code> is invalid
   */
  @Idempotent
  public boolean mkdirs(String src, FsPermission masked, boolean createParent)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, NSQuotaExceededException,
      ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
      SnapshotAccessControlException, IOException;

  /**
   * Get a partial listing of the indicated directory
   *
   * @param src the directory name
   * @param startAfter the name to start listing after encoded in java UTF8
   * @param needLocation if the FileStatus should contain block locations
   *
   * @return a partial listing starting after startAfter
   *
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException file <code>src</code> is not found
   * @throws UnresolvedLinkException If <code>src</code> contains a symlink
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public DirectoryListing getListing(String src,
                                     byte[] startAfter,
                                     boolean needLocation)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;
  
  /**
   * Get listing of all the snapshottable directories
   * 
   * @return Information about all the current snapshottable directory
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
      throws IOException;

  ///////////////////////////////////////
  // System issues and management
  ///////////////////////////////////////

  /**
   * 在写数据的过程中，Client节点也有可能在任意时刻发生故障，为了预防这种情况，对于任意一个Client打开的文件都需要Client定期调用ClientProtocol.renewLease()方法更新租约。
   * 如果Namenode长时间没有收到Client的租约更新消息，就会认为Client发生故障，这时就会触发一次租约恢复操作，关闭文件并且同步所有数据节点上这个文件数据块的状态，确保HDFS系统中这个文件是正确且一致保存的。
   */
  @Idempotent
  public void renewLease(String clientName) throws AccessControlException, IOException;

  /**
   * Start lease recovery.
   * Lightweight NameNode operation to trigger lease recovery
   * 
   * @param src path of the file to start lease recovery
   * @param clientName name of the current client
   * @return true if the file is already closed
   * @throws IOException
   */
  @Idempotent
  public boolean recoverLease(String src, String clientName) throws IOException;

  public int GET_STATS_CAPACITY_IDX = 0;
  public int GET_STATS_USED_IDX = 1;
  public int GET_STATS_REMAINING_IDX = 2;
  public int GET_STATS_UNDER_REPLICATED_IDX = 3;
  public int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
  public int GET_STATS_MISSING_BLOCKS_IDX = 5;
  
  /**
   * 用于获取文件系统状态信息，包括磁盘使用情况，复制数据块的数量，损坏数据块的数量，丢失数据库的数量等。对应dfsadmin中的 -report 命令
   *  [0] 包含系统的总存储容量, in bytes.
   *  [1] 包含系统的总已用空间, in bytes.
   *  [2] 包含系统的可用存储, in bytes.
   *  [3] 包含系统中复制不足的块数.
   *  [4] 包含具有损坏副本的块数.
   *  [5] 包含没有留下任何良好副本的块数。
   *  [6] 包含block pool的总已用空间.
   */
  @Idempotent
  public long[] getStats() throws IOException;

  /**
   * 获取集群中存活的，死亡的或者所有的数据节点信息，
   */
  @Idempotent
  public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type) throws IOException;

  /**
   * 获取数据节点上所有存储的信息
   */
  @Idempotent
  public DatanodeStorageReport[] getDatanodeStorageReport( HdfsConstants.DatanodeReportType type) throws IOException;

  /**
   * Get the block size for the given file.
   * @param filename The name of the file
   * @return The number of bytes in each block
   * @throws IOException
   * @throws UnresolvedLinkException if the path contains a symlink. 
   */
  @Idempotent
  public long getPreferredBlockSize(String filename)  throws IOException, UnresolvedLinkException;

  /**
   * 用于进入、离开安全模式，或者获取当前安全模式的状态。
   *
   * 安全模式是Namenode的一种状态，处于安全模式中的Namenode不接受客户端对命名空间的修改操作，整个命名空间都处于只读状态。
   * 同时，Namenode也不会向Datanode下发任何数据块的复制、删除指令。
   *
   * 管理员可以通过dfsadmin setSafemode命令触发Namenode进入或者退出安全模式，同时还可以使用这个命令查询安全模式的状态。
   *
   * 需要注意的是，刚刚启动的Namenode会直接自动进入安全模式，当Namenode中保存的满足最小副本系数的数据块达到一定的比例时，Namenode会自动退出安全模式。
   * 对于用户通过dfsAdmin方式触发Namenode进入安全模式的情况，则只能由管理员手动关闭安全模式，Namenode不可以自动退出。
   */
  @Idempotent
  public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked)  throws IOException;

  /**
   * 将namenod内存中的命名空间保存至新的fsimage中，并且重置editlog。
   * 执行这个操作必须处于安全模式中。
   */
  @AtMostOnce
  public void saveNamespace() throws AccessControlException, IOException;

  
  /**
   * 重置editslog, 也就是关闭正在写的editlog文件，开启一个新的editlog文件。
   * 执行这个操作必须处于安全模式中。
   */
  @Idempotent
  public long rollEdits() throws AccessControlException, IOException;

  /**
   * 用于当失败的存储变得可用时，设置是否对这个存储时候那该保存的副本进行恢复操作。
   */
  @Idempotent
  public boolean restoreFailedStorage(String arg)  throws  IOException;

  /**
   * 触发Namenode重新读取 include/exclude文件。
   *
   * refreshNodes()方法会触发Namenode刷新数据节点列表。
   * 管理员可以通过include文件指定可以连接到Namenode的数据节点列表，通过exclude文件指定不能连接到Namenode的数据节点列表。
   *
   * 每当管理员修改了这两个配置文件后，都需要通过`-refreshNodes`选项触发Namenode刷新数据节点列表，
   * 这个操作会造成Namenode从文件系统中移除已有的数据节点，或者添加新的数据节点
   */
  @Idempotent
  public void refreshNodes() throws IOException;

  /**
   * 提交namenode的升级操作
   */
  @Idempotent
  public void finalizeUpgrade() throws IOException;

  /**
   * 触发namenode进行升级操作。
   *
   * 管理员可以通过`-rollingUpgrade`选项触发Namenode进行升级操作。
   * 当Namenode成功地执行了升级操作后，管理员可以通过`-finalizeUpgrade`提交升级操作，提交升级操作会删除升级操作创建的一些临时目录，提交升级操作之后就不可以再回滚了。
   */
  @Idempotent
  public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException;

  /**
   * @return CorruptFileBlocks, containing a list of corrupt files (with
   *         duplicates if there is more than one corrupt block in a file)
   *         and a cookie
   * @throws IOException
   *
   * Each call returns a subset of the corrupt files in the system. To obtain
   * all corrupt files, call this method repeatedly and each time pass in the
   * cookie returned from the previous call.
   */
  @Idempotent
  public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) throws IOException;
  
  /**
   * 将Namenode中的主要的数据结构保存在指定文件中，包括同namendoe心跳过的datanode, 等待复制的数据库，等待删除的数据块，当前正在复制的数据库等信息，
   */
  @Idempotent
  public void metaSave(String filename) throws IOException;

  /**
   * 更改datanode在进行数据块平衡操作时所占用的带宽。调用这个命令设置的带宽值会覆盖 dfs.balance.bandwidthPerSec 配置项配置的带宽值。
   */
  @Idempotent
  public void setBalancerBandwidth(long bandwidth) throws IOException;
  
  /**
   * Get the file info for a specific file or directory.
   * @param src The string representation of the path to the file
   *
   * @return object containing information regarding the file
   *         or null if file not found
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException file <code>src</code> is not found
   * @throws UnresolvedLinkException if the path contains a symlink. 
   * @throws IOException If an I/O error occurred        
   */
  @Idempotent
  public HdfsFileStatus getFileInfo(String src) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;
  
  /**
   * Get the close status of a file
   * @param src The string representation of the path to the file
   *
   * @return return true if file is closed
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException file <code>src</code> is not found
   * @throws UnresolvedLinkException if the path contains a symlink.
   * @throws IOException If an I/O error occurred     
   */
  @Idempotent
  public boolean isFileClosed(String src) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;
  
  /**
   * Get the file info for a specific file or directory. If the path 
   * refers to a symlink then the FileStatus of the symlink is returned.
   * @param src The string representation of the path to the file
   *
   * @return object containing information regarding the file
   *         or null if file not found
   *
   * @throws AccessControlException permission denied
   * @throws UnresolvedLinkException if <code>src</code> contains a symlink
   * @throws IOException If an I/O error occurred        
   */
  @Idempotent
  public HdfsFileStatus getFileLinkInfo(String src)
      throws AccessControlException, UnresolvedLinkException, IOException;
  
  /**
   * Get {@link ContentSummary} rooted at the specified directory.
   * @param path The string representation of the path
   *
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException file <code>path</code> is not found
   * @throws UnresolvedLinkException if <code>path</code> contains a symlink. 
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public ContentSummary getContentSummary(String path)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * 设置目录中的文件/目录的数量配额，以及文件大小的配额。
   */
  @Idempotent
  public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, SnapshotAccessControlException, IOException;

  /**
   * Write all metadata for this file into persistent storage.
   * The file must be currently open for writing.
   * @param src The string representation of the path
   * @param inodeId The inode ID, or GRANDFATHER_INODE_ID if the client is
   *                too old to support fsync with inode IDs.
   * @param client The string representation of the client
   * @param lastBlockLength The length of the last block (under construction) 
   *                        to be reported to NameNode 
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException file <code>src</code> is not found
   * @throws UnresolvedLinkException if <code>src</code> contains a symlink. 
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public void fsync(String src, long inodeId, String client,
                    long lastBlockLength)
      throws AccessControlException, FileNotFoundException, 
      UnresolvedLinkException, IOException;

  /**
   * Sets the modification and access time of the file to the specified time.
   * @param src The string representation of the path
   * @param mtime The number of milliseconds since Jan 1, 1970.
   *              Setting mtime to -1 means that modification time should not be set
   *              by this call.
   * @param atime The number of milliseconds since Jan 1, 1970.
   *              Setting atime to -1 means that access time should not be set
   *              by this call.
   *              
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException file <code>src</code> is not found
   * @throws UnresolvedLinkException if <code>src</code> contains a symlink. 
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
  public void setTimes(String src, long mtime, long atime)
      throws AccessControlException, FileNotFoundException, 
      UnresolvedLinkException, SnapshotAccessControlException, IOException;

  /**
   * Create symlink to a file or directory.
   * @param target The path of the destination that the
   *               link points to.
   * @param link The path of the link being created.
   * @param dirPerm permissions to use when creating parent directories
   * @param createParent - if true then missing parent dirs are created
   *                       if false then parent must exist
   *
   * @throws AccessControlException permission denied
   * @throws FileAlreadyExistsException If file <code>link</code> already exists
   * @throws FileNotFoundException If parent of <code>link</code> does not exist
   *           and <code>createParent</code> is false
   * @throws ParentNotDirectoryException If parent of <code>link</code> is not a
   *           directory.
   * @throws UnresolvedLinkException if <code>link</target> contains a symlink. 
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @throws IOException If an I/O error occurred
   */
  @AtMostOnce
  public void createSymlink(String target, String link, FsPermission dirPerm,
      boolean createParent) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
      SnapshotAccessControlException, IOException;

  /**
   * Return the target of the given symlink. If there is an intermediate
   * symlink in the path (i.e. a symlink leading up to the final path
   * component) then the given path is returned with this symlink resolved.
   *
   * @param path The path with a link that needs resolution.
   * @return The path after resolving the first symbolic link in the path.
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException If <code>path</code> does not exist
   * @throws IOException If the given path does not refer to a symlink
   *           or an I/O error occurred
   */
  @Idempotent
  public String getLinkTarget(String path) throws AccessControlException,
      FileNotFoundException, IOException; 
  
  /**
   * Get a new generation stamp together with an access token for
   * a block under construction.
   *
   * This method is called only when a client needs to recover a failed
   * pipeline or set up a pipeline for appending to a block.
   *
   * @param block a block under construction
   * @param clientName the name of the client requesting the recovery
   * @return a located block with a new generation stamp and an access token
   * @throws IOException if any error occurs
   */
  @Idempotent
  public LocatedBlock updateBlockForPipeline(ExtendedBlock block,
      String clientName) throws IOException;

  /**
   * Update a pipeline for a block under construction.
   *
   * @param clientName the name of the client
   * @param oldBlock the old block
   * @param newBlock the new block containing new generation stamp and length
   * @param newNodes datanodes in the new pipeline
   * @param newStorageIDs storage IDs corresponding to the entries of
   *          <code>newNodes</code>
   * @throws IOException if any error occurs
   */
  @AtMostOnce
  public void updatePipeline(String clientName, ExtendedBlock oldBlock, 
      ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
      throws IOException;

  /**
   * Get a valid delegation token.
   *
   * @param renewer the designated renewer for the token
   * @return a {@code Token<DelegationTokenIdentifier>} for the given renewer
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) 
      throws IOException;

  /**
   * Renew an existing delegation token.
   *
   * @param token delegation token obtained earlier
   * @return the new expiration time
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
      throws IOException;
  
  /**
   * Cancel an existing delegation token.
   *
   * @param token the delegation token to cancel
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
      throws IOException;
  
  /**
   * @return encryption key so a client can encrypt data sent via the
   *         DataTransferProtocol to/from DataNodes.
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public DataEncryptionKey getDataEncryptionKey() throws IOException;

  // ########## Snapshot operations ##############

  /**
   * Create a snapshot.
   *
   * Hadoop 2.X added the snapshot feature: users can create snapshots of
   * arbitrary HDFS paths. A snapshot preserves a point-in-time copy of all
   * data under an HDFS path, and can be used to roll a failed cluster back
   * to a previous healthy point in time. Users can create, delete, and
   * rename snapshots via the `hdfs dfs` command.
   *
   * Note that before a snapshot can be created, snapshots must first be
   * enabled on the directory with `hdfs dfsadmin -allowSnapshot`; otherwise
   * creating a snapshot of that directory fails.
   *
   * @param snapshotRoot the directory to take the snapshot of
   * @param snapshotName the name of the new snapshot
   * @return the path of the created snapshot
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public String createSnapshot(String snapshotRoot, String snapshotName) throws IOException;

  /**
   * Delete a snapshot.
   *
   * @param snapshotRoot the directory the snapshot was taken of
   * @param snapshotName the name of the snapshot to delete
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void deleteSnapshot(String snapshotRoot, String snapshotName) throws IOException;
  
  /**
   * Rename a snapshot.
   *
   * @param snapshotRoot the directory the snapshot was taken of
   * @param snapshotOldName the current name of the snapshot
   * @param snapshotNewName the new name for the snapshot
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void renameSnapshot(String snapshotRoot, String snapshotOldName, String snapshotNewName) throws IOException;
  
  /**
   * Enable snapshots on the given directory. Snapshots can only be taken of
   * a directory after snapshots have been enabled on it.
   *
   * @param snapshotRoot the directory on which to enable snapshots
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void allowSnapshot(String snapshotRoot) throws IOException;
    
  /**
   * Disable snapshots on the given directory.
   *
   * @param snapshotRoot the directory on which to disable snapshots
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void disallowSnapshot(String snapshotRoot) throws IOException;
  
  /**
   * Get the difference between two snapshots of a directory.
   *
   * @param snapshotRoot the directory the snapshots were taken of
   * @param fromSnapshot the name of the earlier snapshot
   * @param toSnapshot the name of the later snapshot
   * @return a report describing the differences between the two snapshots
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String fromSnapshot, String toSnapshot) throws IOException;


  // ########## Cache operations ##############

  /**
   * Add a cache directive.
   *
   * HDFS 2.3 added the Centralized Cache Management feature. Users can pin
   * frequently-used data, or data belonging to high-priority jobs, in memory
   * so that it is not evicted to disk; this helps the execution efficiency
   * and latency of Hadoop and the applications built on top of it.
   * Two concepts are involved:
   * - cache directive: a file or directory to be cached in memory.
   * - cache pool: manages a group of cache directives, similar to a
   *   namespace, and uses UNIX-style read/write/execute permissions.
   *
   * @param directive the cache directive to add
   * @param flags cache flags controlling the operation
   * @return the ID assigned to the new cache directive
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public long addCacheDirective(CacheDirectiveInfo directive, EnumSet<CacheFlag> flags) throws IOException;

  /**
   * Modify an existing cache directive.
   *
   * @param directive the cache directive to modify (identified by its ID)
   * @param flags cache flags controlling the operation
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void modifyCacheDirective(CacheDirectiveInfo directive, EnumSet<CacheFlag> flags) throws IOException;

  /**
   * Remove a cache directive.
   *
   * @param id the ID of the cache directive to remove
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void removeCacheDirective(long id) throws IOException;

  /**
   * List cache directives, in batches.
   *
   * @param prevId the ID of the last directive in the previous batch
   * @param filter restricts which directives are returned
   * @return a batch of cache directive entries
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId, CacheDirectiveInfo filter) throws IOException;

  /**
   * Add a cache pool.
   *
   * @param info description of the cache pool to create
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void addCachePool(CachePoolInfo info) throws IOException;

  /**
   * Modify the metadata of an existing cache pool.
   *
   * @param req the new metadata for the cache pool
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void modifyCachePool(CachePoolInfo req) throws IOException;
  
  /**
   * Remove a cache pool.
   *
   * @param pool the name of the cache pool to remove
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void removeCachePool(String pool) throws IOException;

  /**
   * List the existing cache pools, including owner, group and permissions,
   * in batches.
   *
   * @param prevPool the name of the last pool in the previous batch
   * @return a batch of cache pool entries
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public BatchedEntries<CachePoolEntry> listCachePools(String prevPool) throws IOException;

  /**
   * Modifies ACL entries of files and directories.  This method can add new
   * ACL entries or modify the permissions on existing ACL entries.  All
   * existing ACL entries that are not specified in this call are retained
   * without changes.  (Modifications are merged into the current ACL.)
   *
   * @param src the path to modify
   * @param aclSpec the ACL entries to add or modify
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void modifyAclEntries(String src, List<AclEntry> aclSpec)
      throws IOException;

  /**
   * Removes ACL entries from files and directories.  Other ACL entries are
   * retained.
   *
   * @param src the path to modify
   * @param aclSpec the ACL entries to remove
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void removeAclEntries(String src, List<AclEntry> aclSpec)
      throws IOException;

  /**
   * Removes all default ACL entries from files and directories.
   *
   * @param src the path to modify
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void removeDefaultAcl(String src) throws IOException;

  /**
   * Removes all but the base ACL entries of files and directories.  The
   * entries for user, group, and others are retained for compatibility with
   * permission bits.
   *
   * @param src the path to modify
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void removeAcl(String src) throws IOException;

  /**
   * Fully replaces ACL of files and directories, discarding all existing
   * entries.
   *
   * @param src the path to modify
   * @param aclSpec the new ACL entries
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public void setAcl(String src, List<AclEntry> aclSpec) throws IOException;

  /**
   * Gets the ACLs of files and directories.
   *
   * @param src the path to query
   * @return the ACL status of the given path
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public AclStatus getAclStatus(String src) throws IOException;
  
  /**
   * Create an encryption zone.
   *
   * @param src the path at which to create the encryption zone
   * @param keyName the name of the encryption key to use for the zone
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void createEncryptionZone(String src, String keyName)
    throws IOException;

  /**
   * Get the encryption zone for a path.
   *
   * @param src the path to query
   * @return the encryption zone containing the path
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public EncryptionZone getEZForPath(String src)
    throws IOException;

  /**
   * Used to implement cursor-based batched listing of
   * {@link EncryptionZone}s.
   *
   * @param prevId ID of the last item in the previous batch. If there is no
   *               previous batch, a negative value can be used.
   * @return Batch of encryption zones.
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public BatchedEntries<EncryptionZone> listEncryptionZones(
      long prevId) throws IOException;

  /**
   * Set xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param src file or directory
   * @param xAttr <code>XAttr</code> to set
   * @param flag set flag
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag) 
      throws IOException;
  
  /**
   * Get xattrs of a file or directory. Values in xAttrs parameter are ignored.
   * If xAttrs is null or empty, this is the same as getting all xattrs of the
   * file or directory.  Only those xattrs for which the logged-in user has
   * permissions to view are returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param src file or directory
   * @param xAttrs xAttrs to get
   * @return the list of matching <code>XAttr</code>s
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) 
      throws IOException;

  /**
   * List the xattrs names for a file or directory.
   * Only the xattr names for which the logged in user has the permissions to
   * access will be returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param src file or directory
   * @return the list of accessible <code>XAttr</code>s
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public List<XAttr> listXAttrs(String src)
      throws IOException;
  
  /**
   * Remove xattr of a file or directory. Value in xAttr parameter is ignored.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param src file or directory
   * @param xAttr <code>XAttr</code> to remove
   * @throws IOException if an I/O error occurred
   */
  @AtMostOnce
  public void removeXAttr(String src, XAttr xAttr) throws IOException;

  /**
   * Checks if the user can access a path.  The mode specifies which access
   * checks to perform.  If the requested permissions are granted, then the
   * method returns normally.  If access is denied, then the method throws an
   * {@link AccessControlException}.
   * In general, applications should avoid using this method, due to the risk of
   * time-of-check/time-of-use race conditions.  The permissions on a file may
   * change immediately after the access call returns.
   *
   * @param path Path to check
   * @param mode type of access to check
   * @throws AccessControlException if access is denied
   * @throws FileNotFoundException if the path does not exist
   * @throws IOException see specific implementation
   */
  @Idempotent
  public void checkAccess(String path, FsAction mode) throws IOException;

  /**
   * Get the highest txid the NameNode knows has been written to the edit
   * log, or -1 if the NameNode's edit log is not yet open for write. Used as
   * the starting point for the inotify event stream.
   *
   * @return the highest written edit log transaction ID, or -1
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public long getCurrentEditLogTxid() throws IOException;

  /**
   * Get an ordered list of batches of events corresponding to the edit log
   * transactions for txids equal to or greater than txid.
   *
   * @param txid the first transaction ID of interest
   * @return an ordered list of event batches starting at <code>txid</code>
   * @throws IOException if an I/O error occurred
   */
  @Idempotent
  public EventBatchList getEditsFromTxid(long txid) throws IOException;
}
