/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.common;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

import com.google.common.base.Preconditions;

/************************************
 * Some handy internal HDFS constants
 *
 ************************************/

@InterfaceAudience.Private
public final class HdfsServerConstants {
  /* Hidden constructor */
  private HdfsServerConstants() { }
  
  /**
   * Identifies which kind of HDFS server node owns a piece of storage.
   */
  public enum NodeType {
    /** An HDFS NameNode. */
    NAME_NODE,
    /** An HDFS DataNode. */
    DATA_NODE,
    /** An HDFS JournalNode. */
    JOURNAL_NODE
  }

  /** Sub-options understood by the {@code -rollingUpgrade} startup flag. */
  public enum RollingUpgradeStartupOption {
    ROLLBACK, DOWNGRADE, STARTED;

    // Cached copy of values(); avoids re-allocating the array on every lookup.
    private static final RollingUpgradeStartupOption[] VALUES = values();

    /** @return the full command-line form, e.g. "-rollingUpgrade started". */
    public String getOptionString() {
      return StartupOption.ROLLINGUPGRADE.getName() + " "
          + name().toLowerCase();
    }

    /** @return true iff {@code option} is ROLLINGUPGRADE carrying this sub-option. */
    public boolean matches(StartupOption option) {
      if (option != StartupOption.ROLLINGUPGRADE) {
        return false;
      }
      return option.getRollingUpgradeStartupOption() == this;
    }

    /**
     * Parses a sub-option name, case-insensitively.
     *
     * @throws IllegalArgumentException if {@code s} matches no constant
     */
    static RollingUpgradeStartupOption fromString(String s) {
      for (RollingUpgradeStartupOption candidate : VALUES) {
        if (candidate.name().equalsIgnoreCase(s)) {
          return candidate;
        }
      }
      throw new IllegalArgumentException("Failed to convert \"" + s
          + "\" to " + RollingUpgradeStartupOption.class.getSimpleName());
    }

    /** @return a usage string such as {@code <rollback|downgrade|started>}. */
    public static String getAllOptionString() {
      final StringBuilder usage = new StringBuilder("<");
      String separator = "";
      for (RollingUpgradeStartupOption opt : VALUES) {
        usage.append(separator).append(opt.name().toLowerCase());
        separator = "|";
      }
      return usage.append('>').toString();
    }
  }

  /** Startup options recognized on the server command line. */
  public enum StartupOption {
    FORMAT  ("-format"),
    CLUSTERID ("-clusterid"),
    GENCLUSTERID ("-genclusterid"),
    REGULAR ("-regular"),
    BACKUP  ("-backup"),
    CHECKPOINT("-checkpoint"),
    UPGRADE ("-upgrade"),
    ROLLBACK("-rollback"),
    FINALIZE("-finalize"),
    ROLLINGUPGRADE("-rollingUpgrade"),
    IMPORT  ("-importCheckpoint"),
    BOOTSTRAPSTANDBY("-bootstrapStandby"),
    INITIALIZESHAREDEDITS("-initializeSharedEdits"),
    RECOVER  ("-recover"),
    FORCE("-force"),
    NONINTERACTIVE("-nonInteractive"),
    RENAMERESERVED("-renameReserved"),
    METADATAVERSION("-metadataVersion"),
    UPGRADEONLY("-upgradeOnly"),
    // -hotswap is not a real startup option; it exists only so that
    // StorageDirectory.analyzeStorage() can model the hot-swap-drive case.
    // TODO refactor StorageDirectory.analyzeStorage() so that we can do away
    // with this in StartupOption.
    HOTSWAP("-hotswap");

    // Matches strings of the form NAME(SUBOPTION), e.g. "ROLLINGUPGRADE(STARTED)".
    private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION =
        Pattern.compile("(\\w+)\\((\\w+)\\)");

    /** The command-line flag, e.g. "-format". */
    private final String name;

    // Used only with format and upgrade options.
    private String clusterId = null;

    // Used only by rolling upgrade.
    private RollingUpgradeStartupOption rollingUpgradeStartupOption;

    // Used only with the format option.
    private boolean isForceFormat = false;
    private boolean isInteractiveFormat = true;

    // Used only with the recovery option.
    private int force = 0;

    private StartupOption(String flag) {
      this.name = flag;
    }

    /** @return the command-line flag for this option. */
    public String getName() {
      return name;
    }

    /** Maps this startup option onto the NameNode role it launches. */
    public NamenodeRole toNodeRole() {
      if (this == BACKUP) {
        return NamenodeRole.BACKUP;
      }
      if (this == CHECKPOINT) {
        return NamenodeRole.CHECKPOINT;
      }
      return NamenodeRole.NAMENODE;
    }

    public void setClusterId(String cid) {
      clusterId = cid;
    }

    public String getClusterId() {
      return clusterId;
    }

    /** Only valid on the ROLLINGUPGRADE constant. */
    public void setRollingUpgradeStartupOption(String opt) {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      rollingUpgradeStartupOption = RollingUpgradeStartupOption.fromString(opt);
    }

    /** Only valid on the ROLLINGUPGRADE constant. */
    public RollingUpgradeStartupOption getRollingUpgradeStartupOption() {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      return rollingUpgradeStartupOption;
    }

    /** @return a recovery context when this is RECOVER, otherwise null. */
    public MetaRecoveryContext createRecoveryContext() {
      // Each constant has a unique flag string, so comparing identity is
      // equivalent to the old name-based comparison.
      return this == RECOVER ? new MetaRecoveryContext(force) : null;
    }

    public void setForce(int force) {
      this.force = force;
    }

    public int getForce() {
      return this.force;
    }

    public boolean getForceFormat() {
      return isForceFormat;
    }

    public void setForceFormat(boolean force) {
      isForceFormat = force;
    }

    public boolean getInteractiveFormat() {
      return isInteractiveFormat;
    }

    public void setInteractiveFormat(boolean interactive) {
      isInteractiveFormat = interactive;
    }

    @Override
    public String toString() {
      if (this != ROLLINGUPGRADE) {
        return super.toString();
      }
      return super.toString() + "(" + getRollingUpgradeStartupOption() + ")";
    }

    /** Parses a plain constant name or the NAME(SUBOPTION) form of toString(). */
    public static StartupOption getEnum(String value) {
      final Matcher m = ENUM_WITH_ROLLING_UPGRADE_OPTION.matcher(value);
      if (!m.matches()) {
        return StartupOption.valueOf(value);
      }
      final StartupOption option = StartupOption.valueOf(m.group(1));
      option.setRollingUpgradeStartupOption(m.group(2));
      return option;
    }
  }

  // Timeouts for communicating with DataNode for streaming writes/reads.
  // All values are in milliseconds.
  public static final int READ_TIMEOUT = 60 * 1000;
  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
  public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline

  /**
   * Defines the NameNode role.
   */
  public enum NamenodeRole {
    NAMENODE  ("NameNode"),
    BACKUP    ("Backup Node"),
    CHECKPOINT("Checkpoint Node");

    // Human-readable role name, returned by toString(). Made final: it is
    // assigned exactly once in the constructor, and the previous "= null"
    // initializer was dead.
    private final String description;

    private NamenodeRole(String arg) {
      this.description = arg;
    }

    @Override
    public String toString() {
      return description;
    }
  }

  /**
   * Block replica states, which a replica can go through while it is being
   * constructed.
   */
  public enum ReplicaState {
    /**
     * The replica is finalized: writing has completed and it will not be
     * modified. Its length and generation stamp no longer change, and the
     * DataNode reports it to the NameNode in block reports. Described by the
     * {@code FinalizedReplica} class.
     */
    FINALIZED(0),
    /**
     * Replica being written to: just created or being appended, sitting in a
     * write pipeline. The bytes written so far are readable. Described by the
     * {@code ReplicaBeingWritten} class.
     */
    RBW(1),
    /**
     * Replica waiting to be recovered. When a DataNode dies and restarts, all
     * of its RBW replicas become RWR; they no longer appear in any write
     * pipeline and wait for lease recovery. Described by the
     * {@code ReplicaWaitingToBeRecovered} class.
     */
    RWR(2),
    /**
     * Replica under recovery. Entered during lease recovery and block
     * recovery after the writer's lease expired (e.g. the client exited
     * abnormally mid-write). Described by the {@code ReplicaUnderRecovery}
     * class.
     */
    RUR(3),
    /**
     * Temporary replica, created while a replica is transferred between
     * DataNodes (e.g. cluster rebalance). Unlike RBW, its contents are not
     * readable, and it is deleted outright if the DataNode restarts.
     * Described by the {@code ReplicaInPipeline} class.
     */
    TEMPORARY(4);

    // Cached values() array: Enum.values() clones the array on every call,
    // so cache it once (same pattern as RollingUpgradeStartupOption.VALUES).
    private static final ReplicaState[] CACHED_VALUES = values();

    private final int value;

    private ReplicaState(int v) {
      value = v;
    }

    public int getValue() {
      return value;
    }

    /**
     * Maps an ordinal back to its state.
     *
     * @throws ArrayIndexOutOfBoundsException if {@code v} is out of range
     */
    public static ReplicaState getState(int v) {
      return CACHED_VALUES[v];
    }

    /** Read from in */
    public static ReplicaState read(DataInput in) throws IOException {
      return CACHED_VALUES[in.readByte()];
    }

    /** Write to out */
    public void write(DataOutput out) throws IOException {
      out.writeByte(ordinal());
    }
  }

  /**
   * States a block can go through while it is under construction.
   */
  static public enum BlockUCState {
    /**
     * Block construction completed. The block has at least one FINALIZED
     * replica reported by a DataNode and will not be modified: its length and
     * generation stamp no longer change. The NameNode keeps the locations of
     * the finalized replicas in memory, and a file can be closed only when
     * all of its blocks are COMPLETE.
     */
    COMPLETE,
    /**
     * The block is under construction: it was recently allocated for write or
     * append. The file was just created or is being appended to, so the
     * block's length and generation stamp are still mutable, but the portion
     * already written is visible to readers. How much is readable is learned
     * by asking the DataNodes; every replica has received at least that many
     * bytes, so any replica can serve those reads.
     */
    UNDER_CONSTRUCTION,
    /**
     * The block is being recovered. When a file's lease expires (e.g. the
     * writer exited abnormally), its last block may still be
     * UNDER_CONSTRUCTION rather than COMPLETE; lease recovery and block
     * recovery then run, synchronizing the existing replicas. While that
     * recovery is in progress the block is UNDER_RECOVERY.
     */
    UNDER_RECOVERY,
    /**
     * The block has been committed. The client reported that all bytes were
     * written — it has sent all data through the pipeline to every target
     * DataNode and received acknowledgements, fixing the block's length and
     * generation stamp — but the NameNode has not yet received any FINALIZED
     * replica report from a DataNode. A block moves from UNDER_CONSTRUCTION
     * to COMMITTED when the writer allocates the next block of the file, or
     * closes the file (for its last block).
     */
    COMMITTED;
  }
  
  // Lease holder identity used for leases held by the NameNode itself.
  public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
  // Lease recheck interval; presumably milliseconds — confirm against the
  // lease manager that consumes it.
  public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;

  // Extended-attribute names: the "raw." ones mark encryption zones and carry
  // per-file encryption info; the "security." one marks files unreadable by
  // the superuser.
  public static final String CRYPTO_XATTR_ENCRYPTION_ZONE = "raw.hdfs.crypto.encryption.zone";
  public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO = "raw.hdfs.crypto.file.encryption.info";
  public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER = "security.hdfs.unreadable.by.superuser";
}
