package org.study.module.hadoop;

import org.study.module.common.ModuleConstant;
import org.study.shell.ShellBase;
import org.study.shell.ShellFile;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class HadoopShell {

    /**
     * Pre-install check: verifies that a JDK is available on the host.
     *
     * @return shell commands that run {@code java -version} and report success/failure
     */
    public static List<String> preCheck() {
        List<String> commands = new ArrayList<>();
        commands.add("java -version");
        commands.addAll(ShellBase.command("JDK版本存在！", "请先安装JDK版本！", true));
        return commands;
    }

    /**
     * Installs Hadoop under {@code installPath + "/hadoop"} and creates the
     * tmp / hdfs-name / hdfs-data directories inside it, then registers the
     * HADOOP_HOME environment and verifies the installation.
     *
     * @param softPath    directory containing the hadoop tar archive
     * @param installPath base installation directory
     * @return shell commands performing the installation
     */
    public static List<String> install(String softPath, String installPath) {
        List<String> commands = new ArrayList<>();
        installPath = installPath + "/hadoop";
        createPath(installPath, commands);
        commands.add("echo \"查看hadoop的安装包\"");
        // Locate the tarball; the match is exported into shell variable $hadoop_tar.
        commands.addAll(ShellFile.findByName(softPath, "hadoop", "hadoop_tar"));
        commands.add("tar -xzvf $hadoop_tar -C " + installPath);
        commands.add("cd " + installPath);
        // The unpacked hadoop-3.x directory is exported as $hadoop_install.
        commands.addAll(ShellFile.findByName(installPath, "hadoop-3", "hadoop_install"));
        source(commands);
        commands.add("echo \"设置hadoop运行的JDK环境\"");
        // Reuse the single definition of the JAVA_HOME export instead of duplicating it.
        commands.add(javaHome());
        // FIX: hadoop has no "-version" flag; the correct sub-command is "hadoop version".
        commands.add("hadoop version");
        return commands;
    }

    /**
     * Builds the command that pins Hadoop's JDK by appending a JAVA_HOME export
     * to {@code hadoop-env.sh}.
     *
     * @return the shell command string
     */
    public static String javaHome() {
        return "echo \"export JAVA_HOME=$JAVA_HOME\" >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh";
    }

    /** @return the Hadoop temp directory path under the given install root */
    public static String getHadoopTmp(String installPath) {
        return installPath + "/" + ModuleConstant.HADOOP + "/tmp";
    }

    /** @return the NameNode metadata directory path under the given install root */
    public static String getHadoopHdfsName(String installPath) {
        return installPath + "/" + ModuleConstant.HADOOP + "/hdfs/name";
    }

    /** @return the DataNode data directory path under the given install root */
    public static String getHadoopHdfsData(String installPath) {
        return installPath + "/" + ModuleConstant.HADOOP + "/hdfs/data";
    }

    /** @return the JournalNode edits directory path under the given install root */
    public static String getHadoopJournal(String installPath) {
        return installPath + "/" + ModuleConstant.HADOOP + "/journal";
    }

    /** Appends the mkdir commands for the Hadoop temp directory. */
    public static void createHadoopTmp(String installPath, List<String> commands) {
        commands.addAll(ShellFile.createPath(getHadoopTmp(installPath)));
    }

    /** Appends the mkdir commands for the NameNode metadata directory. */
    public static void createHadoopHdfsName(String installPath, List<String> commands) {
        commands.addAll(ShellFile.createPath(getHadoopHdfsName(installPath)));
    }

    /** Appends the mkdir commands for the DataNode data directory. */
    public static void createHadoopHdfsData(String installPath, List<String> commands) {
        commands.addAll(ShellFile.createPath(getHadoopHdfsData(installPath)));
    }

    /** Appends the mkdir commands for the JournalNode edits directory. */
    public static void createHadoopJournal(String installPath, List<String> commands) {
        commands.addAll(ShellFile.createPath(getHadoopJournal(installPath)));
    }

    /** @return the Kerberos config directory path under the given install root */
    public static String getHadoopKerberos(String installPath) {
        return installPath + "/" + ModuleConstant.HADOOP + "/kerberos";
    }

    /** Appends the mkdir commands for the Kerberos config directory. */
    public static void createHadoopKerberos(String installPath, List<String> commands) {
        commands.addAll(ShellFile.createPath(getHadoopKerberos(installPath)));
    }

    /**
     * Appends mkdir commands for the tmp / hdfs-name / hdfs-data directories
     * directly under {@code installPath}, then makes the tree world-readable.
     * NOTE(review): journal/kerberos dirs are created separately via the
     * createHadoop* helpers above.
     */
    public static void createPath(String installPath, List<String> commands) {
        commands.addAll(ShellFile.createPath(installPath + "/tmp"));
        commands.addAll(ShellFile.createPath(installPath + "/hdfs/name"));
        commands.addAll(ShellFile.createPath(installPath + "/hdfs/data"));
        commands.add("sudo chmod -R 755 " + installPath);
    }

    /**
     * Appends commands that write HADOOP_HOME and PATH exports into a profile
     * script ({@code hadoop_env.sh}). The {@code \$} escapes defer variable
     * expansion until the profile is sourced, not when it is written.
     */
    public static void source(List<String> commands) {
        List<String> pros = new ArrayList<>();
        pros.add("export HADOOP_HOME=$hadoop_install");
        pros.add("export PATH=\\$PATH:\\$HADOOP_HOME/bin");
        pros.add("export PATH=\\$PATH:\\$HADOOP_HOME/sbin");
        commands.addAll(ShellBase.addProfile(pros, "hadoop_env.sh"));
    }

    /**
     * Formats the NameNode for the HDFS component.
     * Run only on the NameNode host.
     */
    public static List<String> hdfsNameNodeFormat() {
        List<String> commands = new ArrayList<>();
        commands.add("echo \"=== 格式化NameNode ===\"");
        commands.add("hdfs namenode -format");
        return commands;
    }

    /**
     * Cluster setup: writes the worker host list into
     * {@code $HADOOP_HOME/etc/hadoop/workers} via a heredoc.
     *
     * @param slaves worker node hostnames, one per line
     */
    public static List<String> works(List<String> slaves) {
        List<String> commands = new ArrayList<>();
        commands.add("cat > $HADOOP_HOME/etc/hadoop/workers<<EOF");
        commands.addAll(slaves);
        commands.add("EOF");
        return commands;
    }

    /**
     * Starts a NameNode daemon; run on every NameNode host.
     */
    public static String nameNodeStart() {
        return "hdfs --daemon start namenode";
    }

    /**
     * Formats the NameNode.
     * In HA mode, format only on the active (primary) node.
     */
    public static String nameNodeFormat() {
        return "hdfs namenode -format";
    }

    /**
     * Syncs metadata from the active NameNode (nn1) to the standby NameNode (nn2).
     * In HA mode, run on the standby node.
     */
    public static String nameNodeStandby() {
        return "hdfs namenode -bootstrapStandby";
    }

    /**
     * Creates the HA znodes and state in ZooKeeper.
     * Run only on the active NameNode host.
     */
    public static String zkfcFormat() {
        return "hdfs zkfc -formatZK";
    }

    /**
     * Starts the ZKFC daemon; run on every NameNode host.
     */
    public static String zkfcStart() {
        return "hdfs --daemon start zkfc";
    }

    /**
     * Starts a DataNode daemon; run on every DataNode host.
     */
    public static String dataNodeStart() {
        return "hdfs --daemon start datanode";
    }

    /**
     * In HA mode, run on every JournalNode host.
     * FIX: use the Hadoop 3 {@code --daemon} form for consistency with the other
     * start commands; {@code hadoop-daemon.sh} is deprecated in Hadoop 3.x
     * (this file installs hadoop-3, see install()).
     */
    public static String journalNodeStart() {
        return "hdfs --daemon start journalnode";
    }

    /**
     * Starts a ResourceManager daemon; run on every YARN ResourceManager host.
     */
    public static String resourceManagerStart() {
        return "yarn --daemon start resourcemanager";
    }

    /**
     * Starts a NodeManager daemon; run on every YARN NodeManager host.
     */
    public static String nodeManagerStart() {
        return "yarn --daemon start nodemanager";
    }

}
