package com.ideal.hadoopadmin.api.bootstrap;


import com.ideal.hadoopadmin.crontab.changeDataBase.ChangeDataBase;
import com.ideal.hadoopadmin.crontab.hdfs.FlushHDFSInfo;
import com.ideal.hadoopadmin.crontab.hive.FlushHiveInfo;
import com.ideal.hadoopadmin.crontab.kerberos.KerberosAPI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Command-line entry point that dispatches one of the scheduled refresh jobs
 * based on the first program argument ("1" through "5"). With no argument, or
 * an unrecognized one, it logs the usage help and exits.
 */
public class BootStrap {
    // SLF4J convention: logger is static final.
    private static final Logger logger = LoggerFactory.getLogger(BootStrap.class);

    /**
     * Dispatches a job by its numeric code.
     *
     * @param args program arguments; args[0] selects the job:
     *             1 = kerberos cluster_user_kbrauth refresh,
     *             2 = meta_hdfs_info_bak refresh,
     *             3 = meta_hive_info refresh,
     *             4 = meta_hive_sql refresh,
     *             5 = ChangeDataBase
     */
    public static void main(String[] args) {
        if (null == args || args.length < 1) {
            printUsage();
            return;
        }
        if ("1".equals(args[0])) {
            new KerberosAPI().call();   // kerberos cluster_user_kbrauth refresh
        } else if ("2".equals(args[0])) {
            new FlushHDFSInfo().call(); // meta_hdfs_info_bak refresh
        } else if ("3".equals(args[0])) {
            new FlushHiveInfo().call(); // meta_hive_info refresh
        } else if ("4".equals(args[0])) {
            // update or insert meta_hive_sql.hiveSql keyed by meta_hive_info id
            new FlushHiveInfo().callSql(2);
        } else if ("5".equals(args[0])) {
            new ChangeDataBase().call(); // ChangeDataBase
        } else {
            printUsage();
        }
    }

    /** Logs the usage help listing every supported job code. */
    private static void printUsage() {
        logger.info("need param : 1:kerberos cluster_user_kbrauth refresh ");
        logger.info("need param : 2:meta_hdfs_info_bak refresh ");
        logger.info("need param : 3:meta_hive_info refresh ");
        logger.info("need param : 4:meta_hive_sql refresh ");
        logger.info("need param : 5:ChangeDataBase ");
    }
}
