package com.ideal.hadoopadmin.api.linux;


import com.ideal.hadoopadmin.crontab.kerberos.KerberosAPI;
import com.ideal.service.hive.HiveService;
import com.ideal.service.user.UserSevice;
import com.ideal.tools.ssh.common.CommonProperties;
import com.ideal.tools.ssh.common.CommonTools;
import com.ideal.tools.ssh.common.OperationMarket;
import com.ideal.tools.ssh.context.ClusterContext;
import com.ideal.tools.ssh.entity.LinuxMachine;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Created by CC on 2016/3/1.
 * Linux-facing user management API.
 *
 * <p>Each public method builds a set of remote operations (Linux user/group
 * management, HDFS directory layout, Hive database, KDC principal, clush batch
 * commands) on the machines carried by a {@link ClusterContext}, then executes
 * them via {@code context.doTheThing()}.</p>
 */
public class UserAPI {

    // Argument keys read from CommonProperties#getArgument. These are lookup
    // keys, not configuration values, so they are declared final constants.
    public static final String Cluster_User_Name = "Cluster_User_Name";
    public static final String Cluster_User_PW = "Cluster_User_PW";
    public static final String Cluster_User_SysPW = "Cluster_User_SysPW";
    public static final String Cluster_User_Client = "Cluster_User_Client";
    public static final String Cluster_HIVE_TBLS = "Cluster_HIVE_TBLS";
    public static final String Cluster_HDFS_PATH_OWNER = "Cluster_HDFS_PATH_OWNER";
    public static final String Cluster_User_CltQuota = "Cluster_User_CltQuota";
    public static final String Cluster_User_HomeDir = "Cluster_User_HomeDir";

    /**
     * Provisions a new cluster user on every machine in the context.
     *
     * <p>Per machine role: NameNode machines get the Linux user/group plus the
     * HDFS directory tree (parent, public, private, .Trash, hive temp); Client
     * machines get the Linux user with a per-host home dir and a Hive log dir;
     * ClushShell machines fan the user out to the datanodes; Hive machines get
     * a database; KDC machines get a Kerberos principal.</p>
     *
     * @param context cluster context carrying the machine list and arguments;
     *                returns without doing anything if {@code Cluster_User_Name}
     *                is blank
     */
    public static void addClusterUser(ClusterContext context){
        List<LinuxMachine> machineList = context.getOriginalList(); // original machine list
        CommonProperties commonProperties = context.getCommonProperties();
        // Merge per-machine operations. NOTE(review): if operations ever need a
        // strict cross-machine execution order, this merge makes that hard.
        List<LinuxMachine> finalList = CommonTools.megerMachineOperation(machineList, context);
        String userName = commonProperties.getArgument(Cluster_User_Name, "");
        if (StringUtils.isBlank(userName)) {
            return; // nothing to provision without a user name
        }
        String passW = commonProperties.getArgument(Cluster_User_PW, "");
        String sysPW = commonProperties.getArgument(Cluster_User_SysPW, "");

        // Dedicated group used to separate HDFS permissions per user.
        String group = CommonTools.getUserGroupByUser(userName, commonProperties);
        // Super user that owns the shared (parent/public) HDFS directories.
        String hdfsSuperUser = commonProperties.getProperty(CommonProperties.HDFS_SUPER_USER, "hdfs");

        // Per-client home directory mapping, resolved per host below.
        String homedirs = commonProperties.getArgument(Cluster_User_HomeDir, "");

        // Register the appropriate operations for every role each machine plays.
        for (LinuxMachine linuxMachine : finalList) {
            List<LinuxMachine.MachineType> machineRols = linuxMachine.getMachineRoleTypes();
            for (LinuxMachine.MachineType machineType : machineRols) {
                if (machineType == LinuxMachine.MachineType.NameNode) {
                    // Create the Linux user:  sudo adduser <user>
                    linuxMachine.initOperation(OperationMarket.AddLinuxUser(userName));
                    // Set the system password:  echo '<pw>' | sudo passwd --stdin <user>
                    linuxMachine.initOperation(OperationMarket.ChangeUserPassWord(userName, sysPW));
                    // Create the user group:  sudo groupadd <user>_group
                    linuxMachine.initOperation(OperationMarket.AddLinuxGroup(group));
                    // Add the user to the group:  sudo usermod -a -G '<group>' '<user>'
                    linuxMachine.initOperation(OperationMarket.AddUserToGroup(userName, group));
                    // HDFS parent dir, e.g. /user/<user>, owned hdfs:<group>, mode 750
                    linuxMachine.initOperation(
                            OperationMarket.MakeHDFSDir(CommonTools.getHDFSParentDir(userName, commonProperties)
                                    , hdfsSuperUser, group, "750"), true);
                    // HDFS public dir, e.g. /user/<user>/public, owned hdfs:<group>, mode 750
                    linuxMachine.initOperation(
                            OperationMarket.MakeHDFSDir(CommonTools.getHDFSPublicDir(userName, commonProperties)
                                    , hdfsSuperUser, group, "750"), true);
                    // HDFS private dir, e.g. /user/<user>/private, owned <user>:<user>, mode 700
                    linuxMachine.initOperation(
                            OperationMarket.MakeHDFSDir(CommonTools.getHDFSPrivateDir(userName, commonProperties)
                                    , userName, userName, "700"), true);
                    // HDFS trash dir, e.g. /user/<user>/.Trash, owned <user>:<user>, mode 700
                    linuxMachine.initOperation(
                            OperationMarket.MakeHDFSDir(CommonTools.getHDFSTrashDir(userName, commonProperties)
                                    , userName, userName, "700"), true);
                    // HDFS hive temp dir, e.g. /tmp/hive-<user>, owned <user>:<group>, mode 755
                    linuxMachine.initOperation(
                            OperationMarket.MakeHDFSDir(CommonTools.getHDFSHiveTempDir(userName, commonProperties)
                                    , userName, group, "755"), true);
                } else if (machineType == LinuxMachine.MachineType.Client) {

                    // Resolve this client's home directory from the host mapping.
                    String homedir = UserSevice.getClientUserHome(homedirs, linuxMachine.getSshAuthor().getHost());

                    // Create the Linux user with the resolved home dir.
                    linuxMachine.initOperation(OperationMarket.AddLinuxUser(userName, homedir));
                    // Clients get the user-facing password, not the system one.
                    linuxMachine.initOperation(OperationMarket.ChangeUserPassWord(userName, passW));
                    // Local Hive log directory for the user.
                    linuxMachine.initOperation(
                            OperationMarket.MakeLinuxDir(CommonTools.getClientHiveLogDir(userName, commonProperties)
                                    , userName, userName, "755"));
                } else if (machineType == LinuxMachine.MachineType.ClushShell) {
                    // Batch-create the user on all client machines via clush.
                    linuxMachine.initOperation(
                            OperationMarket.AddLinuxUserByClush(userName, sysPW));
                } else if (machineType == LinuxMachine.MachineType.Hive) {
                    // Create the user's Hive database.
                    linuxMachine.initOperation(OperationMarket.CreateHiveDB(userName, userName));
                } else if (machineType == LinuxMachine.MachineType.KDC) {
                    // Create the user's Kerberos principal.
                    linuxMachine.initOperation(OperationMarket.AddKDCPrinc(userName, sysPW));
                }
            }
        }

        // Swap in the merged machine list and execute all registered operations.
        context.setMachineList(finalList);
        context.doTheThing();
    }


    /**
     * Removes a cluster user from every machine in the context: Linux user and
     * group, the HDFS parent directory tree, the Hive database (with its
     * tables), the KDC principal, and the clush-managed client users.
     *
     * @param context cluster context carrying the machine list and the
     *                {@code Cluster_User_Name} argument; returns without doing
     *                anything if the user name is blank (guards against
     *                registering destructive operations built from "")
     */
    public static void delClusterUser(ClusterContext context){
        List<LinuxMachine> machineList = context.getOriginalList();
        CommonProperties commonProperties = context.getCommonProperties();
        // Merge per-machine operations (same ordering caveat as addClusterUser).
        List<LinuxMachine> finalList = CommonTools.megerMachineOperation(machineList, context);
        String userName = commonProperties.getArgument(Cluster_User_Name, "");
        if (StringUtils.isBlank(userName)) {
            return; // never build delete operations from a blank user name
        }
        // Group name mirrors the one created in addClusterUser.
        String group = CommonTools.getUserGroupByUser(userName, commonProperties);
        // The user's Hive tables, needed to drop the database cleanly.
        HiveService hiveService = new HiveService();
        List<String> hiveTbls = hiveService.getHiveTableByUserName(userName);

        for (LinuxMachine linuxMachine : finalList) {
            List<LinuxMachine.MachineType> machineRols = linuxMachine.getMachineRoleTypes();
            for (LinuxMachine.MachineType machineType : machineRols) {
                if (machineType == LinuxMachine.MachineType.NameNode) {
                    // Remove the Linux user.
                    linuxMachine.initOperation(OperationMarket.DelLinuxUser(userName));
                    // Remove the user's group.
                    linuxMachine.initOperation(OperationMarket.DelLinuxGroup(group));
                    // Remove the whole HDFS parent directory tree.
                    linuxMachine.initOperation(
                            OperationMarket.RMHDFSDir(CommonTools.getHDFSParentDir(userName, commonProperties))
                            , true);
                } else if (machineType == LinuxMachine.MachineType.Client) {
                    // Remove the Linux user from the client.
                    linuxMachine.initOperation(OperationMarket.DelLinuxUser(userName));
                    // Remove the local Hive log directory.
                    linuxMachine.initOperation(OperationMarket.RMLinuxDir(
                            CommonTools.getClientHiveLogDir(userName, commonProperties)));
                } else if (machineType == LinuxMachine.MachineType.Hive) {
                    // Drop the Hive database together with its tables.
                    linuxMachine.initOperation(OperationMarket.DropHiveDB(userName, hiveTbls));
                } else if (machineType == LinuxMachine.MachineType.KDC) {
                    // Remove the Kerberos principal.
                    linuxMachine.initOperation(OperationMarket.DelKDCPrinc(userName));
                } else if (machineType == LinuxMachine.MachineType.ClushShell) {
                    // Batch-remove the user from all clush-managed machines.
                    linuxMachine.initOperation(OperationMarket.DelLinuxUserByClush(userName));
                }
            }
        }

        // Swap in the merged machine list and execute all registered operations.
        context.setMachineList(finalList);
        context.doTheThing();
    }


    /**
     * User management → reset password.
     * Only changes the password on Client machines.
     *
     * @param context cluster context carrying {@code Cluster_User_Name} and
     *                {@code Cluster_User_PW}; returns without doing anything if
     *                the user name is blank
     */
    public static void resetUserPasswd(ClusterContext context){
        List<LinuxMachine> machineList = context.getOriginalList();
        CommonProperties commonProperties = context.getCommonProperties();
        String userName = commonProperties.getArgument(UserAPI.Cluster_User_Name, "");
        String passwd = commonProperties.getArgument(UserAPI.Cluster_User_PW, "");
        if (StringUtils.isBlank(userName)) {
            return;
        }
        // Restrict the operation to Client machines only.
        List<LinuxMachine> finalMachines = CommonTools.getMachineListByType(machineList, context,
                LinuxMachine.MachineType.Client);
        for (LinuxMachine linuxMachine : finalMachines) {
            linuxMachine.initOperation(OperationMarket.ChangeUserPassWord(
                    userName, passwd));
        }

        // Swap in the filtered machine list and execute.
        context.setMachineList(finalMachines);
        context.doTheThing();
    }

    /**
     * Cluster management → machine management: "sync user" button.
     * Creates the user (and sets the password) only on the Client machines
     * whose host appears in the comma-separated {@code Cluster_User_Client}
     * argument.
     *
     * @param context cluster context carrying {@code Cluster_User_Name},
     *                {@code Cluster_User_PW} and {@code Cluster_User_Client};
     *                returns without doing anything if the user name or the
     *                target host list is blank
     */
    public static void addClientUser(ClusterContext context){
        List<LinuxMachine> machineList = context.getOriginalList();
        CommonProperties commonProperties = context.getCommonProperties();
        String userName = commonProperties.getArgument(UserAPI.Cluster_User_Name, "");
        String passwd = commonProperties.getArgument(UserAPI.Cluster_User_PW, "");
        String machineIPs = commonProperties.getArgument(UserAPI.Cluster_User_Client, "");
        // getArgument(..., "") never yields null; blank is the real degenerate
        // case (split(",") on "" would produce [""], matching no host).
        if (StringUtils.isBlank(machineIPs) || StringUtils.isBlank(userName)) {
            return;
        }
        String[] ips = machineIPs.split(",");
        // Restrict the operation to Client machines only.
        List<LinuxMachine> finalMachines = CommonTools.getMachineListByType(machineList, context,
                LinuxMachine.MachineType.Client);
        for (LinuxMachine linuxMachine : finalMachines) {
            // Only act on clients whose host was explicitly requested.
            if (ArrayUtils.contains(ips, linuxMachine.getSshAuthor().getHost())) {
                linuxMachine.initOperation(OperationMarket.AddLinuxUser(userName));
                linuxMachine.initOperation(OperationMarket.ChangeUserPassWord(userName, passwd));
            }
        }
        // Swap in the filtered machine list and execute.
        context.setMachineList(finalMachines);
        context.doTheThing();
    }

}
