package org.study.module;

import org.study.entity.ClusterInfoEntity;
import org.study.entity.ClusterMachineEntity;
import org.study.entity.ClusterModuleInfoEntity;
import org.study.module.common.ModuleCommon;
import org.study.module.common.ModuleConstant;
import org.study.module.common.ShellPre;
import org.study.module.hadoop.*;
import org.study.module.jdk.JdkShellBuilder;
import org.study.module.model.ModuleMachineMode;
import org.study.module.zookeeper.ZookeeperShell;
import org.study.shell.ShellBase;
import org.study.shell.ShellRun;
import org.study.utils.FileUtils;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Builds the shell scripts that install the cluster components (JDK,
 * ZooKeeper, Hadoop) across the machines of a cluster.
 */
public class InstallBuilder {
    // Grouping keys accepted by toModuleMapList().
    private static final String MODULE_NAME = "moduleName";
    private static final String MODULE_ROLE = "moduleRole";
    private static final String HOST = "host";

    // Hadoop role names as stored in ModuleMachineMode.moduleRole.
    private static final String RESOURCE_MANAGER = "resourceManager";
    private static final String JOURNAL_NODE = "journalNode";
    private static final String NAME_NODE = "nameNode";
    private static final String DATA_NODE = "dataNode";

    /**
     * Local directory where the generated install scripts are written.
     */
    private final String localPath;

    public InstallBuilder(String localPath) {
        this.localPath = localPath;
    }

    /**
     * Initializes every machine of the cluster: creates the install user,
     * prepares the upload/install directories, then reboots the machine.
     *
     * <p>Failures are best-effort per machine: an error on one host is logged
     * and the remaining hosts are still processed.
     *
     * @param entity          cluster-wide settings (user, password, paths)
     * @param machineEntities machines to initialize
     */
    public void pre(ClusterInfoEntity entity, List<ClusterMachineEntity> machineEntities) {
        List<String> commands = ShellPre.preInstallByWheel(entity.getUser(), entity.getPassword(), entity.getUploadPath(), entity.getInstallPath());
        commands.add(ShellBase.reboot());
        String path = FileUtils.write(commands, localPath, ModuleConstant.USER);
        String remotePath = ShellPre.shellPath(entity.getUploadPath());
        for (ClusterMachineEntity machineEntity : machineEntities) {
            // try-with-resources closes the connection; the former explicit close() was redundant
            try (ShellRun run = new ShellRun(entity.getUser(), entity.getPassword(), machineEntity.getIp())) {
                run.uploadBySftp(path, remotePath);
                run.executeCommand("sh " + remotePath + "/" + ModuleConstant.USER + ".sh");
            } catch (Exception e) {
                // best-effort: keep going even if one host fails
                e.printStackTrace();
            }
        }
    }

    /**
     * Generates the install scripts for every component, in dependency order:
     * JDK first, then ZooKeeper (HA clusters only), then Hadoop.
     *
     * @param modules            one entry per component (name, version, paths)
     * @param cluster            cluster-wide settings
     * @param moduleMachineModes component/role-to-machine assignments
     */
    public void install(List<ClusterModuleInfoEntity> modules, ClusterInfoEntity cluster, List<ModuleMachineMode> moduleMachineModes) {
        Map<String, ClusterModuleInfoEntity> moduleMap = toModuleMap(modules);
        Map<String, List<ModuleMachineMode>> moduleMapList = toModuleMapList(moduleMachineModes, MODULE_NAME);
        // 1. JDK
        jdkBuilder(cluster, moduleMap.get(ModuleConstant.JDK), moduleMapList.get(ModuleConstant.JDK));
        // 2. ZooKeeper is only needed for a high-availability cluster
        if (cluster.getHighAvailable() == 1) {
            zookeeperBuilder(cluster, moduleMap.get(ModuleConstant.ZOOKEEPER), moduleMapList.get(ModuleConstant.ZOOKEEPER));
        }
        // 3. Hadoop
        hadoopBuilder(cluster, moduleMap.get(ModuleConstant.HADOOP), moduleMapList);
    }

    /**
     * Indexes module entities by module name.
     */
    private Map<String, ClusterModuleInfoEntity> toModuleMap(List<ClusterModuleInfoEntity> modules) {
        Map<String, ClusterModuleInfoEntity> map = new HashMap<>();
        for (ClusterModuleInfoEntity module : modules) {
            map.put(module.getModuleName(), module);
        }
        return map;
    }

    /**
     * Groups machine/role assignments by the given attribute.
     *
     * @param modules assignments to group
     * @param key     one of {@link #MODULE_NAME}, {@link #MODULE_ROLE}, {@link #HOST}
     * @return map from attribute value to the assignments carrying that value
     * @throws IllegalArgumentException if {@code key} is not a supported attribute
     */
    private Map<String, List<ModuleMachineMode>> toModuleMapList(List<ModuleMachineMode> modules, String key) {
        Map<String, List<ModuleMachineMode>> map = new HashMap<>();
        for (ModuleMachineMode module : modules) {
            String value;
            if (MODULE_NAME.equals(key)) {
                value = module.getModuleName();
            } else if (MODULE_ROLE.equals(key)) {
                value = module.getModuleRole();
            } else if (HOST.equals(key)) {
                value = module.getHost();
            } else {
                // Previously an unknown key silently reused the value from the
                // previous iteration (the variable lived outside the loop).
                // Fail fast instead of mis-grouping.
                throw new IllegalArgumentException("unsupported grouping key: " + key);
            }
            map.computeIfAbsent(value, k -> new ArrayList<>()).add(module);
        }
        return map;
    }

    /**
     * Generates the ZooKeeper install scripts: one per node (each node needs a
     * unique myid) plus a shared configuration/start script.
     */
    private void zookeeperBuilder(ClusterInfoEntity cluster, ClusterModuleInfoEntity entity, List<ModuleMachineMode> modes) {
        int id = 1;
        // myid -> hostname, needed to render the server.N entries of zoo.cfg
        Map<Integer, String> hosts = new HashMap<>();
        for (ModuleMachineMode mode : modes) {
            hosts.put(id, mode.getHost());
            List<String> nodeCommands = ZookeeperShell.install(cluster, entity, id);
            // write the per-node script locally; remote upload/execution is still TODO
            FileUtils.writeInstallSh(nodeCommands, localPath, ModuleConstant.ZOOKEEPER + "_" + id);
            id++;
        }
        List<String> commands = ZookeeperShell.config(cluster.getInstallPath(), hosts);
        commands.add(ZookeeperShell.start());
        // shared config + start script; remote upload/execution is still TODO
        FileUtils.writeInstallSh(commands, localPath, ModuleConstant.ZOOKEEPER);
    }

    /**
     * Generates the JDK install script (installation plus a version check).
     */
    private void jdkBuilder(ClusterInfoEntity cluster, ClusterModuleInfoEntity entity, List<ModuleMachineMode> modes) {
        JdkShellBuilder builder = new JdkShellBuilder();
        builder.install(cluster, entity).check();
        // write the script locally; remote upload/execution is still TODO
        FileUtils.writeInstallSh(builder.toList(), localPath, ModuleConstant.JDK);
    }

    /**
     * Generates the Hadoop install scripts, one per host, each containing the
     * unpack/env-setup commands plus host-specific core-site.xml,
     * hdfs-site.xml, mapred-site.xml and yarn-site.xml configuration.
     *
     * <p>Only the high-availability layout is implemented; non-HA clusters are
     * currently a no-op.
     */
    private void hadoopBuilder(ClusterInfoEntity cluster, ClusterModuleInfoEntity entity, Map<String, List<ModuleMachineMode>> moduleMapList) {
        if (cluster.getHighAvailable() != 1) {
            // TODO: non-HA Hadoop installation is not implemented yet
            return;
        }
        List<String> baseCommands = new ArrayList<>();
        Map<String, List<ModuleMachineMode>> roleMap = toModuleMapList(moduleMapList.get(ModuleConstant.HADOOP), MODULE_ROLE);

        // 1. unpack the tarball into the install directory
        baseCommands.addAll(ModuleCommon.installByTar(cluster, entity));
        // 2. point Hadoop at the JDK
        baseCommands.add(HadoopShell.javaHome());
        // 3. export environment variables
        HadoopShell.source(baseCommands);

        // 4. core-site.xml: default FS, tmp dir, ZooKeeper quorum
        HadoopConfigBuilder coreSite = new HadoopConfigBuilder();
        coreSite.addConfigHead();
        coreSite.fs(cluster.getClusterName()).tmpDir(HadoopShell.getHadoopTmp(cluster.getInstallPath()));
        String zkQuorum = joinHosts(moduleMapList.get(ModuleConstant.ZOOKEEPER), ":2181");
        coreSite.zookeeperHosts(zkQuorum).addConfigEnd();
        HadoopShell.createHadoopTmp(cluster.getInstallPath(), baseCommands);
        baseCommands.addAll(coreSite.toList());

        // The JournalNode quorum is identical for every host; compute it once
        // instead of rebuilding it inside the host loop.
        String journalQuorum = joinHosts(roleMap.get(JOURNAL_NODE), ":8485");

        Map<String, List<ModuleMachineMode>> hostMap = toModuleMapList(moduleMapList.get(ModuleConstant.HADOOP), HOST);
        // NOTE(review): both configs below are derived from the NAME_NODE
        // hosts, including the ResourceManager one — presumably RMs are
        // co-located with the NameNodes; confirm this is intentional.
        List<String> yarnConfig = getResourceManager(roleMap.get(NAME_NODE));
        List<String> hdfsConfig = getNameNodeHosts(cluster.getClusterName(), roleMap.get(NAME_NODE));

        for (Map.Entry<String, List<ModuleMachineMode>> hostEntry : hostMap.entrySet()) {
            String host = hostEntry.getKey();
            List<String> commands = new ArrayList<>(baseCommands);
            // all Hadoop roles assigned to this particular host
            Map<String, List<ModuleMachineMode>> hostRoleMap = toModuleMapList(hostEntry.getValue(), MODULE_ROLE);

            // 5. hdfs-site.xml
            HDFSConfigBuilder hdfsSite = new HDFSConfigBuilder();
            hdfsSite.addConfigHead();
            hdfsSite.nameServices(cluster.getClusterName());
            hdfsSite.addConfigs(hdfsConfig);
            hdfsSite.nameNodeSharedEditsDir(journalQuorum, cluster.getClusterName());
            if (hostRoleMap.containsKey(JOURNAL_NODE)) {
                HadoopShell.createHadoopJournal(cluster.getInstallPath(), commands);
                hdfsSite.journalNodeEditDir(HadoopShell.getHadoopJournal(cluster.getInstallPath()));
            }
            if (hostRoleMap.containsKey(NAME_NODE)) {
                HadoopShell.createHadoopHdfsName(cluster.getInstallPath(), commands);
                hdfsSite.nameNodeDir(HadoopShell.getHadoopHdfsName(cluster.getInstallPath()));
            }
            if (hostRoleMap.containsKey(DATA_NODE)) {
                HadoopShell.createHadoopHdfsData(cluster.getInstallPath(), commands);
                hdfsSite.replicationNum("3").dataNodeDir(HadoopShell.getHadoopHdfsData(cluster.getInstallPath()));
            }
            hdfsSite.addConfigEnd();
            commands.addAll(hdfsSite.toList());

            // 6. mapred-site.xml
            MapReduceConfigBuilder mrSite = new MapReduceConfigBuilder();
            mrSite.addConfigHead();
            mrSite.mapreduceFramework().journalNodes(journalQuorum);
            mrSite.resourceManagerAddress(cluster.getClusterName());
            mrSite.jobHistoryAddress(host + ":10020").jobHistoryWebAddress(host + ":19888");
            mrSite.mapOutputCompress("true").mapOutputCompressCodec();
            mrSite.addConfigEnd();
            commands.addAll(mrSite.toList());

            // 7. yarn-site.xml: RM nodes get HA/recovery settings, all other
            // nodes get the NodeManager settings
            YARNConfigBuilder yarnSite = new YARNConfigBuilder();
            yarnSite.addConfigHead();
            yarnSite.resourceManagerHa().resourceManagerClusterId(cluster.getClusterName());
            yarnSite.addConfigs(yarnConfig);
            if (hostRoleMap.containsKey(RESOURCE_MANAGER)) {
                yarnSite.resourceManagerZkAddress(zkQuorum);
                yarnSite.resourceManagerRecovery();
            } else {
                yarnSite.nodeManagerAuxServices();
                yarnSite.nodeManagerResourceMemory("1024");
                yarnSite.nodeManagerResourceCpu("2");
            }
            yarnSite.addConfigEnd();
            commands.addAll(yarnSite.toList());

            // one script per host; remote upload/execution is still TODO
            FileUtils.writeInstallSh(commands, localPath, ModuleConstant.HADOOP + "_" + host);
        }
    }

    /**
     * Joins "host + suffix" entries with commas, e.g. "h1:2181,h2:2181".
     * Returns an empty string for an empty list.
     */
    private static String joinHosts(List<ModuleMachineMode> modes, String suffix) {
        StringBuilder sb = new StringBuilder();
        for (ModuleMachineMode mode : modes) {
            if (sb.length() > 0) {
                sb.append(",");
            }
            sb.append(mode.getHost()).append(suffix);
        }
        return sb.toString();
    }

    /**
     * Builds the yarn-site.xml entries for the HA ResourceManagers, assigning
     * one logical id ("rm1", "rm2", ...) per node.
     */
    private List<String> getResourceManager(List<ModuleMachineMode> modes) {
        StringBuilder rmIds = new StringBuilder();
        int index = 1;
        YARNConfigBuilder yarnSite = new YARNConfigBuilder();
        for (ModuleMachineMode mode : modes) {
            String rm = "rm" + index;
            yarnSite.haResourceMangerHostName(rm, mode.getHost());
            yarnSite.haResourceManagerResourceTrackerAddress(rm, mode.getHost() + ":8031");
            yarnSite.haResourceManagerAddress(rm, mode.getHost() + ":8032");
            yarnSite.haResourceManagerSchedulerAddress(rm, mode.getHost() + ":8030");
            yarnSite.haResourceManagerAdminAddress(rm, mode.getHost() + ":8033");
            yarnSite.haResourceManagerWebappHttpsAddress(rm, mode.getHost() + ":8090");
            yarnSite.haResourceManagerWebappAddress(rm, mode.getHost() + ":8080");
            // BUG FIX: ids were concatenated with no separator and then the
            // last character was chopped off ("rm1rm2" -> "rm1rm").
            // yarn.resourcemanager.ha.rm-ids must be a comma-separated list.
            if (rmIds.length() > 0) {
                rmIds.append(",");
            }
            rmIds.append(rm);
            index++;
        }
        yarnSite.resourceManagerHaRmIds(rmIds.toString());
        return yarnSite.toList();
    }

    /**
     * Builds the hdfs-site.xml entries describing the HA NameNodes (HTTP/RPC/
     * HTTPS addresses plus the comma-separated NameNode id list).
     */
    private List<String> getNameNodeHosts(String clusterName, List<ModuleMachineMode> modes) {
        HDFSConfigBuilder hdfsSite = new HDFSConfigBuilder();
        StringBuilder nameNodeIds = new StringBuilder();
        hdfsSite.automaticFailover();
        hdfsSite.failoverProProvider(clusterName);
        for (ModuleMachineMode mode : modes) {
            hdfsSite.nameNodeHttpAddress(clusterName, mode.getHost(), "9870");
            hdfsSite.nameNodeRpcAddress(clusterName, mode.getHost(), "9000");
            hdfsSite.nameNodeHttpsAddress(clusterName, mode.getHost(), "50470");
            if (nameNodeIds.length() > 0) {
                nameNodeIds.append(",");
            }
            nameNodeIds.append(mode.getHost());
        }
        hdfsSite.haNameNodes(clusterName, nameNodeIds.toString());
        return hdfsSite.toList();
    }

    /**
     * TODO: Kerberos security setup — not implemented yet.
     */
    private void kerberos() {

    }

    /**
     * TODO: LDAP integration — not implemented yet.
     */
    private void ldap() {

    }
}
