package com.talkingdata.hadoop.guard.service;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.talkingdata.hadoop.guard.bean.*;
import com.talkingdata.hadoop.guard.mapper.ApplicationMapper;
import com.talkingdata.hadoop.guard.mapper.StudentMapper;
import com.talkingdata.hadoop.guard.util.DataTime;
import com.talkingdata.hadoop.guard.util.LoadUrl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

/**
 * getHdfsData gathers:
 * 1. cluster state   ->  getDfsClusterStatus()
 * 2. namenode state  ->  getNamenodeState()
 * 3. datanode state  ->  getDataNodeState()
 * 4. hdfs dir state  ->  getFolderStatus()
 * <p>
 * getYarnData gathers:
 * 1. cluster state          ->  getYarnClusterStatus()
 * 2. application state      ->  getApplications()
 * 3. resourcemanager state  ->  getRMState()
 * 4. scheduler state        ->  getScheduler()
 */


@Service
public class HadoopHandlerSerivce {
    // NOTE(review): class name misspells "Service"; kept unchanged so existing
    // references (Spring bean name, callers) still resolve.
    public static Logger log = Logger.getLogger(HadoopHandlerSerivce.class);

    /** HDFS configuration; site files are added in dfsInit(). */
    Configuration dfsConfig = new Configuration();
    /** YARN configuration; site files are added in yarnInit(). */
    YarnConfiguration yarnConfig = new YarnConfiguration();
    /** Newest application "finishedTime" already persisted; only newer apps are collected. */
    long maxFinishedTime;
    /** HDFS nameservice id (from XmlConfig.getServiceId()). */
    String hdfsNameServices;
    /** YARN cluster id (yarn.resourcemanager.cluster-id). */
    String yarnNameServices;
    /** Handle to the configured HDFS filesystem, created in dfsInit(). */
    FileSystem myhdfs;
    /** Hostname of the currently active NameNode, discovered in getNamenodeState(). */
    String activeNamenode;
    // NOTE(review): despite the name this actually holds the HTTP port parsed from
    // dfs.namenode.http-address (see getNamenodeState()); field name kept for compatibility.
    String rcpPort;
    /** Collection timestamp, fixed once per bean instance. */
    String TIMESTAMP = DataTime.createTime2String();
    // JMX / REST endpoints, derived from the active NameNode / ResourceManager.
    String dfsParentUrl;
    String nameNodeStatusUrl;
    String nameNodeInfoUrl;
    String FSNamesystemUrl;
    String activeRM;
    String yarnParentUrl;
    String yarnClusterMetricsUrl;
    String yarnClusterSchedulerUrl;
    String yarnClusterAppsUrl;
    /** Configured scheduler type: "capacityScheduler" or "fairScheduler". */
    String schedulerName;
    /** Hadoop version string reported with every collected record. */
    String VERSION;

    public String getYarnNameServices() {
        return yarnNameServices;
    }

    @Autowired
    ApplicationMapper applicationMapper;

    @Autowired
    StudentMapper studentMapper;

    /**
     * Loads the YARN site configuration and, when enabled, performs the Kerberos login.
     *
     * @param xc parsed guard XML configuration
     * @throws IOException if the Kerberos keytab login fails
     */
    public void yarnInit(XmlConfig xc) throws IOException {
        String mapredFile = xc.getConfDir() + "mapred-site.xml";
        String yarnFile = xc.getConfDir() + "yarn-site.xml";
        this.yarnConfig.addResource(new Path(mapredFile));
        this.yarnConfig.addResource(new Path(yarnFile));
        VERSION = xc.getVersion();
        // The YARN Java API is not currently used to operate the cluster, so the
        // Kerberos login is only needed when the cluster endpoints are secured.
        if (xc.getKerberos()) {
            System.setProperty("java.security.krb5.conf", xc.getConfDir() + "krb5.conf");
            System.setProperty("sun.security.krb5.debug", "true");
            UserGroupInformation.setConfiguration(this.yarnConfig);
            String keytabFile = xc.getConfDir() + xc.getKerberosKeytab();
            UserGroupInformation.loginUserFromKeytab(xc.getKerberosUser(), keytabFile);
        }
        schedulerName = xc.getScheduler();
        yarnNameServices = this.yarnConfig.get("yarn.resourcemanager.cluster-id");
    }

    /**
     * Fetches cluster-wide YARN metrics ("clusterMetrics" from the active RM REST API).
     * Requires getRMState() to have been called first so yarnClusterMetricsUrl is set.
     *
     * @throws IOException if the REST endpoint cannot be read
     */
    public YarnClusterState getYarnClusterStatus() throws IOException {
        JsonNode clusterMetrics = LoadUrl.getOneMetricOnYarn(yarnClusterMetricsUrl, "clusterMetrics");
        // The REST API reports memory in MB; convert to bytes for the human-readable descriptions.
        long bytesPerMB = (long) Math.pow(1024, 2);
        long availableBytes = clusterMetrics.get("availableMB").longValue() * bytesPerMB;
        long allocatedBytes = clusterMetrics.get("allocatedMB").longValue() * bytesPerMB;
        long totalBytes = clusterMetrics.get("totalMB").longValue() * bytesPerMB;
        String availableByteDesc = StringUtils.byteDesc(availableBytes);
        String allocatedMByteDesc = StringUtils.byteDesc(allocatedBytes);
        String totalByteDesc = StringUtils.byteDesc(totalBytes);
        YarnClusterState yarnClusterState = new YarnClusterState(1, yarnNameServices,
                clusterMetrics.get("appsSubmitted").intValue(),
                clusterMetrics.get("appsCompleted").intValue(),
                clusterMetrics.get("appsFailed").intValue(),
                clusterMetrics.get("availableMB").longValue(),
                availableByteDesc,
                clusterMetrics.get("allocatedMB").longValue(),
                allocatedMByteDesc,
                clusterMetrics.get("totalMB").longValue(),
                totalByteDesc,
                clusterMetrics.get("totalVirtualCores").intValue(),
                clusterMetrics.get("totalNodes").intValue(),
                clusterMetrics.get("lostNodes").intValue(),
                clusterMetrics.get("unhealthyNodes").intValue(),
                clusterMetrics.get("decommissioningNodes").intValue(),
                clusterMetrics.get("decommissionedNodes").intValue(),
                clusterMetrics.get("activeNodes").intValue(),
                clusterMetrics.get("shutdownNodes").intValue(),
                VERSION,
                TIMESTAMP
        );
        log.info(yarnClusterState.toString());
        return yarnClusterState;
    }

    /**
     * Probes every configured ResourceManager, records its HA state, and remembers the
     * active RM's REST base URLs (metrics / scheduler / apps) for the other collectors.
     *
     * @return one RmState per configured rm-id; state is "none" when the RM is unreachable
     */
    public ArrayList<RmState> getRMState() {
        ArrayList<RmState> rmList = new ArrayList<>();
        for (String rmId : this.yarnConfig.get("yarn.resourcemanager.ha.rm-ids").split(",")) {
            String httpAddressMetric = "yarn.resourcemanager.webapp.address." + rmId;
            String httpAddress = this.yarnConfig.get(httpAddressMetric);
            String hostname = this.yarnConfig.get(httpAddressMetric).split(":")[0];
            String RmUrl = "http://" + httpAddress + "/ws/v1/cluster/info";
            String serviceState;
            try {
                JsonNode clusterInfo = LoadUrl.getOneMetricOnYarn(RmUrl, "clusterInfo");
                serviceState = clusterInfo.get("haState").asText();
                if ("ACTIVE".equals(serviceState)) {
                    // Cache the active RM's endpoints for the subsequent collectors.
                    activeRM = hostname;
                    yarnParentUrl = "http://" + httpAddress + "/ws/v1/cluster/";
                    yarnClusterMetricsUrl = yarnParentUrl + "metrics";
                    yarnClusterSchedulerUrl = yarnParentUrl + "scheduler";
                    yarnClusterAppsUrl = yarnParentUrl + "apps";
                }
            } catch (Exception e) {
                // Unreachable RM is expected during failover; record it and move on.
                serviceState = "none";
                log.info(yarnNameServices + "-" + rmId + "-" + hostname + " get url error" + TIMESTAMP);
            }
            RmState rmState = new RmState(1, yarnNameServices, rmId, hostname, httpAddress, serviceState, TIMESTAMP);
            log.info(rmState.toString());
            rmList.add(rmState);
        }
        log.info(yarnNameServices + "\tactiveRM : " + activeRM +
                "\nyarnParentUrl : " + yarnParentUrl +
                "\nyarnClusterMetricsUrl : " + yarnClusterMetricsUrl +
                "\nyarnClusterSchedulerUrl : " + yarnClusterSchedulerUrl +
                "\nyarnClusterAppsUrl : " + yarnClusterAppsUrl);
        return rmList;
    }

    /**
     * Converts per-queue resource quotas into an estimated number of whole worker
     * machines ("cost") each queue would occupy, taking the larger of the memory
     * and vcore requirements.
     *
     * @param ssList queue states previously collected by getScheduler()
     */
    public ArrayList<SchedulerReport> getSchedulerReport(ArrayList<SchedulerState> ssList) {
        // yarn.scheduler.maximum-allocation-vcores : cpu provided by one server
        // yarn.scheduler.maximum-allocation-mb     : memory provided by one server
        ArrayList<SchedulerReport> reportArrayList = new ArrayList<>();
        int vcores = Integer.parseInt(this.yarnConfig.get("yarn.scheduler.maximum-allocation-vcores"));
        long vmem = Long.parseLong(this.yarnConfig.get("yarn.scheduler.maximum-allocation-mb"));

        for (SchedulerState ss : ssList) {
            // Integer division rounded up by one: a conservative machine-count estimate.
            long costMem = ss.getMemory() / vmem + 1;
            int costVcore = ss.getvCores() / vcores + 1;
            long cost = Math.max(costMem, costVcore);
            log.info("clusterid : " + ss.getClusterId() +
                    "\tqueueName : " + ss.getQueueName() +
                    "\tgetMemory : " + ss.getMemory() +
                    "\tvmem : " + vmem +
                    "\tcostMem : " + costMem +
                    "\tss.getvCores() : " + ss.getvCores() +
                    "\tvcores : " + vcores +
                    "\tcostVcore : " + costVcore +
                    "\tcost : " + cost +
                    "\ttimestamp : " + TIMESTAMP);
            SchedulerReport schedulerReport = new SchedulerReport(yarnNameServices, ss.getQueueName(), cost, TIMESTAMP);
            reportArrayList.add(schedulerReport);
        }
        return reportArrayList;
    }

    /**
     * Collects queue information using the scheduler implementation configured
     * in the guard XML ("capacityScheduler" or "fairScheduler").
     *
     * @return queue states; empty when the scheduler name matches neither strategy
     */
    public ArrayList<SchedulerState> getScheduler() throws IOException {
        ArrayList<SchedulerState> ssList = new ArrayList<>();
        if ("capacityScheduler".equals(schedulerName)) {
            ssList = this.getCapacityScheduler();
        } else if ("fairScheduler".equals(schedulerName)) {
            ssList = this.getFairScheduler();
        }
        return ssList;
    }

    /**
     * Collects queue information from a CapacityScheduler via the RM scheduler REST API.
     */
    public ArrayList<SchedulerState> getCapacityScheduler() throws IOException {
        ArrayList<SchedulerState> schedulerList = new ArrayList<>();
        JsonNode scheduler = LoadUrl.getOneMetricOnYarn(yarnClusterSchedulerUrl, "scheduler").path("schedulerInfo");
        JsonNode cm = scheduler.get("capacities").get("queueCapacitiesByPartition").get(0).get("configuredMaxResource");
        long rootMem = cm.get("memory").longValue();
        String strMemory = StringUtils.byteDesc(rootMem * ((long) Math.pow(1024, 2)));
        SchedulerState schedulerRoot = new SchedulerState(yarnNameServices,
                scheduler.get("queueName").asText(),
                strMemory,
                rootMem,
                cm.get("vCores").intValue(),
                scheduler.get("maxCapacity").longValue(),
                scheduler.get("usedCapacity").doubleValue(),
                scheduler.get("usedCapacity").doubleValue(),
                schedulerName,
                TIMESTAMP
        );
        schedulerList.add(schedulerRoot);
        log.info(schedulerRoot.toString());
        JsonNode childQueue = scheduler.get("queues").get("queue");
        for (JsonNode cq : childQueue) {
            // NOTE(review): this reuses the ROOT queue's configuredMaxResource ("cm")
            // for every child; presumably the child's own capacity was intended —
            // verify against the RM scheduler JSON before changing.
            long mem = cm.get("memory").longValue();
            String childMemory = StringUtils.byteDesc(mem * ((long) Math.pow(1024, 2)));
            SchedulerState schedulerCQ = new SchedulerState(
                    yarnNameServices,
                    cq.get("queueName").asText(),
                    childMemory,
                    mem,
                    cq.get("maxEffectiveCapacity").get("vCores").intValue(),
                    cq.get("maxCapacity").longValue(),
                    cq.get("usedCapacity").doubleValue(),
                    cq.get("usedCapacity").doubleValue(),
                    schedulerName,
                    TIMESTAMP
            );
            schedulerList.add(schedulerCQ);
            log.info(schedulerCQ.toString());
        }
        return schedulerList;
    }

    /**
     * Collects queue information from a FairScheduler via the RM scheduler REST API,
     * including all (recursively nested) child queues.
     */
    public ArrayList<SchedulerState> getFairScheduler() throws IOException {
        ArrayList<SchedulerState> schedulerList = new ArrayList<>();
        JsonNode scheduler = LoadUrl.getOneMetricOnYarn(yarnClusterSchedulerUrl, "scheduler").path("schedulerInfo").path("rootQueue");
        long rootMaxMem = scheduler.get("maxResources").get("memory").longValue();
        String strMemory = StringUtils.byteDesc(rootMaxMem * ((long) Math.pow(1024, 2)));
        // Queue usage as a percentage of the root queue's max memory.
        double queuesUsedResourcesPercent = (scheduler.get("usedResources").get("memory").doubleValue() / rootMaxMem) * 100;
        SchedulerState schedulerRoot = new SchedulerState(yarnNameServices,
                scheduler.get("queueName").asText(),
                strMemory,
                rootMaxMem,
                scheduler.get("maxResources").get("vCores").intValue(),
                rootMaxMem,
                scheduler.get("usedResources").get("memory").doubleValue(),
                queuesUsedResourcesPercent,
                schedulerName,
                TIMESTAMP
        );
        log.info(schedulerRoot.toString());
        JsonNode childQueue = scheduler.get("childQueues").get("queue");
        // Walk the child queue tree, if any.
        if (childQueue != null)
            schedulerList = getChildQueues(childQueue, schedulerList, rootMaxMem);
        schedulerList.add(schedulerRoot);
        return schedulerList;
    }

    /**
     * Recursively flattens the fair-scheduler child queue tree into childList.
     *
     * @param childQueue JSON array of child queue nodes
     * @param childList  accumulator the discovered queues are appended to
     * @param rootMaxMem root queue max memory (MB), used as the usage-percent denominator
     */
    public ArrayList<SchedulerState> getChildQueues(JsonNode childQueue, ArrayList<SchedulerState> childList, long rootMaxMem) {
        for (JsonNode cq : childQueue) {
            long mem = cq.get("maxResources").get("memory").longValue();
            String strMemory = StringUtils.byteDesc(mem * ((long) Math.pow(1024, 2)));
            float usedMem = cq.get("usedResources").get("memory").floatValue();
            // used memory / root max memory = queue usage percentage
            double usedMemPercent = usedMem / rootMaxMem * 100;
            SchedulerState schedulerCQ = new SchedulerState(
                    yarnNameServices,
                    cq.get("queueName").asText(),
                    strMemory,
                    mem,
                    cq.get("maxResources").get("vCores").intValue(),
                    cq.get("maxResources").get("memory").longValue(),
                    // BUGFIX: the original called doubleValue() on the "usedResources"
                    // object node (which yields 0.0); the memory field was intended,
                    // as in getFairScheduler() above.
                    cq.get("usedResources").get("memory").doubleValue(),
                    usedMemPercent,
                    schedulerName,
                    TIMESTAMP
            );
            if (cq.get("childQueues") != null) {
                childList = (this.getChildQueues(cq.get("childQueues").get("queue"), childList, rootMaxMem));
            }
            childList.add(schedulerCQ);
            log.info(schedulerCQ.toString());
        }
        return childList;
    }

    /**
     * Collects finished YARN applications that completed after the newest
     * finish time already stored for this cluster.
     */
    public ArrayList<ApplicationState> getApplications() throws IOException {
        JsonNode appsList = LoadUrl.getOneMetricOnYarn(yarnClusterAppsUrl, "apps").path("app");
        ArrayList<ApplicationState> appList = new ArrayList<>();
        try {
            maxFinishedTime = applicationMapper.selectMaxFinishedTimeByClusterId(yarnNameServices);
        } catch (Exception e) {
            // No previous rows for this cluster: treat as the first collection run.
            e.printStackTrace();
            maxFinishedTime = 1L;
            log.info(yarnNameServices + "\t第一次采集任务信息 ！");
        }
        for (JsonNode app : appsList) {
            String state = app.get("state").asText();
            // A new application: finished after the last persisted finish time and
            // actually done (not still ACCEPTED or RUNNING).
            // BUGFIX: the original compared the String literal against the JsonNode
            // itself ("ACCEPTED".equals(app.get("state"))), which is always false,
            // so the state filter never took effect.
            if (app.get("finishedTime").longValue() > maxFinishedTime
                    && !"ACCEPTED".equals(state)
                    && !"RUNNING".equals(state)) {
                // trackingUrl may be absent for some applications; fall back to "".
                String trackingUrl = app.path("trackingUrl").asText("");
                ApplicationState applicationState = new ApplicationState(yarnNameServices,
                        app.get("id").asText(),
                        app.get("user").asText(),
                        app.get("name").asText(),
                        app.get("applicationType").asText(),
                        app.get("queue").asText(),
                        app.get("startedTime").longValue(),
                        app.get("finishedTime").longValue(),
                        app.get("state").asText(),
                        app.get("finalStatus").asText(),
                        app.get("memorySeconds").longValue(),
                        app.get("vcoreSeconds").longValue(),
                        app.get("queueUsagePercentage").intValue(),
                        trackingUrl,
                        TIMESTAMP
                );
                log.info(applicationState);
                appList.add(applicationState);
            }
        }
        return appList;
    }

    /**
     * Loads the HDFS site configuration, performs the optional Kerberos login,
     * and opens the FileSystem handle used by the HDFS collectors.
     *
     * @param xc parsed guard XML configuration
     * @throws IOException if the Kerberos login or FileSystem creation fails
     */
    public void dfsInit(XmlConfig xc) throws IOException {
        String coreFile = xc.getConfDir() + "core-site.xml";
        String hdfsFile = xc.getConfDir() + "hdfs-site.xml";
        hdfsNameServices = xc.getServiceId();
        VERSION = xc.getVersion();
        dfsConfig.addResource(new Path(coreFile));
        dfsConfig.addResource(new Path(hdfsFile));
        dfsConfig.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        dfsConfig.set("dfs.client.failover.proxy.provider." + hdfsNameServices, "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        if (xc.getKerberos()) {
            String krb5File = xc.getConfDir() + "krb5.conf";
            System.setProperty("java.security.krb5.conf", krb5File);
            UserGroupInformation.setConfiguration(this.dfsConfig);
            String keytabFile = xc.getConfDir() + xc.getKerberosKeytab();
            UserGroupInformation.loginUserFromKeytab(xc.getKerberosUser(), keytabFile);
        }
        myhdfs = FileSystem.get(this.dfsConfig);
    }

    /**
     * Probes every configured NameNode over JMX, records its HA state, and caches
     * the active NameNode's JMX base URLs for the other HDFS collectors.
     *
     * @throws IOException if a NameNode JMX endpoint cannot be read or parsed
     */
    public List<NameNodeState> getNamenodeState() throws IOException {
        String haMetric = "dfs.ha.namenodes." + hdfsNameServices;
        List<NameNodeState> nnList = new ArrayList<>();
        for (String nnId : dfsConfig.get(haMetric).split(",")) {
            String RpcAddressMetric = "dfs.namenode.rpc-address." + hdfsNameServices + "." + nnId;
            String httpAddress = dfsConfig.get("dfs.namenode.http-address." + hdfsNameServices + "." + nnId);
            String rpcAddress = dfsConfig.get(RpcAddressMetric);
            String address = dfsConfig.get(RpcAddressMetric).split(":")[0];
            // The JMX endpoint lives on the HTTP port (field name is a historical typo).
            rcpPort = httpAddress.split(":")[1];
            String nameNodeUrl = "http://" + address + ":" + rcpPort + "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus";
            String nnstr = LoadUrl.getString(nameNodeUrl);
            ObjectMapper objectMapper = new ObjectMapper();
            JsonNode beans = objectMapper.readTree(nnstr).path("beans");
            String serviceState = beans.get(0).get("State").asText();
            if ("active".equals(serviceState)) {
                activeNamenode = address;
            }
            NameNodeState nameNodeState = new NameNodeState(hdfsNameServices, nnId, address, serviceState, rpcAddress, httpAddress, TIMESTAMP);
            log.info(nameNodeState.toString());
            nnList.add(nameNodeState);
        }
        dfsParentUrl = "http://" + activeNamenode + ":" + rcpPort + "/jmx?qry=";
        nameNodeStatusUrl = dfsParentUrl + "Hadoop:service=NameNode,name=NameNodeStatus";
        nameNodeInfoUrl = dfsParentUrl + "Hadoop:service=NameNode,name=NameNodeInfo";
        FSNamesystemUrl = dfsParentUrl + "Hadoop:service=NameNode,name=FSNamesystem";
        log.info(nameNodeStatusUrl);
        log.info(nameNodeInfoUrl);
        log.info(FSNamesystemUrl);
        return nnList;
    }

    /**
     * Whether the cluster is in safemode: "true" / "false".
     * The JMX "Safemode" attribute is an empty string when safemode is off and a
     * descriptive message when it is on.
     */
    public String getSafemode() throws IOException {
        log.info("nameNodeInfoUrl : " + nameNodeInfoUrl);
        String safemodeMsg = LoadUrl.getOneMetric(nameNodeInfoUrl, "Safemode");
        return safemodeMsg.isEmpty() ? "false" : "true";
    }

    /**
     * Whether Hadoop security (Kerberos) is enabled on the cluster.
     */
    public String getSecurity() throws IOException {
        return LoadUrl.getOneMetric(nameNodeStatusUrl, "SecurityEnabled");
    }

    /**
     * Total block count reported by the active NameNode.
     */
    public Long getTotalBlocks() throws IOException {
        return Long.valueOf(LoadUrl.getOneMetric(nameNodeInfoUrl, "TotalBlocks"));
    }

    /**
     * Total file count reported by the active NameNode.
     */
    public Long getFilesTotal() throws IOException {
        return Long.valueOf(LoadUrl.getOneMetric(FSNamesystemUrl, "FilesTotal"));
    }

    /**
     * HDFS capacity usage percentage.
     */
    public Double getPercentUsed() throws IOException {
        String percentUsed = LoadUrl.getOneMetric(nameNodeInfoUrl, "PercentUsed");
        return Double.valueOf(percentUsed);
    }

    /**
     * Number of blocks with no remaining replica.
     */
    public int getMissingBlocks() throws IOException {
        return Integer.parseInt(LoadUrl.getOneMetric(FSNamesystemUrl, "MissingBlocks"));
    }

    /** Number of blocks below their target replication factor. */
    public Long getUnderReplicatedBlocks() throws IOException {
        return Long.valueOf(LoadUrl.getOneMetric(FSNamesystemUrl, "UnderReplicatedBlocks"));
    }

    /** Number of live DataNodes. */
    public int getNumLiveDataNodes() throws IOException {
        return Integer.parseInt(LoadUrl.getOneMetric(FSNamesystemUrl, "NumLiveDataNodes"));
    }

    /** Number of dead DataNodes. */
    public int getNumDeadDataNodes() throws IOException {
        return Integer.parseInt(LoadUrl.getOneMetric(FSNamesystemUrl, "NumDeadDataNodes"));
    }

    /** Number of decommissioned DataNodes that are still live. */
    public int getNumDecomLiveDataNodes() throws IOException {
        return Integer.parseInt(LoadUrl.getOneMetric(FSNamesystemUrl, "NumDecomLiveDataNodes"));
    }

    /** Number of decommissioned DataNodes that are dead. */
    public int getNumDecomDeadDataNodes() throws IOException {
        return Integer.parseInt(LoadUrl.getOneMetric(FSNamesystemUrl, "NumDecomDeadDataNodes"));
    }

    /**
     * Parses the NameNode's "LiveNodes" JMX attribute (a JSON map keyed by
     * "hostname:port") into one DataNodeState per live DataNode.
     *
     * @throws IOException if the JMX endpoint cannot be read or its JSON parsed
     */
    public ArrayList<DataNodeState> getDataNodeState() throws IOException {
        ArrayList<DataNodeState> dnList = new ArrayList<>();
        String LiveNodesStr = LoadUrl.getOneMetric(nameNodeInfoUrl, "LiveNodes");
        ObjectMapper om = new ObjectMapper();
        HashMap<String, HashMap<String, Object>> m = om.readValue(LiveNodesStr, new TypeReference<HashMap<String, HashMap<String, Object>>>() {
        });
        for (Map.Entry<String, HashMap<String, Object>> entry : m.entrySet()) {
            String hostname = entry.getKey().split(":")[0];
            int lastContact = (int) (entry.getValue().get("lastContact"));
            Long numBlocks = Long.parseLong(entry.getValue().get("numBlocks").toString());
            Double blockPoolUsedPercent = (Double) (entry.getValue().get("blockPoolUsedPercent"));
            String adminState = (String) (entry.getValue().get("adminState"));
            Long capacity = Long.parseLong(entry.getValue().get("capacity").toString());
            int volfails = (int) (entry.getValue().get("volfails"));
            String failedStorageIDs;
            Long lastVolumeFailureDate;
            if (volfails < 1) {
                // No volume failures: the failure fields are absent from the JSON.
                lastVolumeFailureDate = 0L;
                failedStorageIDs = null;
            } else {
                failedStorageIDs = entry.getValue().get("failedStorageIDs").toString();
                lastVolumeFailureDate = (Long) (entry.getValue().get("lastVolumeFailureDate"));
            }
            DataNodeState dataNodeState = new DataNodeState(
                    hdfsNameServices,
                    hostname,
                    lastContact,
                    numBlocks,
                    blockPoolUsedPercent,
                    adminState,
                    capacity,
                    volfails,
                    failedStorageIDs,
                    lastVolumeFailureDate,
                    TIMESTAMP
            );
            dnList.add(dataNodeState);
            log.info(dataNodeState.toString());
        }
        return dnList;
    }

    /**
     * Aggregates the HDFS cluster summary (safemode, security, capacity, block /
     * file counts, DataNode counts) into a single DfsClusterState record.
     */
    public DfsClusterState getDfsClusterStatus() throws IOException {
        FsStatus fs = myhdfs.getStatus();
        long capacity = fs.getCapacity();
        String capacityByteDesc = StringUtils.byteDesc(capacity);
        DfsClusterState hdfsState = new DfsClusterState(1,
                hdfsNameServices,
                getSafemode(),
                getSecurity(),
                activeNamenode,
                capacity,
                capacityByteDesc,
                getTotalBlocks(),
                getFilesTotal(),
                getPercentUsed(),
                getMissingBlocks(),
                getUnderReplicatedBlocks(),
                getNumLiveDataNodes(),
                getNumDeadDataNodes(),
                getNumDecomLiveDataNodes(),
                getNumDecomDeadDataNodes(),
                VERSION,
                TIMESTAMP
        );
        log.info(hdfsState.toString());
        return hdfsState;
    }

    /**
     * Collects a content summary (file count, logical length, space consumed) for
     * every direct subdirectory of the given HDFS path.
     *
     * @param hdfspath parent directory; assumed to end with "/" since the child
     *                 name is concatenated directly — TODO confirm with callers
     * @throws IOException if the path cannot be listed or summarized
     */
    public ArrayList<FolderState> getFolderStatus(String hdfspath) throws IOException {
        ArrayList<FolderState> fsList = new ArrayList<>();
        Long createTime = DataTime.createTime2Long();
        FileStatus[] fileStatuses = myhdfs.listStatus(new Path(hdfspath));
        for (FileStatus dir : fileStatuses) {
            if (dir.isDirectory()) {
                String nameDir = hdfspath + dir.getPath().getName();
                ContentSummary dirContentSummary = myhdfs.getContentSummary(dir.getPath());
                long fileCount = dirContentSummary.getFileCount();
                long length = dirContentSummary.getLength();
                long spaceConsumed = dirContentSummary.getSpaceConsumed();
                FolderState folderState = new FolderState(hdfsNameServices, nameDir, fileCount, length, spaceConsumed, createTime, TIMESTAMP);
                log.info(folderState);
                fsList.add(folderState);
            }
        }
        return fsList;
    }
}
