package com.zyb.dfs.namenode.server;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Manages all datanode nodes in the cluster: registration, heartbeats,
 * storage accounting, replica allocation, liveness monitoring and rebalancing.
 */
public class DataNodeManager {

    /** How long (ms) a datanode may go without a heartbeat before it is declared dead. */
    private static final long HEARTBEAT_TIMEOUT_MS = 60 * 1000L;
    /** Interval (ms) between liveness sweeps of the alive monitor. */
    private static final long ALIVE_MONITOR_INTERVAL_MS = 30 * 1000L;
    /** Delay (ms) before scheduled replica-removal tasks are dispatched (24 hours). */
    private static final long REMOVE_REPLICA_DELAY_MS = 24 * 60 * 60 * 1000L;
    /** Poll interval (ms) of the delayed-removal thread. */
    private static final long REMOVE_REPLICA_POLL_MS = 60 * 1000L;

    /**
     * In-memory registry of all datanodes, keyed by "ip-hostname".
     */
    private Map<String, DataNodeInfo> dataNodeInfoMap = new ConcurrentHashMap<String, DataNodeInfo>();
    private FSNamesystem nameSystem;

    public DataNodeManager() {
        // NOTE(review): the monitor thread starts before setNameSystem() is called,
        // so there is a window where a dead-node sweep would hit a null nameSystem.
        // The per-iteration catch in DataNodeAliveMonitor keeps the monitor alive
        // through that window instead of silently dying.
        new DataNodeAliveMonitor().start();
    }

    public void setNameSystem(FSNamesystem nameSystem) {
        this.nameSystem = nameSystem;
    }

    /** Builds the registry key for a datanode identified by ip and hostname. */
    private static String datanodeKey(String ip, String hostname) {
        return ip + "-" + hostname;
    }

    /**
     * Registers a datanode.
     *
     * @param ip       datanode ip address
     * @param hostname datanode hostname
     * @param nidPort  datanode port
     * @return true if the node was newly registered, false if it was already present
     */
    public Boolean register(String ip, String hostname, Integer nidPort) {
        // putIfAbsent is atomic on ConcurrentHashMap; the original
        // containsKey-then-put pair could let two concurrent registrations
        // of the same node both succeed.
        DataNodeInfo dataNodeInfo = new DataNodeInfo(ip, hostname, nidPort);
        return dataNodeInfoMap.putIfAbsent(datanodeKey(ip, hostname), dataNodeInfo) == null;
    }

    /**
     * Records a heartbeat from a datanode.
     *
     * @param ip       datanode ip address
     * @param hostname datanode hostname
     * @return true if the heartbeat was accepted; false if the node is unknown,
     *         in which case the caller should instruct the datanode to
     *         re-register and perform a full block report
     */
    public Boolean heartbeat(String ip, String hostname) {
        DataNodeInfo dataNodeInfo = dataNodeInfoMap.get(datanodeKey(ip, hostname));
        if (dataNodeInfo == null) {
            // Unknown node (e.g. expired from the alive monitor) — it must
            // re-register and report all of its blocks again.
            return false;
        }
        dataNodeInfo.setLatestHeartbeatTime(System.currentTimeMillis());
        return true;
    }

    /**
     * Sets the stored-data size of a datanode.
     *
     * @param ip             datanode ip address
     * @param hostname       datanode hostname
     * @param storedDataSize total bytes stored on that node
     */
    public void setStoredDataSize(String ip, String hostname, Long storedDataSize) {
        DataNodeInfo dataNodeInfo = dataNodeInfoMap.get(datanodeKey(ip, hostname));
        // Guard against NPE: the node may have just been removed by the alive
        // monitor between the caller's lookup and this call.
        if (dataNodeInfo != null) {
            dataNodeInfo.setStotageDatasize(storedDataSize);
        }
    }

    /**
     * Looks up a datanode by ip and hostname.
     *
     * @param ip       datanode ip address
     * @param hostname datanode hostname
     * @return the datanode info, or null if not registered
     */
    public DataNodeInfo getDataNodeInfo(String ip, String hostname) {
        return dataNodeInfoMap.get(datanodeKey(ip, hostname));
    }

    /**
     * Looks up a datanode by its "ip-hostname" id.
     *
     * @param id registry key in "ip-hostname" form
     * @return the datanode info, or null if not registered
     */
    public DataNodeInfo getDataNodeInfo(String id) {
        return dataNodeInfoMap.get(id);
    }

    /**
     * Allocates the two datanodes for a double-replica upload.
     *
     * <p>All registered datanodes are sorted by stored-data size and the two
     * least-loaded are chosen. Their accounted size is incremented immediately
     * so that subsequent allocations see the up-to-date storage picture.
     *
     * @param filesize size in bytes of the file to be uploaded
     * @return the two selected datanodes, or an empty list if fewer than two are registered
     */
    public List<DataNodeInfo> allocateDataNodes(long filesize) {
        synchronized (this) {
            List<DataNodeInfo> dataNodeInfoList =
                    new ArrayList<DataNodeInfo>(dataNodeInfoMap.values());
            Collections.sort(dataNodeInfoList);

            List<DataNodeInfo> selectedDataNodeInfoList = new ArrayList<>();
            if (dataNodeInfoList.size() >= 2) {
                for (int i = 0; i < 2; i++) {
                    DataNodeInfo selected = dataNodeInfoList.get(i);
                    selectedDataNodeInfoList.add(selected);
                    // Pre-account the upload so later allocations sort correctly.
                    selected.addStoredDataSize(filesize);
                }
            }
            return selectedDataNodeInfoList;
        }
    }

    /**
     * Re-allocates a single replica target, excluding a datanode on which a
     * previous transfer failed.
     *
     * @param fileSize           size in bytes of the file
     * @param excludedDataNodeId "ip-hostname" id of the node to exclude
     * @return the least-loaded datanode other than the excluded one, or null if none exists
     */
    public DataNodeInfo reallocateDataNode(long fileSize, String excludedDataNodeId) {
        synchronized (this) {
            // Undo the storage accounting done for the failed node; it may
            // already have been removed by the alive monitor, hence the guard.
            DataNodeInfo excludedDataNode = dataNodeInfoMap.get(excludedDataNodeId);
            if (excludedDataNode != null) {
                excludedDataNode.addStoredDataSize(-fileSize);
            }

            // Sort the remaining datanodes by stored-data size.
            List<DataNodeInfo> datanodeList = new ArrayList<DataNodeInfo>();
            for (DataNodeInfo datanode : dataNodeInfoMap.values()) {
                if (excludedDataNode == null || !excludedDataNode.equals(datanode)) {
                    datanodeList.add(datanode);
                }
            }
            Collections.sort(datanodeList);

            // Pick the least-loaded node and pre-account the file size.
            DataNodeInfo selectedDatanode = null;
            if (!datanodeList.isEmpty()) {
                selectedDatanode = datanodeList.get(0);
                selectedDatanode.addStoredDataSize(fileSize);
            }
            return selectedDatanode;
        }
    }

    /**
     * Creates re-replication tasks for every file that lived on a dead datanode.
     *
     * @param deadDatanode the datanode that was declared dead
     */
    public void createLostReplicasTask(DataNodeInfo deadDatanode) {
        // NOTE(review): this uses the single-arg (hostname) overload while
        // createRebalanceTasks uses the (ip, hostname) overload — confirm both
        // exist on FSNamesystem and resolve the same node.
        List<String> files = nameSystem.getFilesByDatanode(deadDatanode.getHostname());
        for (String file : files) {
            // Each entry is encoded as "filename_length".
            String filename = file.split("_")[0];
            Long fileLength = Long.valueOf(file.split("_")[1]);

            // Target node that will receive the new replica.
            DataNodeInfo destDatanode = allocateReplicateDataNode(fileLength);
            // Source node that still holds a live replica of the file.
            DataNodeInfo sourceDatanode = nameSystem.getReplicateSource(filename, deadDatanode);
            if (destDatanode == null || sourceDatanode == null) {
                // No usable target/source (e.g. cluster too small) — skip this
                // file rather than NPE and kill the monitor sweep.
                continue;
            }

            ReplicateTask replicateTask = new ReplicateTask(
                    filename, fileLength, sourceDatanode, destDatanode);
            // Queue the copy on the destination node; it pulls from the source.
            destDatanode.addReplicateTask(replicateTask);
        }
    }

    /**
     * Allocates the datanode that will receive a re-replicated copy.
     *
     * @param fileSize size in bytes of the file to replicate
     * @return the least-loaded datanode, or null if none is registered
     */
    public DataNodeInfo allocateReplicateDataNode(long fileSize) {
        synchronized (this) {
            // Sort all datanodes by stored-data size.
            List<DataNodeInfo> datanodeList =
                    new ArrayList<DataNodeInfo>(dataNodeInfoMap.values());
            Collections.sort(datanodeList);

            // Pick the least-loaded node and pre-account the file size.
            DataNodeInfo selectedDatanode = null;
            if (!datanodeList.isEmpty()) {
                selectedDatanode = datanodeList.get(0);
                selectedDatanode.addStoredDataSize(fileSize);
            }
            return selectedDatanode;
        }
    }

    /**
     * Creates replica-copy tasks to rebalance storage across the cluster.
     *
     * <p>Nodes above the cluster average migrate data to nodes below it.
     * Copy tasks run immediately; the matching delete tasks are dispatched by
     * a delay thread 24 hours later so the copies have time to complete.
     */
    public void createRebalanceReplicateTasks() {
        synchronized (this) {
            // Guard against divide-by-zero when no datanodes are registered.
            if (dataNodeInfoMap.isEmpty()) {
                return;
            }

            // Average stored-data size across the cluster.
            long totalStoredDataSize = 0;
            for (DataNodeInfo datanode : dataNodeInfoMap.values()) {
                totalStoredDataSize += datanode.getStotageDatasize();
            }
            long averageStoredDataSize = totalStoredDataSize / dataNodeInfoMap.size();

            // Split nodes into migration sources (above average) and
            // destinations (below average).
            List<DataNodeInfo> sourceDatanodes = new ArrayList<DataNodeInfo>();
            List<DataNodeInfo> destDatanodes = new ArrayList<DataNodeInfo>();
            for (DataNodeInfo datanode : dataNodeInfoMap.values()) {
                if (datanode.getStotageDatasize() > averageStoredDataSize) {
                    sourceDatanodes.add(datanode);
                } else if (datanode.getStotageDatasize() < averageStoredDataSize) {
                    destDatanodes.add(datanode);
                }
            }

            // Copy tasks are queued now; delete tasks are collected here and
            // dispatched 24 hours later by the delay thread.
            List<RemoveReplicaTask> removeReplicaTasks = new ArrayList<RemoveReplicaTask>();

            for (DataNodeInfo sourceDatanode : sourceDatanodes) {
                long toRemoveDataSize = sourceDatanode.getStotageDatasize() - averageStoredDataSize;

                for (DataNodeInfo destDatanode : destDatanodes) {
                    if (destDatanode.getStotageDatasize() + toRemoveDataSize <= averageStoredDataSize) {
                        // The whole surplus fits on this destination node.
                        createRebalanceTasks(sourceDatanode, destDatanode,
                                removeReplicaTasks, toRemoveDataSize);
                        break;
                    } else if (destDatanode.getStotageDatasize() < averageStoredDataSize) {
                        // Only part of the surplus fits here; continue with the rest.
                        long maxRemoveDataSize = averageStoredDataSize - destDatanode.getStotageDatasize();
                        long removedDataSize = createRebalanceTasks(sourceDatanode, destDatanode,
                                removeReplicaTasks, maxRemoveDataSize);
                        toRemoveDataSize -= removedDataSize;
                    }
                }
            }

            // Hand the delete tasks to a thread that fires them after 24 hours.
            new DelayRemoveReplicaThread(removeReplicaTasks).start();
        }
    }

    /**
     * Generates copy tasks (queued immediately) and delete tasks (collected for
     * delayed dispatch) moving up to {@code maxRemoveDataSize} bytes from
     * {@code sourceDatanode} to {@code destDatanode}.
     *
     * @return the number of bytes actually scheduled for migration
     */
    private long createRebalanceTasks(DataNodeInfo sourceDatanode, DataNodeInfo destDatanode,
                                      List<RemoveReplicaTask> removeReplicaTasks, long maxRemoveDataSize) {
        List<String> files = nameSystem.getFilesByDatanode(sourceDatanode.getIp(),
                sourceDatanode.getHostname());

        // Walk the source node's files, scheduling one copy+delete pair per
        // file, until the migrated total would reach the cap.
        long removedDataSize = 0;

        for (String file : files) {
            // Each entry is encoded as "filename_length".
            String filename = file.split("_")[0];
            long fileLength = Long.valueOf(file.split("_")[1]);

            if (removedDataSize + fileLength >= maxRemoveDataSize) {
                break;
            }

            // Copy task: queued on the destination node right away.
            ReplicateTask replicateTask = new ReplicateTask(
                    filename, fileLength, sourceDatanode, destDatanode);
            destDatanode.addReplicateTask(replicateTask);
            destDatanode.addStoredDataSize(fileLength);

            // Delete task: update accounting and metadata now, but defer the
            // physical delete (see DelayRemoveReplicaThread).
            sourceDatanode.addStoredDataSize(-fileLength);
            nameSystem.removeReplicaFromDataNode(sourceDatanode.getId(), file);
            RemoveReplicaTask removeReplicaTask = new RemoveReplicaTask(
                    filename, sourceDatanode);
            removeReplicaTasks.add(removeReplicaTask);

            removedDataSize += fileLength;
        }

        return removedDataSize;
    }

    /**
     * Thread that dispatches replica-removal tasks after a 24-hour delay,
     * giving the corresponding copy tasks time to complete first.
     *
     * @author zhonghuashishan
     */
    class DelayRemoveReplicaThread extends Thread {

        private List<RemoveReplicaTask> removeReplicaTasks;

        public DelayRemoveReplicaThread(List<RemoveReplicaTask> removeReplicaTasks) {
            this.removeReplicaTasks = removeReplicaTasks;
        }

        @Override
        public void run() {
            long start = System.currentTimeMillis();

            while (true) {
                try {
                    if (System.currentTimeMillis() - start > REMOVE_REPLICA_DELAY_MS) {
                        for (RemoveReplicaTask removeReplicaTask : removeReplicaTasks) {
                            removeReplicaTask.getDatanode().addRemoveReplicaTask(removeReplicaTask);
                        }
                        break;
                    }
                    Thread.sleep(REMOVE_REPLICA_POLL_MS);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop; the original
                    // swallowed the interrupt and kept looping.
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Background thread that sweeps for datanodes whose last heartbeat is
     * older than {@link #HEARTBEAT_TIMEOUT_MS}, removes them, and schedules
     * re-replication of their blocks.
     */
    class DataNodeAliveMonitor extends Thread {
        @Override
        public void run() {
            while (true) {
                // Catch per iteration: the original wrapped the whole loop in
                // one try with an empty catch, so any single exception killed
                // liveness monitoring forever, silently.
                try {
                    List<DataNodeInfo> toRemoveDatanodes = new ArrayList<DataNodeInfo>();

                    for (DataNodeInfo datanode : dataNodeInfoMap.values()) {
                        if (System.currentTimeMillis() - datanode.getLatestHeartbeatTime()
                                > HEARTBEAT_TIMEOUT_MS) {
                            toRemoveDatanodes.add(datanode);
                        }
                    }

                    for (DataNodeInfo toRemoveDatanode : toRemoveDatanodes) {
                        // Drop the node from the registry first so allocators
                        // stop handing it out.
                        dataNodeInfoMap.remove(datanodeKey(
                                toRemoveDatanode.getIp(), toRemoveDatanode.getHostname()));
                        // Re-replicate every file that lived on the dead node.
                        createLostReplicasTask(toRemoveDatanode);
                        // Drop the node's entry from the datanode→replica index.
                        nameSystem.removeDeadDatanode(toRemoveDatanode);
                    }

                    Thread.sleep(ALIVE_MONITOR_INTERVAL_MS);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and let the monitor exit.
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    // Log and keep monitoring; do not let one bad sweep kill
                    // the thread.
                    e.printStackTrace();
                }
            }
        }
    }
}
