package com.zyb.dfs.namenode.server;


import com.alibaba.fastjson.JSONObject;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * 负责管理元数据的核心组件
 *
 * @author zhonghuashishan
 */
public class FSNamesystem {
    /**
     * Number of replicas maintained for every file
     */
    public static final Integer REPLICA_NUM = 2;


    /**
     * Component managing the in-memory file directory tree
     */
    private FSDirectory directory;
    /**
     * Component responsible for writing the edits log to disk
     */
    private FSEditlog editlog;

    /**
     * For each file: the list of datanodes holding a replica of it
     */
    private Map<String, List<DataNodeInfo>> replicasByFilename = new ConcurrentHashMap<String, List<DataNodeInfo>>();

    /**
     * For each datanode: all the file replicas it stores.
     * NOTE(review): plain HashMap guarded by replicasLock, while
     * replicasByFilename is a ConcurrentHashMap AND is also accessed under
     * the same lock — mixed strategies; confirm which one is intended.
     */
    private Map<String, List<String>> filesByDatanode =
            new HashMap<String, List<String>>();
    /**
     * Read-write lock protecting the replica data structures above
     */
    ReentrantReadWriteLock replicasLock = new ReentrantReadWriteLock();

    // NOTE(review): this second lock appears essentially unused by the methods
    // in this file — verify whether it is needed at all
    ReentrantReadWriteLock replicasByFilenameLock = new ReentrantReadWriteLock();

    /**
     * txid covered by the most recent checkpoint
     */
    private long checkpointTxid = 0;

    // Manages datanode registration/liveness; used to resolve DataNodeInfo instances
    private DataNodeManager dataNodeManager;

    /**
     * Builds the namesystem and immediately recovers the namespace
     * (fsimage load + edits log replay) from disk.
     *
     * @param dataNodeManager datanode manager used to look up DataNodeInfo
     */
    public FSNamesystem(DataNodeManager dataNodeManager) {
        this.directory = new FSDirectory();
        this.editlog = new FSEditlog(this);
        this.dataNodeManager = dataNodeManager;
        recoverNamespace();
    }

    /**
     * Removes the replica-to-datanode relationships of a datanode
     * from the in-memory metadata.
     *
     * NOTE(review): not implemented yet — currently a no-op stub.
     *
     * @param dataNodeInfo the datanode being removed
     */
    public void removeDataNode(DataNodeInfo dataNodeInfo) {

    }

    /**
     * Creates a directory in the in-memory namespace and records the
     * operation in the edits log.
     *
     * @param path directory path to create
     * @return always {@code true} when no exception is thrown
     * @throws Exception if updating the directory tree or the edits log fails
     */
    public Boolean mkdir(String path) throws Exception {
        directory.mkdir(path);
        editlog.logEdit(EditLogFactory.mkdir(path));
        return true;
    }

    /**
     * Creates a file (filename includes its absolute path).
     * The edits log entry is only written when the in-memory directory tree
     * accepted the file, i.e. it did not already exist.
     *
     * @param filename absolute path of the file to create
     * @return {@code true} if created, {@code false} if it already existed
     * @throws Exception if writing the edits log fails
     */
    public Boolean create(String filename) throws Exception {
        boolean created = directory.create(filename);
        if (created) {
            editlog.logEdit(EditLogFactory.create(filename));
        }
        return created;
    }

    /**
     * Force-flushes the in-memory edits log buffer to disk.
     */
    public void flush() {
        this.editlog.flush();
    }

    /**
     * Gets the FSEditlog component owned by this namesystem.
     *
     * @return the edits log component
     */
    public FSEditlog getEditlog() {
        return editlog;
    }

    /**
     * Updates the txid of the most recent checkpoint.
     *
     * @param txid txid covered by the latest checkpoint
     */
    public void setCheckpointTxid(long txid) {
        this.checkpointTxid = txid;
    }

    /**
     * @return the txid covered by the most recent checkpoint
     */
    public long getCheckpointTxid() {
        return checkpointTxid;
    }

    /**
     * Persists the checkpoint txid to disk.
     *
     * The file name must match what {@link #loadCheckpointTxid()} reads,
     * otherwise the saved txid can never be recovered on restart.
     */
    public void saveCheckpointTxid() {
        // FIX: original wrote "/Users/admin/editslog/checkpoint-txid.mate"
        // (typo "mate") while the loader read "/Users/admin/edits/checkpoint-txid.meta"
        // — unified to the editslog directory with the ".meta" suffix
        String path = "/Users/admin/editslog/checkpoint-txid.meta";

        File existingFile = new File(path);
        if (existingFile.exists()) {
            existingFile.delete();
        }

        // try-with-resources closes the resources in reverse order automatically,
        // replacing the original manual close sequence
        try (RandomAccessFile file = new RandomAccessFile(path, "rw");
             FileOutputStream out = new FileOutputStream(file.getFD());
             FileChannel channel = out.getChannel()) {
            ByteBuffer dataBuffer = ByteBuffer.wrap(
                    String.valueOf(checkpointTxid).getBytes(StandardCharsets.UTF_8));
            channel.write(dataBuffer);
            // Force the data to disk (false: file content only, not metadata)
            channel.force(false);
        } catch (Exception e) {
            // FIX: original called e.getStackTrace(), which prints nothing and
            // silently swallowed the failure
            e.printStackTrace();
        }
    }

    /**
     * Recovers the metadata on startup: first restore the checkpoint txid,
     * then load the fsimage snapshot, then replay the edits logs written
     * after that checkpoint.
     */
    public void recoverNamespace() {
        try {
            // FIX: loadCheckpointTxid() was never called anywhere (dead code),
            // yet loadEditLog() relies on checkpointTxid to decide which edit
            // logs to replay — it must run first
            loadCheckpointTxid();
            loadFSImage();
            loadEditLog();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Loads the fsimage file into memory to restore the directory tree.
     *
     * @throws Exception on unexpected I/O failure
     */
    private void loadFSImage() throws Exception {
        String path = "/Users/admin/backupnode/fsimage.meta";

        // FIX: a missing fsimage previously threw FileNotFoundException, which
        // aborted the whole recovery (edit log replay included)
        File fsimageFile = new File(path);
        if (!fsimageFile.exists()) {
            System.out.println("fsimage文件不存在，不进行恢复......");
            return;
        }

        try (FileInputStream in = new FileInputStream(path);
             FileChannel channel = in.getChannel()) {
            // Buffer size could be made dynamic: persist the size of each
            // received fsimage and allocate exactly that much on restart
            ByteBuffer buffer = ByteBuffer.allocate(1024 * 1024);
            int count = channel.read(buffer);
            // FIX: read() returns -1 for an empty file; the original then did
            // new String(..., 0, -1) and threw
            if (count <= 0) {
                System.out.println("fsimage文件为空，不进行恢复......");
                return;
            }

            buffer.flip();
            String fsimageJson = new String(buffer.array(), 0, count, StandardCharsets.UTF_8);
            // TODO: deserialize fsimageJson into the in-memory INode tree;
            // parseObject currently only validates that the snapshot is JSON
            JSONObject.parseObject(fsimageJson);
            directory.setINode();
        }
    }

    /**
     * Loads and replays the edits logs.
     * Only entries with txid greater than checkpointTxid are replayed —
     * everything before the checkpoint is already captured by the fsimage.
     *
     * @throws Exception on I/O or parse failure
     */
    private void loadEditLog() throws Exception {
        // FIX: the directory was "F:\\development\\editslog\\" while individual
        // files were read from "/Users/admin/editslog/" — unified to the latter,
        // which matches every other path in this class
        File dir = new File("/Users/admin/editslog/");

        // FIX: listFiles() returns null when the directory does not exist,
        // which the original dereferenced and NPE'd on
        File[] children = dir.listFiles();
        if (children == null || children.length == 0) {
            System.out.println("当前没有任何editlog文件，不进行恢复......");
            return;
        }

        // Keep only edits log files (skip checkpoint files etc.)
        List<File> files = new ArrayList<File>();
        for (File file : children) {
            if (file.getName().contains("edits")) {
                files.add(file);
            }
        }

        // FIX: the empty check originally ran AFTER sorting; check first
        if (files.isEmpty()) {
            System.out.println("当前没有任何editlog文件，不进行恢复......");
            return;
        }

        // Sort by each file's startTxid so entries are replayed in order.
        // File name format: edits-{startTxid}-{endTxid}.log
        Collections.sort(files, new Comparator<File>() {

            @Override
            public int compare(File o1, File o2) {
                int o1StartTxid = Integer.parseInt(o1.getName().split("-")[1]);
                int o2StartTxid = Integer.parseInt(o2.getName().split("-")[1]);
                // Integer.compare avoids the subtraction-overflow pitfall
                return Integer.compare(o1StartTxid, o2StartTxid);
            }

        });

        for (File file : files) {
            System.out.println("准备恢复editlog文件中的数据：" + file.getName());

            String[] splitedName = file.getName().split("-");
            long startTxid = Long.parseLong(splitedName[1]);
            long endTxid = Long.parseLong(splitedName[2].split("[.]")[0]);

            // Only edit logs ending after the checkpoint need replaying
            if (endTxid > checkpointTxid) {
                // FIX: read the file we actually listed instead of re-building
                // the path against a different hard-coded directory
                List<String> editsLogs = Files.readAllLines(
                        Paths.get(file.getAbsolutePath()), StandardCharsets.UTF_8);

                for (String editLogJson : editsLogs) {
                    JSONObject editLog = JSONObject.parseObject(editLogJson);
                    long txid = editLog.getLongValue("txid");

                    // Entries at or before the checkpoint are already in fsimage
                    if (txid > checkpointTxid) {
                        System.out.println("准备回放editlog：" + editLogJson);

                        // Replay into the in-memory directory tree
                        String op = editLog.getString("OP");

                        if (op.equals("MKDIR")) {
                            String path = editLog.getString("PATH");
                            try {
                                directory.mkdir(path);
                            } catch (Exception e) {
                                e.printStackTrace();
                            }
                        }
                        // NOTE(review): create() also logs a CREATE edit, but no
                        // CREATE replay exists here — confirm whether that is a gap
                    }
                }
            }
        }
    }

    /**
     * Loads the persisted checkpoint txid from disk into memory.
     *
     * @throws Exception on unexpected I/O failure
     */
    private void loadCheckpointTxid() throws Exception {
        // FIX: original read "/Users/admin/edits/checkpoint-txid.meta", a
        // different directory than saveCheckpointTxid() writes to — unified
        // to the editslog directory
        String path = "/Users/admin/editslog/checkpoint-txid.meta";

        File file = new File(path);
        if (!file.exists()) {
            System.out.println("checkpoint txid文件不存在，不进行恢复.......");
            return;
        }

        try (FileInputStream in = new FileInputStream(path);
             FileChannel channel = in.getChannel()) {
            // 1 KB is ample: the file holds one decimal long
            ByteBuffer buffer = ByteBuffer.allocate(1024);
            int count = channel.read(buffer);
            // FIX: read() returns -1 for an empty file; the original then did
            // new String(..., 0, -1) and threw
            if (count <= 0) {
                System.out.println("checkpoint txid文件为空，不进行恢复.......");
                return;
            }

            buffer.flip();
            long checkpointTxid = Long.parseLong(
                    new String(buffer.array(), 0, count, StandardCharsets.UTF_8).trim());
            System.out.println("恢复checkpoint txid：" + checkpointTxid);

            this.checkpointTxid = checkpointTxid;
        }
    }

    /**
     * Records one successfully stored replica for the given file.
     *
     * If the file already has REPLICA_NUM replicas, this report is an excess
     * replica: the reporting datanode gets a remove-replica task and its
     * stored-data-size accounting is rolled back.
     *
     * @param hostname   hostname of the reporting datanode
     * @param ip         ip of the reporting datanode
     * @param filename   file name (absolute path)
     * @param fileLength file size in bytes
     */
    public void addReceiveReplica(String hostname, String ip, String filename, long fileLength) {
        // Acquire before try so a (theoretical) failure to lock does not
        // trigger an unmatched unlock in finally
        replicasLock.writeLock().lock();
        try {
            // Maintain: file -> datanodes holding its replicas
            List<DataNodeInfo> replicas = replicasByFilename.get(filename);
            if (replicas == null) {
                replicas = new ArrayList<DataNodeInfo>();
                replicasByFilename.put(filename, replicas);
            }

            DataNodeInfo datanode = dataNodeManager.getDataNodeInfo(ip, hostname);
            // FIX: use >= instead of == so the replica count can never be
            // pushed past the limit by duplicate/late reports
            if (replicas.size() >= REPLICA_NUM) {
                // Roll back this node's stored data size accounting
                datanode.addStoredDataSize(-fileLength);

                // Schedule removal of the excess replica on that datanode
                RemoveReplicaTask removeReplicaTask = new RemoveReplicaTask(filename, datanode);
                datanode.addRemoveReplicaTask(removeReplicaTask);

                return;
            }

            replicas.add(datanode);

            // Maintain: datanode -> files it stores.
            // NOTE(review): keyed by hostname here, but getFilesByDatanode(ip,
            // hostname) looks up "ip-hostname" — confirm the intended key format
            List<String> files = filesByDatanode.get(hostname);
            if (files == null) {
                files = new ArrayList<String>();
                filesByDatanode.put(hostname, files);
            }

            files.add(filename);

            System.out.println("收到存储上报，当前的副本信息为：" + replicasByFilename + "，" + filesByDatanode);
        } finally {
            replicasLock.writeLock().unlock();
        }
    }

    /**
     * Removes all file-replica bookkeeping for a datanode that has been
     * declared dead.
     *
     * @param datanode the dead datanode
     */
    public void removeDeadDatanode(DataNodeInfo datanode) {
        replicasLock.writeLock().lock();
        try {
            List<String> filenames = filesByDatanode.get(datanode.getHostname());
            // FIX: a datanode that never reported any file has no entry here;
            // the original iterated over null and NPE'd
            if (filenames != null) {
                for (String filename : filenames) {
                    List<DataNodeInfo> replicas = replicasByFilename.get(filename);
                    if (replicas != null) {
                        replicas.remove(datanode);
                    }
                }
            }

            filesByDatanode.remove(datanode.getHostname());
        } finally {
            replicasLock.writeLock().unlock();
        }
    }

    /**
     * Removes one file replica of a datanode from the in-memory metadata.
     *
     * @param id   unique id of the datanode
     * @param file replica name (format "{filename}_..."; the part before "_"
     *             is the key into replicasByFilename)
     */
    public void removeReplicaFromDataNode(String id, String file) {
        replicasLock.writeLock().lock();
        try {
            // FIX: the datanode may have no file list recorded; the original
            // chained .remove() on a possibly-null get() result
            List<String> files = filesByDatanode.get(id);
            if (files != null) {
                files.remove(file);
            }

            // FIX: likewise guard the replica list lookup before iterating
            List<DataNodeInfo> replicas = replicasByFilename.get(file.split("_")[0]);
            if (replicas != null) {
                Iterator<DataNodeInfo> replicasIterator = replicas.iterator();
                while (replicasIterator.hasNext()) {
                    DataNodeInfo replica = replicasIterator.next();
                    if (replica.getId().equals(id)) {
                        // Iterator.remove avoids ConcurrentModificationException
                        replicasIterator.remove();
                    }
                }
            }
        } finally {
            replicasLock.writeLock().unlock();
        }
    }

    /**
     * Gets the files stored on a datanode.
     *
     * NOTE(review): returns the internal mutable list — callers must not
     * modify it outside the lock.
     *
     * @param hostname datanode hostname
     * @return the file list for that node, or null if none is recorded
     */
    public List<String> getFilesByDatanode(String hostname) {
        // FIX: this is a pure read — the original took the WRITE lock,
        // needlessly blocking all concurrent readers
        replicasLock.readLock().lock();
        try {
            return filesByDatanode.get(hostname);
        } finally {
            replicasLock.readLock().unlock();
        }
    }

    /**
     * Gets the files stored on a datanode, addressed by "ip-hostname".
     *
     * @param ip       datanode ip
     * @param hostname datanode hostname
     * @return the file list found under key "ip-hostname", or null if absent
     */
    public List<String> getFilesByDatanode(String ip, String hostname) {
        replicasLock.readLock().lock();
        try {
            String key = ip + "-" + hostname;
            System.out.println("当前filesByDatanode为" + filesByDatanode + "，将要以key=" + key + "获取文件列表");
            return filesByDatanode.get(key);
        } finally {
            replicasLock.readLock().unlock();
        }
    }

    /**
     * Picks a source datanode for a replication task: any replica holder of
     * the file that is not the dead datanode.
     *
     * @param filename     file that needs re-replication
     * @param deadDatanode datanode that must not be used as the source
     * @return a usable source node, or null if none exists
     */
    public DataNodeInfo getReplicateSource(String filename, DataNodeInfo deadDatanode) {
        DataNodeInfo replicateSource = null;

        replicasLock.readLock().lock();
        try {
            List<DataNodeInfo> replicas = replicasByFilename.get(filename);
            // FIX: the file may have no replica entry at all; the original
            // iterated over null and NPE'd
            if (replicas != null) {
                for (DataNodeInfo replica : replicas) {
                    if (!replica.equals(deadDatanode)) {
                        replicateSource = replica;
                    }
                }
            }
        } finally {
            replicasLock.readLock().unlock();
        }

        return replicateSource;
    }


    /**
     * 获取文件的某个副本所在的机器
     *
     * @param filename
     * @return
     */
    public DataNodeInfo getDataNodeForFile(String filename, String excludedDataNodeId) {
        try {
            replicasLock.readLock().lock();

            DataNodeInfo excludedDataNode = dataNodeManager.getDataNodeInfo(excludedDataNodeId);

            List<DataNodeInfo> datanodes = replicasByFilename.get(filename);
            if(datanodes.size() == 1) {
                if(datanodes.get(0).equals(excludedDataNode)) {
                    return null;
                }
            }

            int size = datanodes.size();
            Random random = new Random();

            while(true) {
                int index = random.nextInt(size);
                DataNodeInfo datanode = datanodes.get(index);
                if(!datanode.equals(excludedDataNode)) {
                    return datanode;
                }
            }
        } finally {
            replicasByFilenameLock.readLock().unlock();
        }
    }
}
