package com.sunday.leaf.old.leaf.snowflake;

import com.sunday.common.core.gson.GsonUtils;
import com.sunday.leaf.old.leaf.snowflake.constants.SnowflakeConstants;
import com.sunday.leaf.old.leaf.snowflake.exception.CheckLastTimeException;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryUntilElapsed;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import org.springframework.util.FileCopyUtils;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * @Description: zookeeper持有者
 * 构建根路径, 更新本地缓存附件, 持续更新当前节点下的端点信息
 * @Author: zsy
 * @Create: 2022/10/24 15:50
 */
@Slf4j
public class SnowflakeZookeeperHolder {

    /**
     * key ip:port
     * e.g.: 10.192.xxx.xx:15000
     */
    private String listenAddress = null;

    /**
     * zk key ip:port_0000000000
     * e.g.: /snowflake/sunday-leaf-starter/forever/10.192.xxx.xx:15000_0000000000
     */
    private String zkAddressNode = null;

    /**
     * Worker id held by this service instance.
     * Defaults to 0.
     */
    @Getter
    private int workerId;

    // local ip this node reports in its zk payload
    @Getter
    private String ip;

    // local port this node reports in its zk payload
    @Getter
    private String port;

    // zk connect string passed to the Curator client
    @Getter
    private String address;

    /**
     * Timestamp (epoch millis) of the last successful data upload to zk.
     */
    private long lastUpdateTime;

    /**
     * zk client (Curator).
     */
    private CuratorFramework curator;

    /**
     * Binds this holder to the given ip/port pair and zk connect string.
     * Private on purpose — obtain instances via {@link #of(String, String, String)}.
     */
    private SnowflakeZookeeperHolder(String ip, String port, String address) {
        this.address = address;
        this.ip = ip;
        this.port = port;
        this.listenAddress = ip + ":" + port;
    }

    /**
     * Connects to zk and resolves this node's workerId.
     * Three paths: the forever root does not exist yet (first node ever — create it);
     * the root exists and already contains our ip:port (reuse its sequence number after
     * a clock check); or the root exists without us (create a new sequential node).
     * On any zk failure, falls back to the locally cached workerId file.
     *
     * @return true if a workerId was resolved (from zk or the local fallback file)
     */
    public boolean init() {
        try {
            curator = createWithOptions(address, new RetryUntilElapsed(1000, 3), 10000, 6000);
            curator.start();
            // Does the persistent root path exist yet?
            Stat stat = curator.checkExists().forPath(SnowflakeConstants.PATH_FOREVER);
            if (stat == null) {
                // No root node: very first start — create /snowflake/sunday-leaf-starter/forever/ip:port_0000000000 and upload data
                zkAddressNode = createNode(curator);
                updateLocalAndUploadData(curator);
                return true;
            } else {
                return curator.getChildren().forPath(SnowflakeConstants.PATH_FOREVER).stream()
                        .map(key -> key.split("_"))
                        .filter(nodeKey -> listenAddress.equals(nodeKey[0]))
                        .findAny()
                        .map(nodeKey -> {
                            // Our own node already exists: zkAddressNode = ip:port_<seq>
                            zkAddressNode = SnowflakeConstants.PATH_FOREVER + "/" + nodeKey[0] + "_" + nodeKey[1];
                            // Guard against the local clock being behind the last reported time
                            checkInitTimeStamp(curator, zkAddressNode);
                            workerId = Integer.parseInt(nodeKey[1]);
                            updateLocalAndUploadData(curator);
                            log.info("[Old NODE]find forever node have this endpoint ip-{} port-{} workerId-{} childNode and start SUCCESS", ip, port, workerId);
                            return true;
                        })
                        .orElseGet(() -> {
                            // Newly started node: create a persistent node, no time check needed
                            zkAddressNode = createNode(curator);
                            workerId = Integer.parseInt(zkAddressNode.split("_")[1]);
                            updateLocalAndUploadData(curator);
                            log.info("[New NODE]can not find node on forever node that endpoint ip-{} port-{} workerId-{},create own node on forever node and start SUCCESS ", ip, port, workerId);
                            return true;
                        });
            }
        } catch (Exception e) {
            /*
             * Fallback strategy, spelled out explicitly:
             * 1. When zk is unreachable, the workerId is read from the local cache file.
             *    On bare metal this is fine; in containers the default temp dir may vanish on
             *    restart, which is why the id is normally re-derived from zk on every start.
             * 2. updateLocalAndUploadData enforces the order "update local file -> upload to zk",
             *    so on failure here the zk node name can be reconstructed from the cached
             *    workerId and the periodic upload kept running.
             */
            log.error("start node error {}", e.toString(), e);
            try {
                Properties properties = new Properties();
                // try-with-resources: the stream was previously leaked
                try (FileInputStream in = new FileInputStream(new File(SnowflakeConstants.LOCAL_PATH))) {
                    properties.load(in);
                }
                workerId = Integer.parseInt(properties.getProperty("workerId"));
                log.info("start failed ,use local node file properties workerId-{}", workerId);
                // Reconstruct the zk node name from the cached workerId and keep uploading
                zkAddressNode = SnowflakeConstants.PATH_FOREVER + "/" + listenAddress + "_" + String.format("%010d", workerId);
                scheduledUploadData(curator, zkAddressNode);
            } catch (Exception ex) {
                // pass the throwable itself so the stack trace is logged
                log.error("read file error", ex);
                return false;
            }
            return true;
        }
    }

    /**
     * Persists the current workerId to the local cache file, then starts the periodic
     * upload of this node's endpoint data to zk.
     * The order is deliberate: writing the local file first means a later zk outage can
     * be recovered from the cached workerId.
     *
     * @param curator active zk client
     */
    private void updateLocalAndUploadData(CuratorFramework curator) {
        // cache workerId locally for the zk-unavailable fallback
        updateLocalWorkerId(workerId);
        // report this node's timestamp to the forever node on a schedule
        scheduledUploadData(curator, zkAddressNode);
    }

    /**
     * Starts a single-threaded daemon scheduler that refreshes this node's payload in zk,
     * i.e. keeps the timestamp in {"ip":"10.192.xxx.xx","port":"15000","timestamp":...} current.
     * Runs first after 1 second, then 3 seconds after each completed upload.
     *
     * @param curator       active zk client
     * @param zkAddressNode full path of this node's znode
     */
    private void scheduledUploadData(final CuratorFramework curator, final String zkAddressNode) {
        Executors
                .newSingleThreadScheduledExecutor(runnable -> {
                    // daemon thread so the scheduler never blocks JVM shutdown
                    Thread worker = new Thread(runnable, "schedule-upload-time");
                    worker.setDaemon(true);
                    return worker;
                })
                .scheduleWithFixedDelay(() -> updateNewData(curator, zkAddressNode), 1L, 3L, TimeUnit.SECONDS);
    }

    /**
     * Verifies that the timestamp last stored in zk for this endpoint is not ahead of
     * the current local clock — a future timestamp indicates local clock rollback.
     *
     * @param curator       active zk client
     * @param zkAddressNode full path of this node's znode
     * @throws CheckLastTimeException if the stored timestamp is ahead of local time
     * @throws RuntimeException       if the node data cannot be read from zk
     */
    private void checkInitTimeStamp(CuratorFramework curator, String zkAddressNode) {
        byte[] bytes;
        try {
            bytes = curator.getData().forPath(zkAddressNode);
        } catch (Exception e) {
            // log the full stack trace (not just toString()) before propagating
            log.error("the get node has this endpoint exception ", e);
            throw new RuntimeException(e);
        }
        // decode with an explicit charset instead of the platform default
        Endpoint endPoint = deBuildData(new String(bytes, StandardCharsets.UTF_8));
        // the stored time must not be ahead of the current local time
        if (endPoint.getTimestamp() > System.currentTimeMillis()) {
            throw new CheckLastTimeException("init timestamp check error,forever node timestamp gt this node time");
        }
    }

    /**
     * Creates this node's persistent-sequential znode (the zk-assigned suffix becomes the
     * workerId) and stores the initial endpoint payload.
     *
     * @param curator active zk client
     * @return full path of the created node, e.g. .../ip:port_0000000000
     * @throws RuntimeException if the node cannot be created
     */
    private String createNode(CuratorFramework curator) {
        try {
            // encode the JSON payload explicitly as UTF-8 instead of the platform default charset
            return curator.create()
                    .creatingParentsIfNeeded()
                    .withMode(CreateMode.PERSISTENT_SEQUENTIAL)
                    .forPath(SnowflakeConstants.PATH_FOREVER + "/" + listenAddress + "_", buildData().getBytes(StandardCharsets.UTF_8));
        } catch (Exception e) {
            // log the full stack trace (not just toString()) before propagating
            log.error("create node error msg ", e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Uploads a fresh endpoint payload (mainly a new timestamp) to zk.
     * Skipped when the local clock is behind the last successful upload, so a
     * backwards-moving timestamp is never published.
     *
     * @param curator active zk client
     * @param path    znode path to update
     */
    private void updateNewData(CuratorFramework curator, String path) {
        try {
            // if the local clock is behind the last upload time, do nothing
            if (System.currentTimeMillis() < lastUpdateTime) {
                return;
            }
            // explicit UTF-8 instead of the platform default charset
            curator.setData().forPath(path, buildData().getBytes(StandardCharsets.UTF_8));
            lastUpdateTime = System.currentTimeMillis();
        } catch (Exception e) {
            // best-effort upload: log and let the scheduler retry on the next tick
            log.error("update init data error path is {} error is {}", path, e.toString(), e);
        }
    }

    /**
     * Serializes this node's current endpoint (ip/port plus "now" timestamp) to JSON.
     */
    private String buildData() {
        Endpoint endpoint = new Endpoint(ip, port, System.currentTimeMillis());
        return GsonUtils.SERIALIZE_NULL.toJson(endpoint);
    }

    /**
     * Parses a JSON payload back into an {@link Endpoint}.
     */
    private Endpoint deBuildData(String json) {
        Endpoint endpoint = GsonUtils.DEFAULT.fromJson(json, Endpoint.class);
        return endpoint;
    }

    /**
     * Writes the workerId to the local cache file so the service can still start from
     * the cached value when zk is unavailable.
     *
     * @param workerId node id to persist
     */
    private void updateLocalWorkerId(int workerId) {
        File leafConfFile = new File(SnowflakeConstants.LOCAL_PATH);
        boolean exists = leafConfFile.exists();
        log.info("file exists status is {}", exists);
        if (exists) {
            try {
                // overwrite the cached workerId (explicit UTF-8 instead of platform default)
                FileCopyUtils.copy(("workerId=" + workerId).getBytes(StandardCharsets.UTF_8), leafConfFile);
                log.info("update file cache workerId is {}", workerId);
            } catch (IOException e) {
                // pass the throwable itself so the stack trace is logged
                log.error("update file cache error ", e);
            }
        } else {
            try {
                File parentDir = leafConfFile.getParentFile();
                boolean mkDirs = parentDir.mkdirs();
                log.info("init local file cache create parent dirs status is {}, worker id is {}", mkDirs, workerId);
                // mkdirs() returns false when the directory already exists; previously that
                // skipped the write even though the parent was usable — accept either case
                if (mkDirs || parentDir.exists()) {
                    if (leafConfFile.createNewFile()) {
                        FileCopyUtils.copy(("workerId=" + workerId).getBytes(StandardCharsets.UTF_8), leafConfFile);
                        log.info("local file cache workerId is {}", workerId);
                    }
                } else {
                    log.warn("create parent dir error");
                }
            } catch (IOException e) {
                log.warn("create workerId conf file error", e);
            }
        }
    }

    /**
     * Builds (but does not start) a Curator client for the given connect string.
     *
     * @param address             zk connect string
     * @param retryPolicy         retry policy for failed zk operations
     * @param connectionTimeoutMS connection timeout in milliseconds
     * @param sessionTimeoutMS    session timeout in milliseconds
     * @return an unstarted {@link CuratorFramework} instance
     */
    private CuratorFramework createWithOptions(String address, RetryPolicy retryPolicy, int connectionTimeoutMS, int sessionTimeoutMS) {
        CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
        builder.connectString(address);
        builder.retryPolicy(retryPolicy);
        builder.connectionTimeoutMs(connectionTimeoutMS);
        builder.sessionTimeoutMs(sessionTimeoutMS);
        return builder.build();
    }

    /**
     * Static factory for holders; keeps the constructor private.
     *
     * @param ip      local ip this node reports
     * @param port    local port this node reports
     * @param address zk connect string
     * @return a new, uninitialized holder — call {@link #init()} before use
     */
    public static SnowflakeZookeeperHolder of(String ip, String port, String address) {
        SnowflakeZookeeperHolder holder = new SnowflakeZookeeperHolder(ip, port, address);
        return holder;
    }

    /**
     * @Description: Endpoint payload stored at this node's znode, serialized by
     * buildData as {"ip":...,"port":...,"timestamp":...}
     * @Author: zsy
     * @Create: 2022/10/24 18:18
     */
    @Data
    @AllArgsConstructor
    @NoArgsConstructor
    public static class Endpoint {

        // ip reported by this node
        private String ip;

        // port reported by this node
        private String port;

        // epoch millis of the last upload; compared against local time in checkInitTimeStamp
        private long timestamp;

    }

}
