package com.xctech.yace.util;

import lombok.extern.slf4j.Slf4j;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.zookeeper.CreateMode;

import java.nio.charset.StandardCharsets;
import java.util.*;


/**
 * kafka offset 工具类
 *
 */
@Slf4j
public class TradeOffsetUtil {

    public static void main(String[] args) {
        String zkConnStr = "192.168.0.171:2181";
        int sleepTime = 1000;
        int retryTimes = 3;
        CuratorFramework client =
                CuratorFrameworkFactory.newClient(
                        zkConnStr,
                        new ExponentialBackoffRetry(
                                sleepTime,
                                retryTimes
                        ));
        client.start();
        try {
//            setData(client,"/xc/offset/test/lix","123");
//            client.setData().forPath(path, value.getBytes());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** 服务重启标志父路径 */
    private static final String PATH = "/xctech/restart/services/";

    // 找到TFLOWInfo偏移量对应数据的超时时间(120s)
    private static final int FIND_OFFSET_TIMEOUT = 120;

    // 交易日间实时偏移量维护
    private static final String REGEX_OFFSET_MSG =
            "^[0-9]+?_[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}";

    // 时间戳格式
    private static final String REGEX_DATETIME_MSG = "[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}";

    public static CuratorFramework getZkClient(ParameterTool parameterTool) {
        CuratorFramework client = CuratorFrameworkFactory.newClient(parameterTool.get("zookeeper.bootstrap.servers"),
                new ExponentialBackoffRetry(parameterTool.getInt("zookeeper.baseSleepTime", 1000) ,
                        parameterTool.getInt("zookeeper.maxRetries", 3)));
        return client;
    }

    /**
     * 取得topics对应的最新offset
     *
     * @param consumer 消费者
     * @param topics   topic列表
     * @return topic每个分区最新的offset
     */
    public static Map<String, Map<TopicPartition, Long>> getLatestOffset(KafkaConsumer<Byte[], Byte[]> consumer, List<String> topics) {
        HashMap<String, Map<TopicPartition, Long>> res = new HashMap<>(topics.size());
        for (String topic : topics) {
            List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
            ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                topicPartitions.add(new TopicPartition(topic, partitionInfo.partition()));
            }

            Map<TopicPartition, Long> offsets = consumer.endOffsets(topicPartitions);
            res.put(topic, offsets);
        }
        return res;
    }


    /**
     * 保存最新的offset
     *
     * @param client  zookeeper client
     * @param topicOffsets offset
     * @param date         日期(yyyyMMdd)
     * @param offsetRootPath      offset存储的父路径(例如: /xxx/xx)
     * @param restartFlagRootPath 重启标志的父路径
     * @param servicesName      服务名称(例如: clearing)
     * @param overWrite    是否覆盖
     */
    public static void saveOffset(CuratorFramework client,
                                        Map<String, Map<TopicPartition, Long>> topicOffsets,
                                        String date,
                                        String offsetRootPath,
                                        String restartFlagRootPath,
                                        String servicesName,
                                        boolean overWrite) throws Exception {
        if(!overWrite) {
            log.info("overwrite设置为false，不更新offset和重启标志");
        };
        for (Map.Entry<String, Map<TopicPartition, Long>> entry : topicOffsets.entrySet()) {
            String topic = entry.getKey();
            Map<TopicPartition, Long> partitionOffset = entry.getValue();
            String chPath = offsetRootPath + "/"  + servicesName +"/" + topic;
            String value = getValue(date, partitionOffset);
            setData(client, chPath, value);
        }

        //写出重启标识
        String servicesPath = restartFlagRootPath + servicesName;
        String value = System.currentTimeMillis() + "";
        setData(client, servicesPath, value);
    }

    public static void setData(CuratorFramework client, String path, String value) throws Exception {
        try {
            if(client.checkExists().forPath(path) == null) {
                client.create()
                        .creatingParentsIfNeeded()
                        .withMode(CreateMode.PERSISTENT)
                        .forPath(path, value.getBytes());
            }else{
                client.setData().forPath(path, value.getBytes());
            }
        } catch (Exception e) {
            throw e;
        }
    }

    // 从zk指定节点获取数据
    public static String getData(CuratorFramework client, String path) throws Exception {
        try {
            if (client.checkExists().forPath(path) == null) {
                return null;
            } else {
                return new String(client.getData().forPath(path));
            }
        } catch (Exception e) {
            throw e;
        }
    }

    // 格式化下发给清算的，日间交易接口的首条offset
    private static String getValue(String date, Map<TopicPartition, Long> partitionOffset) {
        StringBuffer sb = new StringBuffer();
        sb.append(date).append(",");
        for (Map.Entry<TopicPartition, Long> offsets : partitionOffset.entrySet()) {
            int partition = offsets.getKey().partition();
            Long offset = offsets.getValue();
            sb.append(partition).append("-").append(offset).append(",");
        }
        return sb.subSequence(0, sb.length() - 1).toString();
    }


    /**
     * 根据topic和日期取得最新的offset
     * @param client  zookeeper client
     * @param topics    topic列表
     * @param path      offset存储的父路径(例如:/xxx/xx)
     * @return  topic 对应的offset集合
     */
    public static Tuple2<String, HashMap<KafkaTopicPartition, Long>> getOffset(
            CuratorFramework client,
            List<String> topics,
            String path) {
        HashMap<KafkaTopicPartition, Long> res = new HashMap<>();
        String date = null;
        for (String topic : topics) {
            String chPath = path+"/"+topic;
            try {
                String value = new String(client.getData().forPath(chPath));
                String[] split = value.split(",");
                checkDate(date, chPath, split[0]);
                date = split[0];
                for (int i = 1; i < split.length; i++) {
                    String[] ps = split[i].split("-");
                    res.put(new KafkaTopicPartition(topic, Integer.parseInt(ps[0])), Long.parseLong(ps[1]));
                }
            } catch (Exception e) {
                log.error("从zookeeper获取offset失败!path:{}, message:{}", chPath, e.getMessage());
            }
        }
        return Tuple2.of(date, res);
    }

    private static void checkDate(String date, String chPath, String date2) {
        if(date != null && !date.equals(date2)) log.error("取到offset日期不一致! topic:{}, date:{}, 当前topic日期:{}", chPath, date, date2);
    }

}
