package com.itstyle.seckill.topic.zk;

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.leader.LeaderSelector;
import org.apache.curator.framework.recipes.leader.LeaderSelectorListener;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.retry.RetryNTimes;
import org.apache.curator.utils.EnsurePath;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

import java.util.concurrent.TimeUnit;

/**
 * @author liuwenxiang
 * @date 2020/3/12 22:25
 * @see <a href="https://www.cnblogs.com/LiZhiW/p/4923693.html">Curator usage notes</a>
 */
public class ZkDemo {
    // Java clients for ZooKeeper: the native zookeeper client, Apache Curator,
    // and the open-source zkclient. Apache Curator is the recommended choice.
    /**
     * Native ZooKeeper client Maven coordinates:
     * <dependency>
     * <groupId>org.apache.zookeeper</groupId>
     * <artifactId>zookeeper</artifactId>
     * <version>3.4.6</version>
     * </dependency>
     * Apache Curator Maven coordinates:
     * <dependency>
     * <groupId>org.apache.curator</groupId>
     * <artifactId>curator-framework</artifactId>
     * <version>2.9.0</version>
     * </dependency>
     * zkclient Maven coordinates:
     * <dependency>
     * <groupId>com.github.adyliu</groupId>
     * <artifactId>zkclient</artifactId>
     * <version>2.1.1</version>
     * </dependency>
     */
    public static CuratorFramework client;

    static {
        // Exponential backoff: base sleep 1s, at most 3 retries.
        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
        client = CuratorFrameworkFactory.newClient("127.0.0.1:2181", retryPolicy);
        client.start();
    }

    /**
     * Demonstrates a native one-shot ZooKeeper watch registered through Curator.
     * The watch is registered via {@code getData().usingWatcher(...)} and fires
     * at most once: only the first update below triggers the callback.
     */
    public static void testWatcher() {
        try {
            client.checkExists().forPath("/lwx/curator");
            // Define the watcher callback.
            Watcher w = new Watcher() {
                @Override
                public void process(WatchedEvent watchedEvent) {
                    System.out.println("监听到的变化 watchedEvent = " + watchedEvent);
                }
            };
            // Register a ONE-SHOT data watch on the node itself.
            client.getData().usingWatcher(w).forPath("/lwx/curator");
            // First update: fires the watcher. Note: the update must target the
            // watched path — a data watch does not fire for child-node changes.
            client.setData().forPath("/lwx/curator", "data2".getBytes());
            // Second update: the watch was consumed, so nothing fires.
            client.setData().forPath("/lwx/curator", "data1".getBytes());
            Thread.sleep(Integer.MAX_VALUE);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Demonstrates a {@link PathChildrenCache} listener, which — unlike a native
     * watch — keeps firing for every child add/update/remove event.
     */
    public static void testCacheWatcher() {

        PathChildrenCache watcher = new PathChildrenCache(client, "/lwx/curator", true);
        watcher.getListenable().addListener((client1, event) -> {
            ChildData data = event.getData();
            if (data == null) {
                System.out.println("No data in event[" + event + "]");
            } else {
                System.out.println("Receive event: "
                        + "type=[" + event.getType() + "]"
                        + ", path=[" + data.getPath() + "]"
                        + ", data=[" + new String(data.getData()) + "]"
                        + ", stat=[" + data.getStat() + "]");
            }
        });
        try {
            watcher.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
            System.out.println("Register zk watcher successfully!");
            // Use Curator's fluent API rather than the raw ZooKeeper handle:
            // the raw create(..., null, ...) call passes a null ACL and would
            // throw KeeperException.InvalidACLException. Curator applies
            // OPEN_ACL_UNSAFE by default.
            client.create().withMode(CreateMode.EPHEMERAL).forPath("/lwx/curator/aa", "aa".getBytes());
            client.create().withMode(CreateMode.EPHEMERAL).forPath("/lwx/curator/bb", "bb".getBytes());

            Thread.sleep(Integer.MAX_VALUE);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Starts two competing threads to demonstrate Curator's distributed lock
     * ({@link InterProcessMutex}): the second thread blocks until the first
     * releases the lock.
     */
    public static  void testLock(){
        new Thread(new Runnable() {
            @Override
            public void run() {
                doWith(client);
            }
        }).start();

        new Thread(new Runnable() {
            @Override
            public void run() {
                doWith(client);
            }
        }).start();
    }

    /**
     * Tries to acquire the distributed lock at /curator/test, holds it for 5s,
     * then releases it.
     *
     * @param client the shared Curator client used to coordinate the lock
     */
    private static void doWith(CuratorFramework client){
        //https://blog.csdn.net/crazymakercircle/article/details/85956246
        InterProcessMutex lock = new InterProcessMutex(client, "/curator/test");
        // Track acquisition so we never release a lock we do not hold —
        // releasing an unheld InterProcessMutex throws IllegalMonitorStateException.
        boolean acquired = false;
        try {
            // Wait up to 10 seconds for the lock. The original passed
            // TimeUnit.SECONDS with 10 * 1000, i.e. ~2.7 hours by mistake.
            acquired = lock.acquire(10 * 1000, TimeUnit.MILLISECONDS);
            if (acquired) {
                System.out.println(Thread.currentThread().getName() + " hold lock");
                Thread.sleep(5000L);
                System.out.println(Thread.currentThread().getName() + " release lock");
            }
        } catch (Exception e) {
            e.printStackTrace();
        }finally {
            if (acquired) {
                try {
                    lock.release();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Connects a fresh client, ensures the election path exists, and registers
     * the given listener in the leader election at /node1. The client and
     * selector are intentionally left open for the lifetime of the demo.
     *
     * @param listener callback invoked when this participant becomes leader
     */
    private static void registerListener(LeaderSelectorListener listener){
        // 1. Connect to zk
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "127.0.0.1:2181",
                new RetryNTimes(10, 5000)
        );
        client.start();

        // 2. Ensure the election path exists (EnsurePath is deprecated in
        // newer Curator versions; createContainers is its replacement there).
        try {
            new EnsurePath("/node1").ensure(client.getZookeeperClient());
        } catch (Exception e) {
            e.printStackTrace();
        }

        // 3. Register listener; autoRequeue re-enters the election after each
        // leadership term ends.
        LeaderSelector selector = new LeaderSelector(client, "/node1", listener);
        selector.autoRequeue();
        selector.start();
    }

    /**
     * Demonstrates leader election: three participants compete; each holds
     * leadership for 5s (takeLeadership must block for the duration of the
     * term — returning relinquishes leadership).
     *
     * @throws InterruptedException if the demo's keep-alive sleep is interrupted
     */
    private static void selectLeader() throws InterruptedException {
        LeaderSelectorListener listener = new LeaderSelectorListener() {
            @Override
            public void takeLeadership(CuratorFramework client) throws Exception {
                System.out.println(Thread.currentThread().getName() + " take leadership!");

                // takeLeadership() method should only return when leadership is being relinquished.
                Thread.sleep(5000L);

                System.out.println(Thread.currentThread().getName() + " relinquish leadership!");
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState state) {
            }
        };

        new Thread(() -> {
            registerListener(listener);
        }).start();

        new Thread(() -> {
            registerListener(listener);
        }).start();

        new Thread(() -> {
            registerListener(listener);
        }).start();

        Thread.sleep(Integer.MAX_VALUE);
    }

    public static void main(String[] args) throws InterruptedException {
        // One-shot native watch demo.
        testWatcher();
        // Cache watch demo -- fires for every change (node/path/tree caches exist).
        //testCacheWatcher();
        // Distributed coordination -- distributed lock.
        //testLock();
        // Leader election.
        //selectLeader();

    }

    /**
     * A ZooKeeper-based distributed lock solves distributed mutual exclusion
     * (including non-reentrancy concerns) and is fairly simple to implement.
     *
     * Its drawback is performance: every acquire/release dynamically creates
     * and destroys ephemeral nodes, and in ZK all creates/deletes go through
     * the Leader, which must then replicate the data to every Follower.
     *
     * So for high-performance, high-concurrency scenarios, a ZK lock is not
     * recommended.
     *
     * The two mainstream, mature distributed-lock approaches are Redis-based
     * and ZooKeeper-based, each with different sweet spots: ZK locks suit
     * scenarios that prioritize high reliability over raw concurrency.
     *
     * For very high concurrency and strict performance requirements, prefer a
     * Redis-based distributed lock.
     */
}
