package utils;

import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;


/**
 * Twitter Snowflake distributed id generator.
 *
 * <p>Bit layout of the generated 64-bit id (parts separated by {@code -}):
 * {@code 0 - 0000000000 0000000000 0000000000 0000000000 0 - 00000 - 00000 - 000000000000}
 * <ul>
 *   <li>1 sign bit — always 0, since Java {@code long} is signed and ids must be positive.</li>
 *   <li>41 timestamp bits — NOT the absolute time, but the delta between the current time
 *       and a fixed start epoch ({@code twepoch}, chosen when the generator is first deployed).
 *       41 bits last about 69 years: {@code (1L << 41) / (1000L * 60 * 60 * 24 * 365) = 69}.</li>
 *   <li>10 machine bits — 5-bit datacenter id + 5-bit worker id, up to 1024 nodes.</li>
 *   <li>12 sequence bits — per-millisecond counter; each node can emit up to 4096 ids in the
 *       same millisecond.</li>
 * </ul>
 * Ids are roughly time-ordered overall and collision-free across the cluster (disambiguated
 * by datacenter id and worker id). Measured throughput is on the order of 260k ids/second.
 */
public class SnowflakeIdWorker {

//    private static final Logger logger = LoggerFactory.getLogger(SnowflakeIdWorker.class);

    /** Process-wide singleton; initialized exactly once on class load. */
    private static final SnowflakeIdWorker INSTANCE;

    static {
        INSTANCE = createInstanceByRedis();
    }

    /**
     * Builds a Snowflake id generator whose workerId/datacenterId are derived from the
     * local host IP. All servers must share the first three octets of their IP (same
     * subnet), otherwise the derived workId/datacenterId are not guaranteed unique.
     *
     * @throws RuntimeException if the local IP cannot be resolved
     * <p>Author: Zhou Zhihui
     * <p>Created: 2019/5/15 10:32
     */
//    private static SnowflakeIdWorker createInstanceByIp() {
//        long workId = 0;
//        long dataCenterId = 0;
//        try {
//            String localHostIpString = HostIpUtils.getLocalHostIpString();
//            logger.info("获取的本机IP为： " + localHostIpString);
//            String[] array = localHostIpString.split("\\.");
//            long sum = 0;
//            for (String s : array) {
//                sum += Long.parseLong(s);
//            }
//            long max = 1 << 5;
//            dataCenterId = sum / max;
//            workId = sum % max;
//            logger.info("初始化生成的机房ID:" + dataCenterId + "\t机器ID：" + workId);
//            return new SnowflakeIdWorker(workId, dataCenterId);
//        } catch (Exception e) {
//            e.printStackTrace();
//            throw new RuntimeException("雪花算法id生成器初始化失败");
//        }
//    }

    /**
     * Builds a Snowflake id generator whose machine id is allocated through Redis:
     * a distributed lock guards a 0-1023 id bitmap, and a Quartz job periodically
     * refreshes the lease on the allocated id (see the commented-out body below —
     * currently disabled, so workId/datacenterId both default to 0).
     *
     * @return the initialized generator
     * <p>Author: Zhou Zhihui
     * <p>Created: 2019/5/15 18:06
     */
    private static SnowflakeIdWorker createInstanceByRedis() {
        long workId = 0;
        long dataCenterId = 0;
//        RedisConfValueDao redisValueDao = SpringContextUtil.getBean(RedisConfValueDao.class);
//        String lockValue = System.currentTimeMillis() + 30 * 1000 + "";
//        try {
//            while (!redisValueDao.lock("SnowflakeLock", lockValue)) {
//                TimeUnit.SECONDS.sleep(30);
//                lockValue = System.currentTimeMillis() + 30 * 1000 + "";
//            }
//            Set<String> keys = redisValueDao.getByPrefix(SchedulerQuartzJob.PREFIX_REDIS_SNOWFLAKE);
//            BitSet bitSet = new BitSet();
//            keys.forEach(s -> bitSet.set(Integer.parseInt(s.substring(10))));
//            long id = -1;
//            for (int i = 0; i < 1024; i++) {
//                if (!bitSet.get(i)) {
//                    id = i;
//                    break;
//                }
//            }
//            if (id == -1) {
//                throw new RuntimeException("0-1023的机器ID已经全部分配完");
//            }
//
//            redisValueDao.setKey(SchedulerQuartzJob.PREFIX_REDIS_SNOWFLAKE + id, "1", SchedulerQuartzJob.REDIS_TIMEOUT_MINUTE, TimeUnit.MINUTES);
//
//            // 设置定时任务，定时加固已分配的id
//            SchedulerQuartzJob.setId(id);
//            SchedulerFactoryBean schedulerFactoryBean = SpringContextUtil.getBean(SchedulerFactoryBean.class);
//            Scheduler scheduler = schedulerFactoryBean.getScheduler();
//
//            JobDetail jobDetail =
//                    JobBuilder.newJob(SchedulerQuartzJob.class).withIdentity("snowflakeJob", "snowflakeGroup").build();
//            // 基于表达式构建触发器
//            CronScheduleBuilder cronScheduleBuilder = CronScheduleBuilder.cronSchedule(SchedulerQuartzJob.CRON_VALUE);
//            // CronTrigger表达式触发器 继承于Trigger
//            // TriggerBuilder 用于构建触发器实例
//            CronTrigger cronTrigger = TriggerBuilder.newTrigger().withIdentity("snowflakeJob", "snowflakeGroup")
//                    .withSchedule(cronScheduleBuilder).build();
//            scheduler.scheduleJob(jobDetail, cronTrigger);
//            scheduler.start();
//
//            long max = 1 << 5;
//            dataCenterId = id / max;
//            workId = id % max;
//            logger.info("初始化生成的机房ID:" + dataCenterId + "\t机器ID：" + workId);
//        } catch (Exception e) {
//            e.printStackTrace();
//            throw new RuntimeException("雪花算法id生成器初始化失败");
//        } finally {
//            redisValueDao.unlock("SnowflakeLock", lockValue);
//        }
        return new SnowflakeIdWorker(workId, dataCenterId);
    }

    // ==============================Fields===========================================
    /**
     * Start epoch (2015-01-01) subtracted from the current time before shifting.
     */
    // TODO the epoch choice affects the id length: the further in the past, the longer
    // the decimal form of the id (at most 19 digits, at least 9).
    private final long twepoch = 1420041600000L;

    /**
     * Number of bits reserved for the worker id.
     */
    private static final long workerIdBits = 5L;

    /**
     * Number of bits reserved for the datacenter id.
     */
    private static final long datacenterIdBits = 5L;

    /**
     * Maximum worker id: 31. (This shift/xor trick quickly yields the largest value
     * representable in the given number of bits.)
     */
    public static final long maxWorkerId = -1L ^ (-1L << workerIdBits);

    /**
     * Maximum datacenter id: 31.
     */
    public static final long maxDatacenterId = -1L ^ (-1L << datacenterIdBits);

    /**
     * Number of bits reserved for the per-millisecond sequence.
     */
    private final long sequenceBits = 12L;

    /**
     * Worker id is shifted left by 12 bits.
     */
    private final long workerIdShift = sequenceBits;

    /**
     * Datacenter id is shifted left by 17 bits (12 + 5).
     */
    private final long datacenterIdShift = sequenceBits + workerIdBits;

    /**
     * Timestamp is shifted left by 22 bits (5 + 5 + 12).
     */
    private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits;

    /**
     * Sequence mask: 4095 (0b111111111111 = 0xfff).
     */
    private final long sequenceMask = -1L ^ (-1L << sequenceBits);

    /**
     * Worker id (0~31).
     */
    private long workerId;

    /**
     * Datacenter id (0~31).
     */
    private long datacenterId;

    /**
     * Per-millisecond sequence (0~4095).
     */
    private long sequence = 0L;

    /**
     * Timestamp used when the previous id was generated (includes the offset below).
     */
    private long lastTimestamp = -1L;

    /**
     * Timestamp offset accumulated to compensate for clock rollback / sequence overflow.
     */
    private long offsetOfTimestamp = 0L;

    //==============================Constructors=====================================

    /**
     * Constructor.
     *
     * @param workerId     worker id (0~31)
     * @param datacenterId datacenter id (0~31)
     */
    private SnowflakeIdWorker(long workerId, long datacenterId) {
        if (workerId > maxWorkerId || workerId < 0) {
            throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
        }
        if (datacenterId > maxDatacenterId || datacenterId < 0) {
            throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
        }
        this.workerId = workerId;
        this.datacenterId = datacenterId;
    }

    // ==============================Methods==========================================

    /**
     * Returns the next id (thread-safe via {@code synchronized}).
     *
     * @return SnowflakeId
     */
    private synchronized long nextId() {
        long timestamp = timeGen();

        long newTimestamp = timestamp + offsetOfTimestamp;
        // If the current time is behind the last generation timestamp, the system clock
        // was rolled back. The stock algorithm throws here; since this host periodically
        // corrects its clock (so rollback is expected), we instead absorb the rollback
        // into offsetOfTimestamp and keep generating monotonically.
        if (newTimestamp < lastTimestamp) {
//            throw new RuntimeException(
//                    String.format("Clock moved backwards.  Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
            offsetOfTimestamp = lastTimestamp - timestamp;
            newTimestamp = lastTimestamp;
            System.out.println("  时钟回拨，时间戳偏移量更新为：" + offsetOfTimestamp);
        }

        // Same millisecond as the previous id: advance the in-millisecond sequence.
        if (lastTimestamp == newTimestamp) {
            sequence = (sequence + 1) & sequenceMask;
            // Sequence overflowed within this millisecond.
            if (sequence == 0) {
                // Stock algorithm spins until the next millisecond; improved here to
                // simply bump the timestamp offset by one instead of blocking.
//                timestamp = tilNextMillis(lastTimestamp);
                offsetOfTimestamp++;
                newTimestamp++;
            }
        } else {
            // New millisecond: reset the sequence.
            sequence = 0L;
        }

        // Remember the timestamp actually used for this id
        // (wall-clock time plus the accumulated offset).
        lastTimestamp = newTimestamp;

        // Shift the parts into place and OR them together into the 64-bit id.
        return ((newTimestamp - twepoch) << timestampLeftShift) //
                | (datacenterId << datacenterIdShift) //
                | (workerId << workerIdShift) //
                | sequence;
    }

    /**
     * Blocks until the next millisecond and returns the new timestamp.
     *
     * @param lastTimestamp timestamp of the previous id
     * @return current timestamp (strictly greater than {@code lastTimestamp})
     */
    protected long tilNextMillis(long lastTimestamp) {
        long timestamp = timeGen();
        while (timestamp <= lastTimestamp) {
            timestamp = timeGen();
        }
        return timestamp;
    }

    /**
     * Returns the current time in milliseconds.
     *
     * @return current time (ms)
     */
    protected long timeGen() {
        return System.currentTimeMillis();
    }

//    public static long getNextLongId() {
//        return INSTANCE.nextId();
//    }

    /**
     * Returns the next id as a decimal string, zero-padded on the left to a fixed
     * width of 19 characters so lexicographic order matches numeric order.
     *
     * @return the next unique id as a 19-character string
     */
    public static String getNextId() {
        // Single-shot zero padding; the old loop rebuilt the string once per pad digit.
        return String.format("%019d", INSTANCE.nextId());
    }

    /**
     * Quartz job that periodically re-leases the machine id allocated in Redis so
     * other nodes cannot claim it while this process is alive.
     */
    static class SchedulerQuartzJob implements Job {

        /** Machine id (0-1023) allocated to this process. */
        private static long id;

        /** Redis key prefix under which allocated machine ids are stored. */
        public static final String PREFIX_REDIS_SNOWFLAKE = "Snowflake_";

        /** Lease duration of the Redis key, in minutes. */
        public static final long REDIS_TIMEOUT_MINUTE = 60L;

        /** Cron schedule: every 30 minutes (half the lease, so the lease never lapses). */
        public static final String CRON_VALUE = "0 0/30 * * * ?";


//        RedisConfValueDao redisValueDao = SpringContextUtil.getBean(RedisConfValueDao.class);

        @Override
        public void execute(JobExecutionContext jobExecutionContext) throws JobExecutionException {
//            redisValueDao.setKey(PREFIX_REDIS_SNOWFLAKE + id, "1", REDIS_TIMEOUT_MINUTE, TimeUnit.MINUTES);
        }

        public static long getId() {
            return id;
        }


        public static void setId(long id) {
            SchedulerQuartzJob.id = id;
        }
    }

    //==============================Test=============================================

    /**
     * Smoke test: hammer the generator from several threads for a fixed duration and
     * verify (with {@code -ea}) that no duplicate ids are produced.
     */
    public static void main(String[] args) throws Exception {
        int seconds = 20;
        int maxThread = 10;
        // One latch permit per worker thread (previously hard-coded to 10).
        CountDownLatch countDownLatch = new CountDownLatch(maxThread);
        AtomicInteger counter = new AtomicInteger(0);
        Map<String, Object> ids = new ConcurrentHashMap<>();
        ExecutorService exec = Executors.newFixedThreadPool(maxThread);
        long startTimestamp = System.currentTimeMillis();
        for (int i = 0; i < maxThread; i++) {
            exec.submit(() -> {
                System.out.println(Thread.currentThread().getName() + " begin------------");
                while (System.currentTimeMillis() - startTimestamp < seconds * 1000) {
                    String id = SnowflakeIdWorker.getNextId();
                    System.out.println(Thread.currentThread().getName() + ": " + id);
                    Object value = new Object();
                    // Bug fix: the original inserted a *different* new Object() and asserted
                    // returnValue == value, which could never hold. putIfAbsent returns null
                    // when the key was absent; a non-null return means a duplicate id.
                    Object returnValue = ids.putIfAbsent(id, value);
                    assert returnValue == null : "duplicate id: " + id;
                    counter.addAndGet(1);
                }
                System.out.println(Thread.currentThread().getName() + " count------------");
                countDownLatch.countDown();
                System.out.println(Thread.currentThread().getName() + " end------------");
            });
        }

        System.out.println("await");
        countDownLatch.await();
        // A plain HashSet demonstrably loses elements under concurrent adds;
        // ConcurrentHashMap keeps counter and map size in agreement.
        assert counter.get() == ids.size();
        System.out.println("每秒生成id数量：" + counter.get() / seconds);
        // Shut the pool down so its non-daemon threads do not keep the JVM alive.
        exec.shutdown();
    }
}
