package cn.dansj.common.utils.transfer;

import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

/**
 * 分布式 Snowflake ID 生成器
 */
/**
 * Distributed Snowflake ID generator.
 *
 * <p>64-bit layout (high to low): sign (1) | timestamp delta (41) | datacenter id (5)
 * | worker id (5) | sequence (12) — up to 4096 IDs per millisecond per worker.
 *
 * <p>Each thread owns an independent worker (held in a {@link ThreadLocal}) whose
 * worker/datacenter ids are chosen at random unless {@link #configureDistributed}
 * is called on that thread. NOTE(review): random ids can collide across threads and
 * processes; for strict global uniqueness supply ids via {@link IdConfigProvider}.
 */
public final class SnowflakeIdGenerator {

    private SnowflakeIdGenerator() {
        // Static utility class; no instances.
    }

    // ====================== Bit-layout constants ======================
    private static final long WORKER_ID_BITS = 5L;
    private static final long DATACENTER_ID_BITS = 5L;
    private static final long SEQUENCE_BITS = 12L;

    private static final long MAX_WORKER_ID = ~(-1L << WORKER_ID_BITS);         // 31
    private static final long MAX_DATACENTER_ID = ~(-1L << DATACENTER_ID_BITS); // 31
    private static final long SEQUENCE_MASK = ~(-1L << SEQUENCE_BITS);          // 4095

    private static final long WORKER_ID_SHIFT = SEQUENCE_BITS;
    private static final long DATACENTER_ID_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS;
    private static final long TIMESTAMP_LEFT_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS + DATACENTER_ID_BITS;

    /** Twitter's original Snowflake epoch (2010-11-04T01:42:54.657Z). */
    private static final long DEFAULT_EPOCH = 1288834974657L;

    // ====================== Per-thread worker ======================
    private static final ThreadLocal<SnowflakeIdWorker> ID_WORKER = ThreadLocal.withInitial(() -> {
        Random random = ThreadLocalRandom.current();
        // nextInt(bound) is EXCLUSIVE of bound, so +1 keeps MAX_* reachable.
        return new SnowflakeIdWorker(
                random.nextInt((int) MAX_WORKER_ID + 1),
                random.nextInt((int) MAX_DATACENTER_ID + 1),
                DEFAULT_EPOCH);
    });

    /** Returns the next unique 64-bit ID for the calling thread's worker. */
    public static long nextID() {
        return ID_WORKER.get().nextID();
    }

    /**
     * Returns {@code count} unique, strictly increasing IDs.
     *
     * @param count number of IDs to generate; must be positive
     * @throws IllegalArgumentException if {@code count <= 0}
     * @throws IllegalStateException if the clock moved backwards by one second or more
     */
    public static long[] nextID(int count) {
        return ID_WORKER.get().nextID(count);
    }

    /** Returns the next ID encoded as a base-62 string. */
    public static String nextUID() {
        return Base62.encode(nextID());
    }

    // ====================== ID generator core ======================
    /**
     * Single-threaded generator core. Thread confinement is provided by the
     * enclosing {@link ThreadLocal}; instances are NOT safe to share.
     */
    private static class SnowflakeIdWorker {
        private final long epoch;
        private final long datacenterIdShifted;
        private final long workerIdShifted;
        // Sequence embedded in the most recently issued ID (shared by single and
        // bulk paths so interleaved calls can never reuse a (timestamp, sequence)).
        private long sequence = 0L;
        // Millisecond of the most recently issued ID; -1 before any ID exists.
        private long lastTimestamp = -1L;

        /**
         * @param workerId     node id within a datacenter, in [0, MAX_WORKER_ID]
         * @param datacenterId datacenter id, in [0, MAX_DATACENTER_ID]
         * @param epoch        custom epoch (millis) subtracted from wall-clock time
         * @throws IllegalArgumentException if either id is out of range
         */
        public SnowflakeIdWorker(long workerId, long datacenterId, long epoch) {
            if (workerId > MAX_WORKER_ID || workerId < 0) {
                throw new IllegalArgumentException("Worker ID must be between 0 and " + MAX_WORKER_ID);
            }
            if (datacenterId > MAX_DATACENTER_ID || datacenterId < 0) {
                throw new IllegalArgumentException("Datacenter ID must be between 0 and " + MAX_DATACENTER_ID);
            }
            this.epoch = epoch;
            // Pre-shift the fixed fields so nextID() only ORs them in.
            this.datacenterIdShifted = datacenterId << DATACENTER_ID_SHIFT;
            this.workerIdShifted = workerId << WORKER_ID_SHIFT;
        }

        /** Generates one ID; waits out sequence exhaustion and small clock drift. */
        public long nextID() {
            long timestamp = timeGen();
            if (timestamp < lastTimestamp) {
                return handleClockBackward();
            }
            if (timestamp == lastTimestamp) {
                sequence = (sequence + 1) & SEQUENCE_MASK;
                if (sequence == 0) {
                    // Sequence exhausted within this millisecond; spin to the next.
                    timestamp = waitNextMillis(timestamp);
                }
            } else {
                sequence = 0L;
            }
            lastTimestamp = timestamp;
            return ((timestamp - epoch) << TIMESTAMP_LEFT_SHIFT) | datacenterIdShifted | workerIdShifted | sequence;
        }

        /**
         * Generates {@code count} IDs. Small batches loop over {@link #nextID()};
         * large batches use the block-per-millisecond fast path.
         */
        public long[] nextID(int count) {
            if (count <= 0) {
                throw new IllegalArgumentException("Count must be positive");
            }

            // Small batches: the per-call overhead is negligible.
            if (count <= 32) {
                long[] ids = new long[count];
                for (int i = 0; i < count; i++) {
                    ids[i] = nextID();
                }
                return ids;
            }

            return bulkGenerate(count);
        }

        /**
         * Fast path for large batches: fills whole sequence ranges per millisecond.
         * Shares {@link #sequence}/{@link #lastTimestamp} with {@link #nextID()} so
         * interleaving single and bulk calls cannot produce duplicate IDs.
         */
        private long[] bulkGenerate(int count) {
            long timestamp = timeGen();

            if (timestamp < lastTimestamp) {
                return handleClockBackwardBulk(count);
            }

            long[] ids = new long[count];
            int generated = 0;

            while (generated < count) {
                if (timestamp != lastTimestamp) {
                    lastTimestamp = timestamp;
                    sequence = -1L; // first increment below yields sequence 0
                }

                // Sequence values still unused within this millisecond.
                int available = (int) (SEQUENCE_MASK - sequence);
                if (available <= 0) {
                    timestamp = waitNextMillis(timestamp);
                    continue;
                }

                int toGenerate = Math.min(count - generated, available);
                long prefix = ((timestamp - epoch) << TIMESTAMP_LEFT_SHIFT) | datacenterIdShifted | workerIdShifted;
                for (int i = 0; i < toGenerate; i++) {
                    ids[generated++] = prefix | (++sequence);
                }

                if (generated < count && sequence == SEQUENCE_MASK) {
                    timestamp = waitNextMillis(timestamp);
                }
            }

            return ids;
        }

        /**
         * Current wall-clock millis. Deliberately never fabricates future
         * timestamps: borrowing time ahead of the clock risks duplicate IDs
         * after a process restart.
         */
        private long timeGen() {
            return System.currentTimeMillis();
        }

        /** Busy-waits (yielding) until the clock passes {@code currentTimestamp}. */
        private long waitNextMillis(long currentTimestamp) {
            long timestamp;
            do {
                Thread.yield();
                timestamp = timeGen();
            } while (timestamp <= currentTimestamp);
            return timestamp;
        }

        /**
         * Tolerates a small (&lt; 1s) backward clock drift by sleeping it off;
         * fails fast on a large drift.
         *
         * @throws IllegalStateException if the clock moved back by one second or more
         */
        private long handleClockBackward() {
            long currentTimestamp = timeGen();
            long offset = lastTimestamp - currentTimestamp;
            if (offset <= 0) {
                // Clock already caught up between the two reads; just retry.
                return nextID();
            }
            if (offset < 1000) {
                try {
                    Thread.sleep(offset);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                return nextID();
            }
            throw new IllegalStateException(String.format("Clock moved backwards. Last: %d, Current: %d", lastTimestamp, currentTimestamp));
        }

        /**
         * Bulk recovery after backward drift: generates one at a time so each ID
         * goes through {@link #handleClockBackward()}. Propagates failure instead
         * of silently returning fewer than {@code count} IDs (which would violate
         * the method contract).
         */
        private long[] handleClockBackwardBulk(int count) {
            long[] recoveredIds = new long[count];
            for (int i = 0; i < count; i++) {
                recoveredIds[i] = nextID();
            }
            return recoveredIds;
        }
    }

    // ====================== Base62 encoding ======================
    private static final class Base62 {
        private static final char[] DIGITS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz".toCharArray();

        private Base62() {
        }

        /**
         * Encodes {@code number} in base 62, treating it as UNSIGNED so a negative
         * input yields an 11-char encoding instead of an empty string.
         */
        public static String encode(long number) {
            if (number == 0) {
                return "0";
            }

            char[] buf = new char[11]; // 62^11 > 2^64, so 11 chars always suffice
            int pos = buf.length;
            while (number != 0) {
                buf[--pos] = DIGITS[(int) Long.remainderUnsigned(number, 62)];
                number = Long.divideUnsigned(number, 62);
            }
            return new String(buf, pos, buf.length - pos);
        }
    }

    // ====================== Distributed configuration ======================
    /** Supplies a stable node identity for collision-free distributed generation. */
    public interface IdConfigProvider {
        int getWorkerId();

        int getDatacenterId();

        long getEpoch();
    }

    /**
     * Replaces the CALLING thread's generator with one built from
     * {@code configProvider}. NOTE(review): this configures only the current
     * thread; other threads keep their randomly assigned ids.
     *
     * @throws IllegalArgumentException if the provided ids are out of range
     */
    public static void configureDistributed(IdConfigProvider configProvider) {
        ID_WORKER.set(new SnowflakeIdWorker(
                configProvider.getWorkerId(),
                configProvider.getDatacenterId(),
                configProvider.getEpoch()));
    }
}