package com.zzr.demo.redis.service.impl;

import com.zzr.demo.redis.service.ProbabilityGroupCountService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.HyperLogLogOperations;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Probabilistic unique-counting service backed by Redis HyperLogLog.
 *
 * <p>Design notes (translated from the original author's comments):
 * <ol>
 *   <li>For efficiency, a pre-merged HLL key (built with PFMERGE) is cached so a
 *       group-level read is a single PFCOUNT instead of counting every per-item key.</li>
 *   <li>Counting is a "write", group totals are a "read". A
 *       {@code ReentrantReadWriteLock} was tried first, but maintaining the merged
 *       HLL on every write makes this a write-heavy workload where that lock does
 *       not pay off. Instead a plain {@link ReentrantLock} guards only the read
 *       path: the merge key is rebuilt lazily when a group total is requested.</li>
 * </ol>
 *
 * <p>Thread-safety: {@code mergeState} is a volatile flag (0 = merge key stale,
 * 1 = merge key usable); rebuilds of the merge key are serialised by {@code lock}.
 *
 * @author xiaomi
 * @since 2021/10/9
 */
@Service
@Slf4j
public class HLLGroupCountServiceImpl implements ProbabilityGroupCountService {

    static final String HLL_GROUP_COUNT_KEY = "hll::group::count";
    static final String HLL_GROUP_MERGE_KEY = "hll::group::merge";

    // Merge-key state: 0 = stale (must be rebuilt before use), 1 = usable.
    volatile int mergeState = 0;

    // Serialises lazy rebuilds of the merge key inside size(String).
    final ReentrantLock lock = new ReentrantLock();

    @Autowired
    StringRedisTemplate stringRedisTemplate;

    // Rough in-memory record of every item counted so far; used to enumerate the
    // per-item HLL keys when rebuilding the merge key. A concurrent set replaces
    // the original "volatile HashSet": volatile only protects the reference,
    // not concurrent add() calls on the set itself.
    static final Set<String> itemSet = ConcurrentHashMap.newKeySet();

    // Last group name seen by increase(); retained for backward compatibility.
    // NOTE(review): shared mutable static — racy if several groups are counted
    // concurrently; confirm only one group is ever active at a time.
    static volatile String groupName;

    /**
     * Records one occurrence for {@code itemName} within group {@code gn}.
     * Treated as a "write": it always invalidates the cached merge key.
     *
     * @param gn       group name
     * @param itemName item name within the group
     */
    @Override
    public void increase(String gn, String itemName) {
        itemSet.add(itemName);
        groupName = gn;
        try {
            HyperLogLogOperations<String, String> opsForHyperLogLog = stringRedisTemplate.opsForHyperLogLog();
            // NOTE(review): iptl is referenced but never declared in this file —
            // presumably a ThreadLocal<String> carrying a per-request/session id
            // populated elsewhere (e.g. an interceptor); this class cannot compile
            // without it. TODO confirm its definition.
            String sid = iptl.get();
            // Build the key from the gn parameter directly: reading the shared
            // static field here could observe another thread's assignment.
            String key = HLL_GROUP_COUNT_KEY + "::" + gn + "::" + itemName;
            Long add = opsForHyperLogLog.add(key, sid);
            log.info("key:{},add:{}", key, add);
        } finally {
            iptl.remove();
            // The cached merge no longer reflects all writes — mark it stale.
            mergeState = 0;
        }
    }

    /**
     * Returns the approximate number of distinct members across all items of the
     * group. Uses the cached PFMERGE key when it is still valid, rebuilding it
     * under {@code lock} otherwise.
     *
     * @param groupName group name
     * @return approximate distinct count for the whole group
     * @throws InterruptedException declared by the interface; not thrown here
     */
    @Override
    public long size(String groupName) throws InterruptedException {
        log.info("itemSet size:{}", itemSet.size());
        lock.lock();
        try {
            HyperLogLogOperations<String, String> opsForHyperLogLog = stringRedisTemplate.opsForHyperLogLog();
            if (mergeState == 1) {
                long size = opsForHyperLogLog.size(HLL_GROUP_MERGE_KEY);
                if (size > 0) {
                    // Cached merge is valid and non-empty — fast path.
                    return size;
                }
                // Merge key unexpectedly empty (expired/flushed?) — rebuild below.
            }
            long size = rebuildMergeKey(opsForHyperLogLog, groupName);
            // Mark the merge usable only after a successful rebuild. (Previously
            // this was assigned in a finally block, which wrongly validated the
            // merge key even when the Redis operations threw.)
            mergeState = 1;
            return size;
        } finally {
            lock.unlock();
        }
    }

    /**
     * DELs the merge key, PFMERGEs every known per-item HLL into it, and returns
     * its PFCOUNT. Caller must hold {@code lock}.
     */
    private long rebuildMergeKey(HyperLogLogOperations<String, String> opsForHyperLogLog, String groupName) {
        String[] sourceKeys = itemSet.stream()
                .map(item -> HLL_GROUP_COUNT_KEY + "::" + groupName + "::" + item)
                .toArray(String[]::new);
        // DEL followed by PFMERGE is not atomic in Redis; acceptable here because
        // rebuilds are serialised by the local lock (single-instance assumption —
        // TODO confirm this service is not deployed on multiple nodes).
        opsForHyperLogLog.delete(HLL_GROUP_MERGE_KEY);
        log.info("执行删除：{}", HLL_GROUP_MERGE_KEY);
        opsForHyperLogLog.union(HLL_GROUP_MERGE_KEY, sourceKeys);
        return opsForHyperLogLog.size(HLL_GROUP_MERGE_KEY);
    }

    /**
     * Returns the approximate distinct count for a single item of a group.
     *
     * @param groupName group name
     * @param itemName  item name within the group
     * @return approximate distinct count for that item's HLL key
     */
    @Override
    public long size(String groupName, String itemName) {
        itemSet.add(itemName);
        String key = HLL_GROUP_COUNT_KEY + "::" + groupName + "::" + itemName;
        HyperLogLogOperations<String, String> opsForHyperLogLog = stringRedisTemplate.opsForHyperLogLog();
        Long size = opsForHyperLogLog.size(key);
        log.info("groupName:{},itemName:{},size:{}", groupName, itemName, size);
        return size;
    }
}
