package com.cx.async;

import com.cx.exception.CacheConfigException;
import com.cx.property.LettuceProperties;
import com.cx.support.AsyncCallback;
import com.cx.support.LettuceConnectionManager;
import com.cx.support.RedisLettuceCacheConfig;
import com.cx.utils.*;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import io.lettuce.core.AbstractRedisClient;
import io.lettuce.core.KeyValue;
import io.lettuce.core.RedisFuture;
import io.lettuce.core.SetArgs;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.async.*;
import io.lettuce.core.support.AsyncPool;
import lombok.extern.slf4j.Slf4j;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.LongAdder;

/**
 * 异步多线程操作Redis，并使用退避重试5次
 * @author 舒建辉 Created on 2019-04-18 15:42.
 */
@Slf4j
public class AsyncRedis {

    private final AbstractRedisClient client;
    private final LettuceConnectionManager lettuceConnectionManager;
    private final RedisLettuceCacheConfig<String, Object> config;
    private final LettuceProperties lettuceProperties;
    private final AsyncPool<StatefulRedisConnection<String, Object>> asyncPool;

    /**
     * Creates the async Redis facade from the given Lettuce cache config.
     *
     * @param config cache configuration; must not be null and must carry a RedisClient
     * @throws CacheConfigException when no RedisClient is configured
     */
    public AsyncRedis(RedisLettuceCacheConfig<String, Object> config) {
        Assert.notNull(config, "The given config must not be null!");
        // Validate before touching any state so a half-initialized instance never escapes
        // (the original assigned this.config before the check).
        if (config.getRedisClient() == null) {
            throw new CacheConfigException("RedisClient is required");
        }

        this.config = config;
        this.client = config.getRedisClient();
        this.lettuceConnectionManager = config.getManager();
        this.lettuceConnectionManager.init(client, config.getConnection(), config.getAsyncPool());
        this.lettuceProperties = SerializeAdapter.lettuceProperties();
        this.asyncPool = lettuceConnectionManager.asyncPool(client);
    }

    /**
     * Applies the configured async-result timeout to the given pending result.
     * Calling setTimeout is what bounds the wait for the underlying future.
     */
    private void setTimeout(CacheResult cr) {
        cr.setTimeout(Duration.ofMillis(config.getAsyncResultTimeoutInMillis()));
    }

    /**
     * Logs a failed Redis operation. Depending on {@link #needLogStackTrace},
     * either the full stack trace is logged, or a compact one-line summary of
     * the exception chain (class:message, linked by "cause by").
     *
     * @param oper the Redis operation name (e.g. "SET", "MGET")
     * @param key  the key (or a key-count description) involved
     * @param e    the failure cause
     */
    protected void logError(String oper, String key, Throwable e) {
        StringBuilder msg = new StringBuilder(64)
                .append(oper)
                .append(" error. key=")
                .append(key);
        if (needLogStackTrace(e)) {
            log.error(msg.toString(), e);
            return;
        }
        msg.append(' ');
        // Flatten the cause chain into a single line instead of a stack trace.
        for (Throwable t = e; t != null; t = t.getCause()) {
            msg.append(t.getClass().getName()).append(':').append(t.getMessage());
            if (t.getCause() != null) {
                msg.append("\ncause by ");
            }
        }
        log.error(msg.toString());
    }

    /**
     * Whether {@link #logError} should log the full stack trace for this throwable.
     * Currently always true; kept as an overridable extension point so subclasses
     * can suppress stack traces for specific exception types.
     */
    protected boolean needLogStackTrace(Throwable e) {
        return true;
    }

    /*********************************** Key ***********************************/

    /**
     * Asynchronously executes KEYS for the given pattern, with bounded retries.
     *
     * @param key the key pattern; must not be null
     * @return the matching keys, or null when nothing matched or the result carries no data
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public List<String> keys(final String key) {
        Assert.notNull(key, "The given key must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisKeyAsyncCommands<String, Object> asyncCommands = (RedisKeyAsyncCommands<String, Object>) conn.async();
                return asyncCommands.keys(key).handle((rts, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("KEYS", key, ex));
                        return new ResultData(ex);
                    }
                    if (rts == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    }
                    // An empty result is a legitimate "nothing matched", not a failure.
                    return rts.isEmpty()
                            ? new ResultData(CacheResultCode.NOT_EXISTS, null, null)
                            : new ResultData(CacheResultCode.SUCCESS, null, rts);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable (the loop
            // condition guarantees i <= RETRY_TIMES), so exhausted retries fell through
            // silently; now the final failed attempt fails loudly as intended.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("KEYS %s 已重试第%s次", key, i));
        }

        return null == cacheResult.getData() ? null : (List<String>) cacheResult.getData();
    }

    /**
     * Asynchronously executes EXISTS for the given key, with bounded retries.
     *
     * @param key the key to test; must not be null
     * @return true if the key exists, false if not, or null when the lookup failed
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public Boolean exists(final String key) {
        Assert.notNull(key, "The given key must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisKeyAsyncCommands<String, Object> asyncCommands = (RedisKeyAsyncCommands<String, Object>) conn.async();
                return asyncCommands.exists(key).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("EXISTS", key, ex));
                        return new ResultData(ex);
                    }
                    // EXISTS returns the count of existing keys (here 0 or 1).
                    if (rt == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    }
                    return rt > 0
                            ? new ResultData(CacheResultCode.SUCCESS, null, true)
                            : new ResultData(CacheResultCode.NOT_EXISTS, null, false);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("EXISTS key %s 已重试第%s次", key, i));
        }

        return null == cacheResult.getData() ? null : (boolean) cacheResult.getData();
    }

    /**
     * Asynchronously executes DEL for the given keys, with bounded retries.
     *
     * @param keys the keys to delete; must not be null
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public void del(final String... keys) {
        Assert.notNull(keys, "The given keys must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisKeyAsyncCommands<String, Object> asyncCommands = (RedisKeyAsyncCommands<String, Object>) conn.async();
                return asyncCommands.del(keys).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("DEL", "keys(" + keys.length + ")", ex));
                        return new ResultData(ex);
                    }
                    // DEL returns the number of keys actually removed; compare against the
                    // request size to distinguish full, partial and no-op deletes.
                    if (rt == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    } else if (rt == 0) {
                        return new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                    } else if (rt < keys.length) {
                        return new ResultData(CacheResultCode.PART_SUCCESS, null, null);
                    } else if (rt == keys.length) {
                        return new ResultData(CacheResultCode.SUCCESS, null, null);
                    } else {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    }
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("DEL keys(%s) 已重试第%s次", keys.length, i));
        }
    }

    /*********************************** String ***********************************/

    /**
     * Asynchronously executes SET (optionally with an expiry), with bounded retries.
     *
     * @param key        the key; must not be null
     * @param val        the value; must not be null
     * @param expireTime TTL in seconds, or null for no expiry
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public void set(final String key, final Object val, final Long expireTime) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notNull(val, "The given val must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                final RedisFuture<String> future = (expireTime == null)
                        ? asyncCommands.set(key, val)
                        : asyncCommands.set(key, val, SetArgs.Builder.ex(expireTime));

                return future.handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("SET", key, ex));
                        return new ResultData(ex);
                    }
                    // SET replies "OK" on success; anything else is kept as the message.
                    return CacheConsts.OK.equals(rt)
                            ? new ResultData(CacheResultCode.SUCCESS, null, null)
                            : new ResultData(CacheResultCode.FAIL, rt, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("SET key %s 已重试第%s次", key, i));
        }
    }

    /**
     * Asynchronously stores a whole list under a single key via SET (optionally
     * with an expiry), with bounded retries. The list is serialized as one value.
     *
     * @param key        the key; must not be null
     * @param list       the list value; must not be empty
     * @param expireTime TTL in seconds, or null for no expiry
     * @param <T>        element type of the list
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public <T> void setlist(final String key, final List<T> list, final Long expireTime) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notEmpty(list, "The given list must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                final RedisFuture<String> future = (expireTime == null)
                        ? asyncCommands.set(key, list)
                        : asyncCommands.set(key, list, SetArgs.Builder.ex(expireTime));

                return future.handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("SETLIST", key, ex));
                        return new ResultData(ex);
                    }
                    return CacheConsts.OK.equals(rt)
                            ? new ResultData(CacheResultCode.SUCCESS, null, null)
                            : new ResultData(CacheResultCode.FAIL, rt, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("SETLIST key %s 已重试第%s次", key, i));
        }
    }

    /**
     * Asynchronously executes SETEX for the given key, with bounded retries.
     *
     * @param key        the key; must not be null
     * @param obj        the value; must not be null
     * @param expireTime TTL in seconds
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public void setex(final String key, final Object obj, final long expireTime) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notNull(obj, "The given obj must not be null!");
        // (removed a pointless Assert.notNull on the primitive expireTime — it can never be null)

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.setex(key, expireTime, obj).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("SETEX", key, ex));
                        return new ResultData(ex);
                    }
                    return CacheConsts.OK.equals(rt)
                            ? new ResultData(CacheResultCode.SUCCESS, null, null)
                            : new ResultData(CacheResultCode.FAIL, rt, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("SETEX key %s 已重试第%s次", key, i));
        }
    }

    /**
     * Asynchronously stores a whole list under a single key via SETEX,
     * with bounded retries. The list is serialized as one value.
     *
     * @param key        the key; must not be null
     * @param list       the list value; must not be empty
     * @param expireTime TTL in seconds
     * @param <T>        element type of the list
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public <T> void setexlist(final String key, final List<T> list, final long expireTime) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notEmpty(list, "The given list must not be null!");
        // (removed a pointless Assert.notNull on the primitive expireTime — it can never be null)

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.setex(key, expireTime, list).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("SETEXLIST", "list(" + list.size() + ")", ex));
                        return new ResultData(ex);
                    }
                    return CacheConsts.OK.equals(rt)
                            ? new ResultData(CacheResultCode.SUCCESS, null, null)
                            : new ResultData(CacheResultCode.FAIL, rt, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("SETEXLIST key %s 已重试第%s次", key, i));
        }
    }

    /**
     * Asynchronously executes SETNX ("set if not exists"), with bounded retries.
     *
     * @param key the key; must not be null
     * @param obj the value; must not be null
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public void setnx(final String key, final Object obj) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notNull(obj, "The given obj must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.setnx(key, obj).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("SETNX", key, ex));
                        return new ResultData(ex);
                    }
                    if (rt == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    }
                    // BUGFIX: SETNX replies true when the key WAS newly set (success) and
                    // false when the key already existed. The original mapping was inverted
                    // (true -> FAIL, false -> SUCCESS). NOT_EXISTS is used for "already
                    // present" to avoid triggering a pointless retry loop.
                    return rt
                            ? new ResultData(CacheResultCode.SUCCESS, null, null)
                            : new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("SETNX key %s 已重试第%s次", key, i));
        }
    }

    /**
     * Asynchronously stores a whole list under a single key via SETNX
     * ("set if not exists"), with bounded retries.
     *
     * @param key  the key; must not be null
     * @param list the list value; must not be empty
     * @param <T>  element type of the list
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public <T> void setnxlist(final String key, final List<T> list) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notEmpty(list, "The given list must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.setnx(key, list).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("SETNXLIST", "list(" + list.size() + ")", ex));
                        return new ResultData(ex);
                    }
                    if (rt == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    }
                    // BUGFIX: SETNX replies true when the key WAS newly set (success) and
                    // false when the key already existed. The original mapping was inverted
                    // (true -> FAIL, false -> SUCCESS).
                    return rt
                            ? new ResultData(CacheResultCode.SUCCESS, null, null)
                            : new ResultData(CacheResultCode.NOT_EXISTS, null, false);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("SETNXLIST key %s 已重试第%s次", key, i));
        }
    }

    /**
     * Asynchronously executes GET for the given key, with bounded retries.
     *
     * @param key         the key; must not be null
     * @param targetClass expected value type; must not be null
     *                    (NOTE(review): only used for the unchecked cast of the
     *                    return value — the deserialized object is not validated
     *                    against it; confirm this is intended)
     * @return the cached value cast to T, or null when absent
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public <T> T get(final String key, final Class<T> targetClass) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notNull(targetClass, "The given targetClass must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.get(key).handle((obj, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("GET", key, ex));
                        return new ResultData(ex);
                    }
                    return obj != null
                            ? new ResultData(CacheResultCode.SUCCESS, null, obj)
                            : new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("GET key %s 已重试第%s次", key, i));
        }

        return null == cacheResult.getData() ? null : (T) cacheResult.getData();
    }

    /**
     * Asynchronously fetches a list that was stored as one value (see setlist /
     * setexlist), with bounded retries.
     *
     * @param key         the key; must not be null
     * @param targetClass expected element type; must not be null
     *                    (NOTE(review): only used for the unchecked cast — confirm)
     * @return the cached list cast to List&lt;T&gt;, or null when absent
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public <T> List<T> getlist(final String key, final Class<T> targetClass) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notNull(targetClass, "The given targetClass must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.get(key).handle((obj, ex) -> {
                    if (ex != null) {
                        // BUGFIX: was mislabeled "GET" in the original, which made these
                        // failures indistinguishable from plain get() in the logs.
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("GETLIST", key, ex));
                        return new ResultData(ex);
                    }
                    return obj != null
                            ? new ResultData(CacheResultCode.SUCCESS, null, obj)
                            : new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("GETLIST key %s 已重试第%s次", key, i));
        }

        return null == cacheResult.getData() ? null : (List<T>) cacheResult.getData();
    }

    /**
     * Asynchronously executes MSET for all entries of the given map, with bounded retries.
     *
     * @param objectMap key/value pairs to store; must not be empty
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public void mset(final Map<String, Object> objectMap) {
        Assert.notEmpty(objectMap, "The given objectMap must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.mset(objectMap).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("MSET", "map(" + objectMap.size() + ")", ex));
                        return new ResultData(ex);
                    }
                    // MSET replies "OK" on success; it never partially applies.
                    return CacheConsts.OK.equals(rt)
                            ? new ResultData(CacheResultCode.SUCCESS, null, null)
                            : new ResultData(CacheResultCode.FAIL, rt, null);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("MSET keys(%s) 已重试第%s次", objectMap.size(), i));
        }
    }

    /**
     * Asynchronously executes SETEX for every entry of the given map, in batches
     * bounded by the configured request queue size, with bounded retries per batch.
     *
     * <p>NOTE: the given map is drained — entries are removed as they are sent and
     * only restored into it when a batch needs a retry. Callers must not rely on
     * its contents afterwards.
     *
     * @param objectMap  key/value pairs to store; must not be empty; mutated by this call
     * @param expireTime per-key TTL in seconds
     * @throws RuntimeException from the retry-exhausted branch below
     *                          (NOTE(review): that branch is unreachable — the loop
     *                          condition guarantees i <= RETRY_TIMES — so exhausted
     *                          retries currently fall through silently; confirm intent)
     */
    public void batchSetex(final Map<String, Object> objectMap, final long expireTime) {
        Assert.notEmpty(objectMap, "The given objectMap must not be null!");

        CacheResult cacheResult = null;
        // Outer loop: keep sending batches until every entry has been dispatched.
        while (objectMap.size() > 0) {
            for(int i = 0; i <= CacheConsts.RETRY_TIMES ; i++) {

                // Entries sent in this batch; put back into objectMap if the batch must be retried.
                final Map<String, Object> removedMap = Maps.newHashMapWithExpectedSize(20);

                // execute work
                cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {


                    // A plain for loop is used for speed (Java 8 forEach was too slow here).
                    // Do not batch more SETs than the configured request queue size
                    // (request-queue-size: 20), otherwise Lettuce throws:
                    // io.lettuce.core.RedisException: Internal stack size exceeded: 20. Commands are not accepted until the stack size drops.
                    LongAdder count = new LongAdder();
                    CompletableFuture<Integer> future = CompletableFuture.completedFuture(0);
                    final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>)conn.async();
                    for (Iterator<Map.Entry<String, Object>> iterator = objectMap.entrySet().iterator(); iterator.hasNext();) {

                        // NOTE(review): the counter is bumped BEFORE the entry is processed,
                        // so each batch sends at most requestQueueSize-1 entries — confirm intended.
                        count.increment();
                        if (count.sum() >= lettuceProperties.getRequestQueueSize()) {
                            count.reset();
                            break;
                        }

                        Map.Entry<String, Object> entry = iterator.next();
                        final RedisFuture<String> resp = asyncCommands.setex(entry.getKey(), expireTime, entry.getValue());
                        // Fold each reply into a running count of non-OK responses.
                        future = future.thenCombine(resp, (failCount, respStr) -> CacheConsts.OK.equals(respStr) ? failCount : failCount + 1);
                        removedMap.put(entry.getKey(), entry.getValue());
                        iterator.remove();
                    }

                    return future.handle((failCount, ex) -> {
                        if (ex != null) {
                            JetCacheExecutor.defaultExecutor().execute(() -> logError("BATCHSETEX", "map(" + objectMap.size() + ")", ex));
                            return new ResultData(ex);
                        } else {
                            // Map the non-OK count onto full / partial / total failure.
                            if (failCount == 0) {
                                return new ResultData(CacheResultCode.SUCCESS, null, null);
                            } else if (failCount == objectMap.size()) {
                                return new ResultData(CacheResultCode.FAIL, null, null);
                            } else {
                                return new ResultData(CacheResultCode.PART_SUCCESS, null, null);
                            }
                        }
                    });
                }));

                // Blocks (bounded by the configured timeout) until the async result arrives.
                setTimeout(cacheResult);

                if (i <= CacheConsts.RETRY_TIMES && cacheResult.isNeedRetry()) {

                    // Put the failed batch back so the next attempt resends it.
                    objectMap.putAll(removedMap);
                    removedMap.clear();
                    log.info(String.format("BATCHSETEX keys(%s) 已重试第%s次", objectMap.size(), i));
                    continue;
                } else if(i > CacheConsts.RETRY_TIMES){
                    throw new RuntimeException("Redis cache data is failed!");
                }

                break;
            }
        }
    }

    /**
     * Asynchronously executes MGET for the given keys, with bounded retries.
     * Missing keys are mapped to the {@code CacheConsts.NIL} sentinel instead of
     * being omitted, so the returned map always has one entry per requested key.
     *
     * @param keys        keys to fetch; must not be empty
     * @param targetClass expected value type; must not be null
     *                    (NOTE(review): only used for the unchecked casts — confirm)
     * @return key -> value map (NIL sentinel for absent keys), or null on failure
     * @throws RuntimeException when all retry attempts are exhausted
     */
    public <T> Map<String, T> mget(final List<String> keys, final Class<T> targetClass) {
        Assert.notEmpty(keys, "The given keys must not be null!");
        Assert.notNull(targetClass, "The given targetClass must not be null!");

        final int size = keys.size();

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // Snapshot the key order so replies can be matched back positionally:
            // MGET returns values in the same order as the requested keys.
            final List<String> keyList = new ArrayList<>(keys);
            final Map<String, T> cacheResultMap = Maps.newHashMapWithExpectedSize(size);

            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisStringAsyncCommands<String, Object> asyncCommands = (RedisStringAsyncCommands<String, Object>) conn.async();
                return asyncCommands.mget(keys.toArray(new String[size])).handle((rts, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("MGET", "key(" + size + ")", ex));
                        return new ResultData(ex);
                    }
                    for (int j = 0; j < rts.size(); j++) {
                        KeyValue<String, Object> kv = rts.get(j);
                        String key = keyList.get(j);
                        if (kv != null && kv.hasValue()) {
                            cacheResultMap.put(key, (T) kv.getValue());
                        } else {
                            cacheResultMap.put(key, (T) CacheConsts.NIL);
                        }
                    }

                    return new ResultData(CacheResultCode.SUCCESS, null, cacheResultMap);
                });
            }));

            // Blocks (bounded by the configured timeout) until the async result arrives.
            setTimeout(cacheResult);

            if (!cacheResult.isNeedRetry()) {
                break;
            }
            // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
            // once the final attempt still needs a retry.
            if (i == CacheConsts.RETRY_TIMES) {
                throw new RuntimeException("Redis cache data is failed!");
            }
            log.info(String.format("MGET keys(%s) 已重试第%s次", size, i));
        }

        return null == cacheResult.getData() ? null : (Map<String, T>) cacheResult.getData();
    }

    /**
     * Asynchronously GETs many keys by pipelining batches (auto-flush disabled,
     * one flush per batch), with bounded retries per batch.
     *
     * <p>NOTE(review): the caller's {@code keys} list is cleared by this method —
     * confirm callers expect that side effect. Results are returned in request
     * order and may contain null elements for absent keys (original behavior,
     * preserved). On a retried batch, results already collected from the failed
     * attempt are not rolled back (original behavior, preserved — confirm).
     *
     * @param keys        keys to fetch; must not be empty; cleared by this call
     * @param targetClass expected element type; must not be null
     *                    (NOTE(review): only used for the unchecked cast — confirm)
     * @return the fetched values (may include nulls), in request order
     * @throws RuntimeException when all retry attempts for a batch are exhausted
     */
    public <T> List<T> batchGet(final List<String> keys, final Class<T> targetClass) {
        Assert.notEmpty(keys, "The given keys must not be null!");
        Assert.notNull(targetClass, "The given targetClass must not be null!");

        final int size = keys.size();
        final List<T> retList = Lists.newArrayListWithCapacity(20);
        // Copy-on-write so keys can be removed while iterating in the batch loop below.
        final List<String> keyList = new CopyOnWriteArrayList<>(keys);
        keys.clear();

        CacheResult cacheResult = null;
        while (keyList.size() > 0) {
            for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

                // Keys sent in this batch; put back into keyList if the batch must be retried.
                final List<String> removedList = Lists.newArrayListWithCapacity(20);

                cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {

                    final List<RedisFuture<Object>> futures = Lists.newArrayListWithCapacity(size);
                    final RedisAsyncCommands<String, Object> asyncCommands = conn.async();
                    final RedisStringAsyncCommands<String, Object> stringAsyncCommands = (RedisStringAsyncCommands<String, Object>) asyncCommands;

                    // Pipeline manually: disable auto-flushing, queue up to the configured
                    // request-queue-size limit, then flush all commands at once.
                    asyncCommands.setAutoFlushCommands(false);
                    try {
                        LongAdder count = new LongAdder();
                        for (String key : keyList) {

                            count.increment();
                            if (count.sum() >= lettuceProperties.getRequestQueueSize()) {
                                count.reset();
                                break;
                            }

                            futures.add(stringAsyncCommands.get(key));
                            removedList.add(key);
                            keyList.remove(key);
                        }

                        // write all commands to the transport layer
                        asyncCommands.flushCommands();
                    } finally {
                        // BUGFIX: the original never re-enabled auto-flush, so the pooled
                        // connection was returned with auto-flush off and later commands on
                        // it would silently never be sent.
                        asyncCommands.setAutoFlushCommands(true);
                    }

                    // Collect replies in request order. NOTE(review): the original per-key
                    // failure counter could never increment (List.add always returns true)
                    // and compared against keys.size()==0 after keys.clear(), so a missing
                    // key has always yielded a null element and overall SUCCESS; that
                    // effective behavior is preserved here, written honestly.
                    CompletableFuture<Void> all = CompletableFuture.completedFuture(null);
                    for (RedisFuture<Object> f : futures) {
                        all = all.thenCombine(f, (ignored, resp) -> {
                            retList.add((T) resp);
                            return null;
                        });
                    }

                    return all.handle((ignored, ex) -> {
                        if (ex != null) {
                            JetCacheExecutor.defaultExecutor().execute(() -> logError("BATCHGET", "key(" + removedList.size() + ")", ex));
                            return new ResultData(ex);
                        }
                        return new ResultData(CacheResultCode.SUCCESS, null, null);
                    });
                }));

                // Blocks (bounded by the configured timeout) until the async result arrives.
                setTimeout(cacheResult);

                if (!cacheResult.isNeedRetry()) {
                    break;
                }
                // BUGFIX: the original "i > RETRY_TIMES" throw was unreachable; fail loudly
                // once the final attempt for this batch still needs a retry.
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                // Put the failed batch back so the next attempt resends it.
                keyList.addAll(removedList);
                log.info(String.format("BATCHGET keys(%s) 已重试第%s次", keyList.size(), i));
            }
        }

        return retList;
    }

    /*********************************** Hash ***********************************/

    /**
     * Hash: asynchronously HDEL one or more fields from {@code key}, with retry.
     *
     * @param key  hash key, not null
     * @param keys field names to delete, not null
     * @throws RuntimeException when the command still fails after all retries
     */
    public void hdel(final String key, final String... keys) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notNull(keys, "The given keys must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisHashAsyncCommands<String, Object> asyncCommands = (RedisHashAsyncCommands<String, Object>)conn.async();
                return asyncCommands.hdel(key, keys).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("HDEL", "key(" + keys.length + ")", ex));
                        return new ResultData(ex);
                    }
                    if (rt == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    }
                    // HDEL replies with the number of fields actually removed; the
                    // original re-parsed the Long via Integer.valueOf(rt.toString())
                    // three times
                    final long deleted = rt;
                    if (deleted == 0) {
                        return new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                    } else if (deleted < keys.length) {
                        return new ResultData(CacheResultCode.PART_SUCCESS, null, null);
                    } else if (deleted == keys.length) {
                        return new ResultData(CacheResultCode.SUCCESS, null, null);
                    }
                    // deleting more fields than requested should be impossible
                    return new ResultData(CacheResultCode.FAIL, null, null);
                });
            }));

            setTimeout(cacheResult);

            if (cacheResult.isNeedRetry()) {
                // the original "i > RETRY_TIMES" branch was unreachable (loop
                // condition guarantees i <= RETRY_TIMES); throw explicitly once
                // the final retry has failed
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                log.info(String.format("HDEL keys(%s) 已重试第%s次", keys.length, i));
                continue;
            }

            break;
        }
    }

    /**
     * Hash: asynchronously HMSET {@code entityMap} under {@code key}, with retry.
     *
     * @param key       hash key, not null
     * @param entityMap field -> value map to write, not empty
     * @throws RuntimeException when the command still fails after all retries
     */
    public void hmset(final String key, final Map<String, Object> entityMap) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notEmpty(entityMap, "The given entityMap must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisHashAsyncCommands<String, Object> asyncCommands = (RedisHashAsyncCommands<String, Object>)conn.async();
                return asyncCommands.hmset(key, entityMap).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("HMSET", key, ex));
                        return new ResultData(ex);
                    }
                    // HMSET replies "OK" on success
                    if (CacheConsts.OK.equals(rt)) {
                        return new ResultData(CacheResultCode.SUCCESS, null, null);
                    }
                    return new ResultData(CacheResultCode.FAIL, rt, null);
                });
            }));

            setTimeout(cacheResult);

            if (cacheResult.isNeedRetry()) {
                // the original "i > RETRY_TIMES" branch was unreachable (loop
                // condition guarantees i <= RETRY_TIMES); throw explicitly once
                // the final retry has failed
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                log.info(String.format("HMSET key %s 已重试第%s次", key, i));
                continue;
            }

            break;
        }
    }

    /**
     * Hash: asynchronously HGETALL for {@code key}, with retry.
     *
     * @param key hash key, not null
     * @return the field map, or {@code null} when the key does not exist
     * @throws RuntimeException when the command still fails after all retries
     */
    @SuppressWarnings("unchecked")
    public Map<String, Object> hgetall(final String key) {
        Assert.notNull(key, "The given key must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisHashAsyncCommands<String, Object> asyncCommands = (RedisHashAsyncCommands<String, Object>)conn.async();
                return asyncCommands.hgetall(key).handle((rts, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("HGETALL", key, ex));
                        return new ResultData(ex);
                    }
                    // an empty map means the key does not exist
                    if (!CollectionUtils.isEmpty(rts)) {
                        return new ResultData(CacheResultCode.SUCCESS, null, rts);
                    }
                    return new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                });
            }));

            setTimeout(cacheResult);

            if (cacheResult.isNeedRetry()) {
                // the original "i > RETRY_TIMES" branch was unreachable (loop
                // condition guarantees i <= RETRY_TIMES); throw explicitly once
                // the final retry has failed
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                log.info(String.format("HGETALL key %s 已重试第%s次", key, i));
                continue;
            }

            break;
        }

        return null == cacheResult.getData() ? null : (Map<String, Object>)cacheResult.getData();
    }

    /**
     * Hash: batch HMSET over one pooled connection, submitting at most
     * request-queue-size entries per round (lettuce rejects larger command
     * stacks with "Internal stack size exceeded"). Failed rounds are retried.
     *
     * <p>Note: the given {@code map} is drained (emptied) by this method.
     *
     * @param map hash key -> field map entries to write; drained by this method
     * @throws RuntimeException when a round still fails after all retries
     */
    public void batchHmset(final Map<String, Map<String, Object>> map) {
        Assert.notEmpty(map, "The given map must not be null!");

        CacheResult cacheResult = null;
        while (map.size() > 0) {
            for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

                // entries submitted in THIS round; re-queued into map on retry
                final Map<String, Map<String, Object>> removedMap = Maps.newHashMapWithExpectedSize(20);

                // execute work
                cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {

                    // keep each round below request-queue-size commands, otherwise:
                    // io.lettuce.core.RedisException: Internal stack size exceeded: 20.
                    // NOTE(review): `map` is mutated inside this async callback while the
                    // outer while-loop reads its size — this relies on setTimeout()
                    // blocking until completion; confirm CacheResult guarantees that.
                    LongAdder count = new LongAdder();
                    CompletableFuture<Integer> future = CompletableFuture.completedFuture(0);
                    final RedisHashAsyncCommands<String, Object> asyncCommands = (RedisHashAsyncCommands<String, Object>)conn.async();
                    for (Iterator<Map.Entry<String, Map<String, Object>>> iterator = map.entrySet().iterator(); iterator.hasNext();) {

                        count.increment();
                        if (count.sum() >= lettuceProperties.getRequestQueueSize()) {
                            count.reset();
                            break;
                        }

                        final Map.Entry<String, Map<String, Object>> entry = iterator.next();
                        final RedisFuture<String> resp = asyncCommands.hmset(entry.getKey(), entry.getValue());
                        // any reply other than "OK" counts as a failure
                        future = future.thenCombine(resp, (failCount, respStr) -> CacheConsts.OK.equals(respStr) ? failCount : failCount + 1);
                        removedMap.put(entry.getKey(), entry.getValue());
                        iterator.remove();
                    }

                    return future.handle((failCount, ex) -> {
                        if (ex != null) {
                            // removedMap.size(), not map.size(): map was already drained
                            // of this round's entries
                            JetCacheExecutor.defaultExecutor().execute(() -> logError("BATCHHMSET", "map(" + removedMap.size() + ")", ex));
                            return new ResultData(ex);
                        }
                        // compare against the size of THIS round; the original compared
                        // against the drained map, so FAIL was effectively unreachable
                        if (failCount == 0) {
                            return new ResultData(CacheResultCode.SUCCESS, null, null);
                        } else if (failCount == removedMap.size()) {
                            return new ResultData(CacheResultCode.FAIL, null, null);
                        } else {
                            return new ResultData(CacheResultCode.PART_SUCCESS, null, null);
                        }
                    });
                }));

                setTimeout(cacheResult);

                if (cacheResult.isNeedRetry()) {
                    // the original "i > RETRY_TIMES" branch was unreachable (loop
                    // condition guarantees i <= RETRY_TIMES); throw explicitly once
                    // the final retry has failed
                    if (i == CacheConsts.RETRY_TIMES) {
                        throw new RuntimeException("Redis cache data is failed!");
                    }
                    map.putAll(removedMap);
                    log.info(String.format("BATCHHMSET 已重试第%s次", i));
                    continue;
                }

                break;
            }
        }
    }

    /**
     * Hash: batch asynchronous HGETALL, pipelined in rounds of at most
     * request-queue-size commands per flush. Failed rounds are retried.
     *
     * <p>Note: the given {@code keys} list is cleared by this method (original
     * behavior preserved — callers must not reuse it).
     *
     * @param keys hash keys to read, not empty; drained by this method
     * @return field maps for keys that exist; empty replies (missing keys) are
     *         counted as failures and not added to the result
     * @throws RuntimeException when a round still fails after all retries
     */
    public List<Map<String, Object>> batchHgetall(final List<String> keys) {
        Assert.notEmpty(keys, "The given keys must not be null!");

        final int size = keys.size();
        final List<Map<String, Object>> retList = Lists.newArrayListWithCapacity(size);
        final List<String> keyList = new CopyOnWriteArrayList<>(keys);
        keys.clear();

        CacheResult cacheResult = null;
        while (keyList.size() > 0) {
            for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

                // keys submitted in THIS round; re-queued into keyList on retry
                final List<String> removedList = Lists.newArrayListWithCapacity(20);

                cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {

                    final List<RedisFuture<Map<String, Object>>> futures = Lists.newArrayListWithCapacity(size);
                    final RedisAsyncCommands<String, Object> asyncCommands = conn.async();
                    final RedisHashAsyncCommands<String, Object> hashAsyncCommands = (RedisHashAsyncCommands<String, Object>)asyncCommands;

                    // disable auto-flushing so the whole round goes out in one flush.
                    // NOTE(review): auto-flush is never re-enabled before the pooled
                    // connection is reused — confirm the pool/manager resets it.
                    asyncCommands.setAutoFlushCommands(false);

                    LongAdder count = new LongAdder();
                    for (String key : keyList) {

                        count.increment();
                        if (count.sum() >= lettuceProperties.getRequestQueueSize()) {
                            count.reset();
                            break;
                        }

                        futures.add(hashAsyncCommands.hgetall(key));
                        removedList.add(key);
                        keyList.remove(key);
                    }

                    // write all commands to the transport layer
                    asyncCommands.flushCommands();

                    // collect this round's hits locally and merge only when the round
                    // did not blow up, so a retried round cannot duplicate entries in
                    // retList (the original added straight to retList before the retry
                    // decision was known)
                    final List<Map<String, Object>> batchList = new CopyOnWriteArrayList<>();
                    CompletableFuture<Integer> future = CompletableFuture.completedFuture(0);
                    for (int j = 0; j < futures.size(); j++) {
                        // HGETALL replies an empty map for a missing key — treat it as
                        // a failure; the original used retList.add(...) as the success
                        // test, which always returns true, so failCount never moved
                        future = future.thenCombine(futures.get(j), (failCount, respMap) -> {
                            if (CollectionUtils.isEmpty(respMap)) {
                                return failCount + 1;
                            }
                            batchList.add(respMap);
                            return failCount;
                        });
                    }

                    return future.handle((failCount, ex) -> {
                        if (ex != null) {
                            // futures.size(), not keys.size(): keys was cleared above
                            JetCacheExecutor.defaultExecutor().execute(() -> logError("BATCHHGETALL", "key(" + futures.size() + ")", ex));
                            return new ResultData(ex);
                        }
                        retList.addAll(batchList);
                        if (failCount == 0) {
                            return new ResultData(CacheResultCode.SUCCESS, null, null);
                        } else if (failCount == futures.size()) {
                            return new ResultData(CacheResultCode.FAIL, null, null);
                        } else {
                            return new ResultData(CacheResultCode.PART_SUCCESS, null, null);
                        }
                    });
                }));

                setTimeout(cacheResult);

                if (cacheResult.isNeedRetry()) {
                    // the original "i > RETRY_TIMES" branch was unreachable (loop
                    // condition guarantees i <= RETRY_TIMES); throw explicitly once
                    // the final retry has failed
                    if (i == CacheConsts.RETRY_TIMES) {
                        throw new RuntimeException("Redis cache data is failed!");
                    }
                    log.info(String.format("BATCHHGETALL keys(%s) 已重试第%s次", removedList.size(), i));
                    keyList.addAll(removedList);
                    continue;
                }

                break;
            }
        }

        return retList;
    }

    /*********************************** List ***********************************/

    /**
     * List: asynchronously LPUSH all elements of {@code list} onto {@code key},
     * with retry.
     *
     * @param key  list key, not null
     * @param list elements to push, not empty
     * @throws RuntimeException when the command still fails after all retries
     */
    public void lpush(final String key, final List<String> list) {
        Assert.notNull(key, "The given key must not be null!");
        Assert.notEmpty(list, "The given list must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisListAsyncCommands<String, Object> asyncCommands = (RedisListAsyncCommands<String, Object>)conn.async();
                return asyncCommands.lpush(key, list).handle((rt, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("LPUSH", key, ex));
                        return new ResultData(ex);
                    }
                    // LPUSH replies the new list length; 0/null means nothing was pushed
                    if (null == rt || rt == 0) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    }
                    return new ResultData(CacheResultCode.SUCCESS, null, null);
                });
            }));

            setTimeout(cacheResult);

            if (cacheResult.isNeedRetry()) {
                // the original "i > RETRY_TIMES" branch was unreachable (loop
                // condition guarantees i <= RETRY_TIMES); throw explicitly once
                // the final retry has failed
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                log.info(String.format("LPUSH key %s 已重试第%s次", key, i));
                continue;
            }

            break;
        }
    }

    /**
     * List: asynchronously LRANGE the whole list (0..-1) for {@code key}, with retry.
     *
     * @param key list key, not null
     * @return the list contents, or {@code null} when the key does not exist
     * @throws RuntimeException when the command still fails after all retries
     */
    @SuppressWarnings("unchecked")
    public List<Object> lrange(final String key) {
        Assert.notNull(key, "The given key must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisListAsyncCommands<String, Object> asyncCommands = (RedisListAsyncCommands<String, Object>)conn.async();
                return asyncCommands.lrange(key, 0, -1).handle((rts, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("LRANGE", key, ex));
                        return new ResultData(ex);
                    }
                    // original branch order NPE'd on a null reply (isEmpty(null) is
                    // true, then rts.size() dereferenced null) and made FAIL unreachable
                    if (rts == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    } else if (rts.isEmpty()) {
                        return new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                    }
                    return new ResultData(CacheResultCode.SUCCESS, null, rts);
                });
            }));

            setTimeout(cacheResult);

            if (cacheResult.isNeedRetry()) {
                // the original "i > RETRY_TIMES" branch was unreachable (loop
                // condition guarantees i <= RETRY_TIMES); throw explicitly once
                // the final retry has failed
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                log.info(String.format("LRANGE key %s 已重试第%s次", key, i));
                continue;
            }

            break;
        }

        return null == cacheResult.getData() ? null : (List<Object>)cacheResult.getData();
    }

    /*********************************** Set ***********************************/

    /**
     * Set: asynchronously SMEMBERS for {@code key}, with retry.
     *
     * @param key set key, not null
     * @return the members as a list, or {@code null} when the key does not exist
     * @throws RuntimeException when the command still fails after all retries
     */
    @SuppressWarnings("unchecked")
    public List<Object> smembers(final String key) {
        Assert.notNull(key, "The given key must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                final RedisSetAsyncCommands<String, Object> asyncCommands = (RedisSetAsyncCommands<String, Object>)conn.async();
                return asyncCommands.smembers(key).handle((rts, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("SMEMBERS", key, ex));
                        return new ResultData(ex);
                    }
                    // original branch order NPE'd on a null reply (isEmpty(null) is
                    // true, then rts.size() dereferenced null) and made FAIL unreachable
                    if (rts == null) {
                        return new ResultData(CacheResultCode.FAIL, null, null);
                    } else if (rts.isEmpty()) {
                        return new ResultData(CacheResultCode.NOT_EXISTS, null, null);
                    }
                    return new ResultData(CacheResultCode.SUCCESS, null, rts);
                });
            }));

            setTimeout(cacheResult);

            if (cacheResult.isNeedRetry()) {
                // the original "i > RETRY_TIMES" branch was unreachable (loop
                // condition guarantees i <= RETRY_TIMES); throw explicitly once
                // the final retry has failed
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                log.info(String.format("SMEMBERS key %s 已重试第%s次", key, i));
                continue;
            }

            break;
        }

        return null == cacheResult.getData() ? null : new ArrayList<>((Set<Object>)cacheResult.getData());
    }

    /*********************************** 通用 ***********************************/

    /**
     * Run several asynchronous commands inside one MULTI/EXEC transaction, with retry.
     *
     * @param asyncCallback queues the commands onto the provided async connection
     * @throws RuntimeException when the transaction still fails after all retries
     */
    public void batchProcess(final AsyncCallback asyncCallback) {
        Assert.notNull(asyncCallback, "The given asyncCallback must not be null!");

        CacheResult cacheResult = null;
        for (int i = 0; i <= CacheConsts.RETRY_TIMES; i++) {

            // execute work
            cacheResult = new CacheResult(asyncPool.acquire().thenCompose(conn -> {
                RedisAsyncCommands<String, Object> asyncCommands = conn.async();
                asyncCommands.multi();
                asyncCallback.asyncInvoke(asyncCommands);

                return asyncCommands.exec().handle((transactionResult, ex) -> {
                    if (ex != null) {
                        JetCacheExecutor.defaultExecutor().execute(() -> logError("BATCHPROCESS", null, ex));
                        return new ResultData(ex);
                    }
                    // succeed only when EVERY element of the transaction result looks
                    // OK — the original kept only the LAST element's status, so an
                    // early failure was masked by a later success
                    boolean seenAny = false;
                    boolean allOk = true;
                    for (Iterator<Object> it = transactionResult.iterator(); it.hasNext();) {
                        final Object rt = it.next();
                        seenAny = true;
                        final boolean ok = (null != rt) && (((rt instanceof String) && CacheConsts.OK.equals(rt.toString()))
                                || ((rt instanceof Long) && Long.valueOf(rt.toString()).compareTo(0L) >= 0)
                                || ((rt instanceof Boolean) && Boolean.valueOf(rt.toString())));
                        allOk &= ok;
                    }
                    if (seenAny && allOk) {
                        return new ResultData(CacheResultCode.SUCCESS, null, null);
                    }
                    // an empty transaction result previously yielded a null ResultData
                    // (likely NPE downstream); report FAIL so the retry logic can react
                    return new ResultData(CacheResultCode.FAIL, null, null);
                });
            }));

            setTimeout(cacheResult);

            if (cacheResult.isNeedRetry()) {
                // the original "i > RETRY_TIMES" branch was unreachable (loop
                // condition guarantees i <= RETRY_TIMES); throw explicitly once
                // the final retry has failed
                if (i == CacheConsts.RETRY_TIMES) {
                    throw new RuntimeException("Redis cache data is failed!");
                }
                log.info(String.format("BATCHPROCESS 已重试第%s次", i));
                continue;
            }

            break;
        }
    }
}