package org.qiyu.live.user.provider.service.impl;

import com.alibaba.fastjson.JSON;
import com.google.common.collect.Maps;
import jakarta.annotation.Resource;
import org.apache.rocketmq.client.exception.MQBrokerException;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.producer.MQProducer;
import org.apache.rocketmq.common.message.Message;
import org.apache.rocketmq.remoting.exception.RemotingException;
import org.idea.qiyu.live.framework.redis.starter.key.UserProviderCacheKeyBuilder;
import org.qiyu.live.common.interfaces.utils.ConvertBeanUtils;
import org.qiyu.live.user.dto.UserCacheAsyncDeleteDTO;
import org.qiyu.live.user.dto.UserDTO;
import org.qiyu.live.user.provider.dao.mapper.IUserMapper;
import org.qiyu.live.user.provider.dao.po.UserPO;
import org.qiyu.live.user.provider.service.IUserService;
import org.springframework.dao.DataAccessException;
import org.springframework.data.redis.core.RedisOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.SessionCallback;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;

import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static org.qiyu.live.user.constants.CacheAsyncDeleteCode.USER_INFO_DELETE;
import static org.qiyu.live.user.constants.UserProviderTopicNames.CACHE_ASYNC_DELETE_TOPIC;

/**
 * Created with IntelliJ IDEA.
 * Description: User service implementation — cache-first reads via Redis,
 * delayed double-delete cache invalidation on update (via RocketMQ),
 * and shard-aware parallel batch lookup for ShardingJDBC-split user tables.
 * User: sfx
 * Date: 2023-09-16
 * Time: 11:00
 */
@Service
public class UserServiceImpl implements IUserService {

    @Resource
    private IUserMapper userMapper;

    // 引入Redis缓存
    @Resource
    private RedisTemplate<String, UserDTO> redisTemplate;
    // 构建redis的key
    @Resource
    private UserProviderCacheKeyBuilder userProviderCacheKeyBuilder;

    // 引入MQ,用于延时双删
    @Resource
    private MQProducer mqProducer;

    @Override
    public UserDTO getUserInfo(Long userId) {
        if (userId == null) {
            return null;
        }
        String key = userProviderCacheKeyBuilder.buildUserInfoKey(userId);
        // 缓存中有,直接返回
        UserDTO userDTO = (UserDTO) redisTemplate.opsForValue().get(key);
        if (userDTO != null) {
            return userDTO;
        }
        // 缓存没有,查询数据库
        UserPO userPO = userMapper.selectById(userId);
        if (userPO != null) {
            userDTO = ConvertBeanUtils.convert(userPO, UserDTO.class);
            // 存放到缓存中,设置为30min有效,表示用户在直播间的平均逗留时长
            redisTemplate.opsForValue().set(key, userDTO, 30, TimeUnit.MINUTES);
        }
        return userDTO;
    }

    public boolean updateUserInfo(UserDTO userDTO) {
        if (userDTO == null || userDTO.getUserId() == null) {
            return false;
        }
        UserPO userPO = ConvertBeanUtils.convert(userDTO, UserPO.class);
        int updateStatus = userMapper.updateById(userPO);
        //更新成功,删除缓存,->延时双删->延时1s进行删除
        if (updateStatus > -1) {
            // 更新的时候删除缓存
            String key = userProviderCacheKeyBuilder.buildUserInfoKey(userDTO.getUserId());
            redisTemplate.delete(key);
            // 防止我们删除之后,又来了一个线程从从节点读取,写入缓存->写入的脏数据,为避免这种情况
            // 我们延迟1s删除缓存,利用RocketMQ的延迟消息机制
            // 给MQ发送消息
            UserCacheAsyncDeleteDTO userCacheAsyncDeleteDTO = new UserCacheAsyncDeleteDTO();
            userCacheAsyncDeleteDTO.setCode(USER_INFO_DELETE.getCode());
            Map<String, Object> jsonParams = new HashMap<>();
            jsonParams.put("userId", userDTO.getUserId());
            userCacheAsyncDeleteDTO.setJson(JSON.toJSONString(jsonParams));
            Message message = new Message();
            // 设置topic
            message.setTopic(CACHE_ASYNC_DELETE_TOPIC);
            // 设置延迟时间->1s
            message.setDelayTimeLevel(1);
            // 设置消息主题
            message.setBody(JSON.toJSONString(userCacheAsyncDeleteDTO).getBytes());
            // 生产者发送消息给MQ
            try {
                mqProducer.send(message);
            } catch (MQClientException | RemotingException | MQBrokerException | InterruptedException e) {
                throw new RuntimeException(e);
            }
        }
        return true;
    }

    @Override
    public boolean insertOne(UserDTO userDTO) {
        if (userDTO == null || userDTO.getUserId() == null) {
            return false;
        }
        userMapper.insert(ConvertBeanUtils.convert(userDTO, UserPO.class));
        return true;
    }

    /**
     * @param userIdList 查询多个userId
     * @return
     */
    @Override
    public Map<Long, UserDTO> batchQueryUserInfo(List<Long> userIdList) {
        // 1. userIdList为空直接返回
        if (CollectionUtils.isEmpty(userIdList)) {
            return Maps.newHashMap();
        }
        // 2. 对id进行合法性判断(进行过滤掉),id>1000
        userIdList = userIdList.stream().filter(userId -> userId >= 1000).collect(Collectors.toList());
        // 3. 去Redis中查询,redis中有的直接放到结果集合中,没有的继续查询数据库
        // 3.1 查询到所有的key
        List<String> keys = new ArrayList<>();
        userIdList.forEach(userId -> {
            keys.add(userProviderCacheKeyBuilder.buildUserInfoKey(userId));
        });

        // 3.2 根据key查询在Redis中的userId
        List<UserDTO> userIdInCacheList = new ArrayList<>(redisTemplate.opsForValue().multiGet(keys).stream().filter(Objects::nonNull).toList());

        // 3.3 查询不在redis中userId
        List<Long> userIdInCacheListIds = userIdInCacheList.stream().map(UserDTO::getUserId).toList();
        List<Long> userIdNotInCacheListIds = userIdList.stream().filter(userId -> !userIdInCacheListIds.contains(userId)).toList();

        // 4. redis的查询结果与中的查询结果是一致的,那么直接返回
        if (!CollectionUtils.isEmpty(userIdInCacheListIds) && userIdInCacheListIds.size() == userIdList.size()) {
            // 转化成Map进行返回
            return userIdInCacheList.stream().collect(Collectors.toMap(UserDTO::getUserId, x -> x));
        }

        // 5. 对redis中没有的userId,去数据库中查询,因为底层使用的是ShardingJDBC
        // 会进行分表查询数据,会把多条SQL进行合并union All 这样十分耗费性能,所以
        // 我们底层使用的是并行流的方式来查询,提高查询速度

//        // 直接使用这样查询,底层ShardingJDBC会使用多个union all合并多条SQL,效率贼慢
//        userMapper.selectBatchIds(userIdList);

        // 5.1 我们对不在redis中的userId进行查询数据库
        // 我们使用并行流的方式查询
        // 5.1.1 对于每一个userId进行分表
        Map<Long, List<Long>> userIdListMap = userIdNotInCacheListIds.stream().collect(Collectors.groupingBy(userId -> userId % 100));
        List<UserDTO> dbQueryResult = new CopyOnWriteArrayList<>();
        //List<UserDTO> userDTOList1 = ConvertBeanUtils.convertList(userMapper.selectBatchIds(userIdList), UserDTO.class);
        userIdListMap.values().parallelStream().forEach(queryUserIdList -> {
            dbQueryResult.addAll(ConvertBeanUtils.convertList(userMapper.selectBatchIds(queryUserIdList), UserDTO.class));
        });

        // 6. 数据库查询的结果不为空,那么把这些数据放到缓存中,以便后续进行命中
        // 因为我们实现的是直播业务,用户有可能不断地进入直播间,那么就要频繁的查询它的信息
        if (!CollectionUtils.isEmpty(dbQueryResult)) {
            Map<String, UserDTO> saveCacheMap = dbQueryResult.stream()
                    .collect(Collectors.toMap(userDto -> userProviderCacheKeyBuilder.buildUserInfoKey(userDto.getUserId()), x -> x));
            // 批量的保存到redis中
            redisTemplate.opsForValue().multiSet(saveCacheMap);

            // 给redis中的key批量设置过期时间,由于multiSet没有设置过期时间的选项,
            // 所以我们使用管道命令,减少网络IO的传输
            redisTemplate.executePipelined(new SessionCallback<Object>() {
                @Override
                public <K, V> Object execute(RedisOperations<K, V> operations) throws DataAccessException {
                    for (String key : saveCacheMap.keySet()) {
                        // 避免缓存雪崩
                        operations.expire((K) key, createRandomExpireTime(), TimeUnit.SECONDS);
                    }
                    return null;
                }
            });
            // 加入到结果集中
            userIdInCacheList.addAll(dbQueryResult);
        }
        return userIdInCacheList.stream().collect(Collectors.toMap(UserDTO::getUserId, x -> x));
    }

    /**
     * 生成随机时间
     *
     * @return 返回随机时间
     */
    private long createRandomExpireTime() {
        int SECONDS = ThreadLocalRandom.current().nextInt(10000);//10000s随机
        return SECONDS + 30 * 60;// + 30min
    }
}