package org.qiyu.live.user.provider.service.impl;

import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.toolkit.CollectionUtils;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.apache.rocketmq.client.exception.MQBrokerException;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.producer.MQProducer;
import org.apache.rocketmq.common.message.Message;
import org.apache.rocketmq.remoting.exception.RemotingException;
import org.qiyu.live.common.interfaces.utils.ConvertBeanUtils;
import org.qiyu.live.framework.redis.starter.config.IGenericJackson2JsonRedisSerializer;
import org.qiyu.live.framework.redis.starter.key.UserProviderCacheKeyBuilder;
import org.qiyu.live.user.constants.CacheAsyncDeleteCode;
import org.qiyu.live.user.dto.UserCacheAsyncDeleteDTO;
import org.qiyu.live.user.dto.UserDTO;
import org.qiyu.live.user.provider.dao.mapper.IUserMapper;
import org.qiyu.live.user.provider.dao.po.UserPO;
import org.qiyu.live.user.provider.service.IUserService;
import org.springframework.dao.DataAccessException;
import org.springframework.data.redis.core.RedisOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.SessionCallback;
import org.springframework.stereotype.Service;

import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static org.qiyu.live.user.constants.UserProviderTopicNames.CACHE_ASYNC_DELETE_TOPIC;

@Service
@Slf4j
public class UserServiceImpl implements IUserService {

    /** Base cache TTL in minutes for a single user entry. */
    private static final int BASE_EXPIRE_MINUTES = 30;

    /** Number of physical user tables; sharding key is {@code userId % SHARD_COUNT}. */
    private static final int SHARD_COUNT = 100;

    @Resource
    private IUserMapper iUserMapper;

    @Resource
    private RedisTemplate<String, UserDTO> redisTemplate;

    @Resource
    private UserProviderCacheKeyBuilder userProviderCacheKeyBuilder;

    @Resource
    private MQProducer mqProducer;

    /**
     * Looks up a single user, reading through the Redis cache.
     * On a cache miss the PO is loaded from the database, converted to a DTO
     * (POs stay in the DAO layer; DTOs cross service boundaries) and written
     * back to Redis with a jittered TTL.
     *
     * @param userId id of the user to load; {@code null} returns {@code null}
     * @return the cached or freshly loaded user, or {@code null} when not found
     */
    @Override
    public UserDTO getByUserId(Long userId) {
        if (userId == null) {
            return null;
        }
        String key = userProviderCacheKeyBuilder.buildUserInfoKey(userId);
        UserDTO userDTO = redisTemplate.opsForValue().get(key);
        if (userDTO != null) {
            return userDTO;
        }
        userDTO = ConvertBeanUtils.convert(iUserMapper.selectById(userId), UserDTO.class);
        if (userDTO != null) {
            // Jittered TTL so entries written in bursts do not all expire at once.
            redisTemplate.opsForValue().set(key, userDTO, createRandomExpireTime(), TimeUnit.MINUTES);
        }
        return userDTO;
    }

    /**
     * Updates a user row and keeps the cache coherent via a delayed double delete:
     * delete the cache entry now, then have a RocketMQ consumer delete it again
     * after a short delay, covering readers that re-populate the cache with stale
     * data between the first delete and the DB write becoming visible.
     *
     * @param userDTO user to update; must carry a non-null userId
     * @return {@code true} when at least one row was updated
     */
    @Override
    public boolean updateUserInfo(UserDTO userDTO) {
        // BUGFIX: was `userDTO == null && userDTO.getUserId() == null`, which
        // NPEs on a null DTO and lets a DTO without a userId through.
        if (userDTO == null || userDTO.getUserId() == null) {
            return false;
        }

        // Step 1 of the delayed double delete: drop the cache entry immediately.
        redisTemplate.delete(userProviderCacheKeyBuilder.buildUserInfoKey(userDTO.getUserId()));

        // Step 2: schedule the second delete through RocketMQ.
        UserCacheAsyncDeleteDTO userCacheAsyncDeleteDTO = new UserCacheAsyncDeleteDTO();
        userCacheAsyncDeleteDTO.setCode(CacheAsyncDeleteCode.USER_INFO_DELETE.getCode());
        userCacheAsyncDeleteDTO.setJson(JSON.toJSONString(userDTO));

        Message message = new Message();
        // Explicit charset: the no-arg getBytes() uses the platform default,
        // which is not guaranteed to be UTF-8 on every deployment.
        message.setBody(JSON.toJSONString(userCacheAsyncDeleteDTO).getBytes(StandardCharsets.UTF_8));
        message.setTopic(CACHE_ASYNC_DELETE_TOPIC);
        // RocketMQ built-in delay levels: 1=1s 2=5s 3=10s 4=30s 5=1m 6=2m 7=3m
        // 8=4m 9=5m 10=6m 11=7m 12=8m ... up to 18=2h. Level 1 gives the short
        // gap the double-delete pattern needs.
        message.setDelayTimeLevel(1);
        try {
            mqProducer.send(message);
        } catch (Exception e) {
            // Best effort: the immediate delete already happened and cache
            // entries carry a TTL, so a lost delayed delete only extends
            // the staleness window instead of corrupting data.
            log.error("消息发送失败", e);
        }

        return iUserMapper.updateById(ConvertBeanUtils.convert(userDTO, UserPO.class)) > 0;
    }

    /**
     * Inserts a new user row.
     *
     * @param userDTO user to insert; must carry a non-null userId (sharding key)
     * @return {@code true} when the row was inserted
     */
    @Override
    public boolean insertUserInfo(UserDTO userDTO) {
        // BUGFIX: was `&&`, which NPEs on a null DTO (same defect as update).
        if (userDTO == null || userDTO.getUserId() == null) {
            return false;
        }
        return iUserMapper.insert(ConvertBeanUtils.convert(userDTO, UserPO.class)) > 0;
    }

    /**
     * Batch user lookup. A plain {@code selectBatchIds} through sharding-jdbc
     * degenerates into a UNION ALL across all {@value #SHARD_COUNT} physical
     * tables, so instead: read Redis first, then group the cache misses by the
     * sharding key ({@code userId % 100}) and query each shard in parallel —
     * every parallel task touches exactly one table. DB results are back-filled
     * into Redis with randomized TTLs.
     *
     * @param userIds ids to load; null/empty yields an empty map, null elements are ignored
     * @return userId → DTO for every id that exists
     */
    @Override
    public Map<Long, UserDTO> batchQueryUserInfo(List<Long> userIds) {
        // ROBUSTNESS: the original NPE'd on a null or empty id list.
        if (CollectionUtils.isEmpty(userIds)) {
            return Collections.emptyMap();
        }

        // Build the cache keys (Set dedupes repeated ids; null ids would NPE
        // in the sharding step below, so drop them here).
        Set<String> userIdKeyList = userIds.stream()
                .filter(Objects::nonNull)
                .map(userProviderCacheKeyBuilder::buildUserInfoKey)
                .collect(Collectors.toSet());

        // multiGet returns positional nulls for misses, and the list itself can
        // be null (e.g. when executed inside a pipeline) — strip both.
        List<UserDTO> multiGetResult = redisTemplate.opsForValue().multiGet(userIdKeyList);
        List<UserDTO> userDTOS = (multiGetResult == null ? Collections.<UserDTO>emptyList() : multiGetResult)
                .stream()
                .filter(Objects::nonNull)
                .collect(Collectors.toList());

        // Fast path: every requested id was already cached.
        if (!CollectionUtils.isEmpty(userDTOS) && userDTOS.size() == userIds.size()) {
            return userDTOS.stream().collect(Collectors.toMap(UserDTO::getUserId, userDTO -> userDTO));
        }

        List<Long> userIdInCache = userDTOS.stream().map(UserDTO::getUserId).collect(Collectors.toList());
        List<Long> userIdNotInCache = userIds.stream()
                .filter(Objects::nonNull)
                .filter(id -> !userIdInCache.contains(id))
                .collect(Collectors.toList());

        // Group the misses by the sharding key so each parallel task queries a
        // single physical table instead of fanning out across all of them.
        Map<Long, List<Long>> userIdMap = userIdNotInCache.stream()
                .collect(Collectors.groupingBy(userId -> userId % SHARD_COUNT));
        CopyOnWriteArrayList<UserDTO> dbQueryUserDTOS = new CopyOnWriteArrayList<>();
        userIdMap.values().parallelStream().forEach(shardIds ->
                dbQueryUserDTOS.addAll(ConvertBeanUtils.convertList(iUserMapper.selectBatchIds(shardIds), UserDTO.class)));

        if (!CollectionUtils.isEmpty(dbQueryUserDTOS)) {
            // Back-fill the cache so subsequent lookups hit Redis directly.
            Map<String, UserDTO> dbQueryUserDTOSMap = dbQueryUserDTOS.stream()
                    .collect(Collectors.toMap(
                            userDTO -> userProviderCacheKeyBuilder.buildUserInfoKey(userDTO.getUserId()),
                            userDTO -> userDTO));
            redisTemplate.opsForValue().multiSet(dbQueryUserDTOSMap);

            // MSET cannot carry a TTL, so pipeline one EXPIRE per key in a single
            // round trip; the TTLs are jittered to avoid a cache avalanche.
            redisTemplate.executePipelined(new SessionCallback<Object>() {
                @Override
                @SuppressWarnings("unchecked") // keys are Strings; template is typed <String, UserDTO>
                public <K, V> Object execute(RedisOperations<K, V> operations) throws DataAccessException {
                    for (String redisKey : dbQueryUserDTOSMap.keySet()) {
                        operations.expire((K) redisKey, createRandomExpireTime(), TimeUnit.MINUTES);
                    }
                    return null; // pipelined results are collected by the template, not here
                }
            });
            userDTOS.addAll(dbQueryUserDTOS);
        }

        return userDTOS.stream().collect(Collectors.toMap(UserDTO::getUserId, userDTO -> userDTO));
    }

    /**
     * Returns a TTL in minutes, uniform in [30, 60].
     * BUGFIX: the original returned {@code nextInt(1000) * 60 * 30} minutes —
     * anywhere from an invalid 0-minute TTL up to ~3.4 years — instead of the
     * intended "base TTL plus jitter".
     */
    private int createRandomExpireTime() {
        return BASE_EXPIRE_MINUTES + ThreadLocalRandom.current().nextInt(BASE_EXPIRE_MINUTES + 1);
    }
}
