package com.innodata.riskEngine.service.impl;

import com.innodata.riskEngine.common.pojo.ChannelDataPO;
import com.innodata.riskEngine.common.pojo.SourcePO;
import com.innodata.riskEngine.common.utils.json.JsonUtil;
import com.innodata.riskEngine.common.utils.string.StringUtil;
import com.innodata.riskEngine.config.KafkaRiskRuleProperties;
import com.innodata.riskEngine.mock.DataGenerator;
import com.innodata.riskEngine.service.LoadTesterService;
import com.innodata.riskEngine.utils.KafkaTemplateUtil;
import com.innodata.riskEngine.utils.LocalCacheUtil;
import com.innodata.riskEngine.vo.LoadTestReq;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.MessageFormatter;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;

import java.text.MessageFormat;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;

/**
 * @Author liboshuai
 * @Date 2023/11/2 17:15
 */
@Slf4j
@Service
public class LoadTesterServiceImpl implements LoadTesterService {

    @Autowired
    private KafkaTemplateUtil kafkaTemplateUtil;
    @Autowired
    private KafkaRiskRuleProperties kafkaRiskRuleProperties;

    /** Kafka bootstrap servers, injected from {@code spring.kafka.bootstrap-servers}. */
    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    /** Master switch for the mock-Flink accumulation listener below; disabled by default. */
    private static boolean mockFlink = false;

    /** Give up tailing after this many consecutive empty polls (topic exhausted). */
    private static final int MAX_EMPTY_POLLS = 5;

    /**
     * Per-user accumulation state for the mock-Flink computation:
     * userId -> running lottery-number sum and latest event timestamp.
     */
    private final LocalCacheUtil<String, LotteryNumberTime> cache = new LocalCacheUtil<>(600000, 10, TimeUnit.MINUTES);

    /**
     * Continuous load test.
     * NOTE(review): the data generation + Kafka send below is commented out, so this
     * method currently only extracts the request parameters and performs no work —
     * confirm whether it should be re-enabled or the endpoint removed.
     *
     * @param loadTestReq sizing parameters for the generated load
     */
    @Override
    public void loadTest(LoadTestReq loadTestReq) {
        Integer bankSize = loadTestReq.getBankSize();
        Integer channelSize = loadTestReq.getChannelSize();
        Integer userSize = loadTestReq.getUserSize();
        Integer lotteryNumberSize = loadTestReq.getLotteryNumberSize();

//        SourcePO sourcePO = DataGenerator.generateSourcePOList(bankSize,channelSize,userSize, lotteryNumberSize);
//        kafkaTemplateUtil.sendWithDetailedCallback(kafkaRiskRuleProperties.getProducerTopic(), JsonUtil.obj2JsonStr(sourcePO));
    }

    /**
     * Mimics the Flink job's per-user accumulation: for each valid source record,
     * adds the record's lottery number into the user's running sum and stores the
     * latest event timestamp. No-op unless {@link #mockFlink} is enabled.
     *
     * @param consumerRecord raw Kafka record whose value is a JSON-serialized {@link SourcePO}
     */
    @KafkaListener(topics = {"impSendOrder"})
    public void receiveNormal(ConsumerRecord<?, ?> consumerRecord) {
        if (!mockFlink) {
            return;
        }
        SourcePO sourcePO = JsonUtil.jsonStr2Obj(String.valueOf(consumerRecord.value()), SourcePO.class);
        if (!sourcePOFilter(sourcePO)) {
            return;
        }
        ChannelDataPO channelData = sourcePO.getChannelData();
        String userId = channelData.getUserId();
        LotteryNumberTime accumulated = cache.get(userId);
        if (Objects.isNull(accumulated)) {
            // First record for this user: seed the accumulator.
            cache.put(userId, new LotteryNumberTime(channelData.getLotteryNumber(), sourcePO.getEventTimestamp()));
        } else {
            // NOTE(review): assumes getLotteryNumber()/getLotteryNumberSum() are non-null
            // here — confirm the upstream contract, otherwise this unboxing can NPE.
            accumulated.setLotteryNumberSum(accumulated.getLotteryNumberSum() + channelData.getLotteryNumber());
            accumulated.setLatestTimestamp(sourcePO.getEventTimestamp());
            cache.put(userId, accumulated);
        }
    }

    /**
     * Accumulator value object: running lottery-number sum plus the timestamp
     * of the most recent event that contributed to it.
     */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    static class LotteryNumberTime {
        private Long lotteryNumberSum;
        private Long latestTimestamp;
    }

    /**
     * Validity filter for mock-Flink input: rejects null records, records without
     * channel data, and records whose userId is blank.
     *
     * @param sourcePO deserialized source record (may be null)
     * @return true when the record is safe to accumulate
     */
    private boolean sourcePOFilter(SourcePO sourcePO) {
        if (sourcePO == null) {
            log.warn("《数据源过滤器》：sourcePO对象-数据源为空，计算跳过");
            return false;
        }
        ChannelDataPO channelData = sourcePO.getChannelData();
        if (channelData == null) {
            log.warn("《数据源过滤器》：渠道对象-数据为空，计算跳过");
            return false;
        }
        if (!StringUtils.hasText(channelData.getUserId())) {
            log.warn("《数据源过滤器》：userId字段-用户ID为空，计算跳过");
            return false;
        }
        return true;
    }

    /**
     * Periodically logs the mock-Flink accumulation results (every 10 seconds).
     * The timer thread is a daemon so it can never keep the JVM alive on shutdown.
     */
    private void timedPrinting() {
        // Named daemon timer: must not block application shutdown.
        Timer timer = new Timer("mock-flink-cache-printer", true);

        TimerTask task = new TimerTask() {
            @Override
            public void run() {
                log.info("cache: {}", JsonUtil.obj2JsonStr(cache.getAll()));
            }
        };

        long delay = 0;                               // start immediately (ms)
        long period = TimeUnit.SECONDS.toMillis(10);  // repeat every 10 seconds

        timer.scheduleAtFixedRate(task, delay, period);
    }

    /**
     * Consumes the last {@code offset} messages from the given Kafka topic.
     *
     * For every partition the consumer seeks to "end offset - offset" (clamped to 0)
     * and then polls until either {@code offset} messages have been read or the topic
     * is exhausted ({@link #MAX_EMPTY_POLLS} consecutive empty polls). The exhaustion
     * guard fixes an infinite loop that occurred when the topic held fewer than
     * {@code offset} messages in total.
     *
     * @param topicName Kafka topic to tail
     * @param offset    number of trailing messages to read
     * @return formatted "offset / key / value" strings; an empty list when the topic
     *         is missing or an error occurs (never null)
     */
    @Override
    public List<String> tailKafkaTopic(String topicName, long offset) {
        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        props.put("group.id", "loadTesterTailKafka");
        // Read-only tail: never commit offsets for this ad-hoc group.
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Resolve every partition of the topic; bail out early if it does not exist
            // (partitionsFor may return null/empty for an unknown topic).
            List<PartitionInfo> partitionInfos = consumer.partitionsFor(topicName);
            if (CollectionUtils.isEmpty(partitionInfos)) {
                log.warn("tailKafkaTopic: topic [{}] has no partitions or does not exist", topicName);
                return Collections.emptyList();
            }
            List<TopicPartition> partitions = partitionInfos.stream()
                    .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
                    .collect(Collectors.toList());

            consumer.assign(partitions);

            // Seek each partition back `offset` messages, clamped at the beginning.
            for (TopicPartition partition : partitions) {
                long endOffset = consumer.endOffsets(Collections.singletonList(partition)).get(partition);
                long startOffset = Math.max(0, endOffset - offset);
                consumer.seek(partition, startOffset);
            }

            int numRecordsConsumed = 0;
            int emptyPolls = 0;
            List<String> resultList = new ArrayList<>();
            while (numRecordsConsumed < offset && emptyPolls < MAX_EMPTY_POLLS) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                if (records.isEmpty()) {
                    // The topic may hold fewer than `offset` messages in total;
                    // stop after repeated empty polls instead of spinning forever.
                    emptyPolls++;
                    continue;
                }
                emptyPolls = 0;
                for (ConsumerRecord<String, String> record : records) {
                    resultList.add(StringUtil.format("offset = {}, key = {}, value = {}",
                            record.offset(), record.key(), record.value()));
                    numRecordsConsumed++;
                    if (numRecordsConsumed >= offset) {
                        break;
                    }
                }
            }
            return resultList;
        } catch (Exception e) {
            log.error("Failed to tail Kafka topic", e);
        }
        // Error path: empty list instead of null so callers can iterate safely.
        return Collections.emptyList();
    }
}
