package com.tyc.kafka.consumer;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

/**
 * Kafka consumer that polls record batches, stages each batch as a pending
 * unit of work (intended: a Redis hash, field = offset, value = payload),
 * hands the batch to a thread pool for processing, then commits offsets
 * asynchronously. {@link #afterPropertiesSet()} is the hook for resuming
 * batches left unfinished in Redis after a restart.
 *
 * @author tyc
 * @version 1.0
 * @date 2022-10-19 13:52:17
 */
@Component("consumerTest")
@Slf4j
public class Consumer implements InitializingBean {
    @Autowired
    private KafkaConsumer<String, String> kafkaConsumer;

    @Autowired
    private ThreadPoolTaskExecutor taskExecutor;

    /**
     * Poll loop: fetch a batch, collect its payloads and offsets, dispatch the
     * whole batch for asynchronous processing, then commit the offsets.
     * Returns as soon as a poll yields no records.
     */
    public void doConsume(){
        while (true){
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
            if(null == records || records.isEmpty()){
                // NOTE(review): returning here stops consumption on the first
                // empty poll — confirm this is intended rather than `continue`.
                return;
            }
            List<String> tasks = new ArrayList<>();
            List<String> offsets = new ArrayList<>();
            String key = "";
            for (ConsumerRecord<String, String> record : records) {
                if(StringUtils.isEmpty(key)){
                    // Batch key is derived from the FIRST record of the batch,
                    // whose offset is the batch's minimum offset.
                    key = "unDoTasks:"+record.topic()+":"+record.partition()+":"+record.offset();
                }
                tasks.add(record.value());
                offsets.add(String.valueOf(record.offset()));
                // todo stage pending data in Redis: one batch per hash key,
                //      field = offset, value = payload
            }
            // Fix: submit ONCE per batch, AFTER the lists are fully built.
            // The original submitted inside the loop, handing the async task
            // lists that were still being mutated (race / duplicated work),
            // and called commitAsync() once per record.
            String finalKey = key;
            taskExecutor.execute(()-> doTask(tasks, finalKey, offsets));
            // Commit once the batch has been staged and dispatched.
            kafkaConsumer.commitAsync();
        }
    }

    /**
     * Processes one staged batch.
     *
     * @param tasks   payloads of the batch to process
     * @param key     Redis key under which the batch is staged
     * @param offsets Redis hash fields (record offsets) belonging to the batch
     */
    public void doTask(List<String> tasks,String key,List<String> offsets){
        try {
            for (String task : tasks) {
                TimeUnit.MILLISECONDS.sleep(50); // simulated per-item work
                // todo on completion remove the item, or delete the whole key
                //      once every item of the batch is done
            }
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of swallowing it, and
            // log via SLF4J rather than printStackTrace().
            Thread.currentThread().interrupt();
            log.warn("Task processing interrupted, key={}", key, e);
        }
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        // todo fetch still-unprocessed tasks from Redis and execute them
    }
}
