package com.sgcc.dlsc.kafka.consumer;

import com.sgcc.dlsc.kafka.consumer.entity.BatchConsumeTask;
import com.sgcc.dlsc.kafka.consumer.util.ResultUtil;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.http.*;
import org.springframework.stereotype.Component;
import org.springframework.web.client.RestTemplate;

import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;

@Component
public class Consumer {
    private static final Logger LOGGER = LogManager.getLogger(Consumer.class);

    /** Delay before retrying a failed HTTP delivery, in milliseconds. */
    private static final long RETRY_BACKOFF_MS = 5000L;

    @Resource
    private KafkaConsumer<String, String> kafkaConsumer;
    @Resource
    private RestTemplate restTemplate;
    private static String url = "http://localhost:16000/syncLog";
    private HttpHeaders headers = new HttpHeaders();

    private ExecutorService executorService;
    private Future<?> future;
    // Number of successfully delivered records. Only ever touched from the
    // single-threaded executor, so a plain int is sufficient here.
    int count = 0;
    private ConcurrentLinkedQueue<ConsumerRecords<String, String>> recordsQueue;

    /**
     * Per-record consume loop: polls "audit-dev", hands each batch to a
     * single-threaded executor that POSTs every record to the sync-log
     * service, and commits offsets back on this (polling) thread.
     *
     * <p>Offsets travel from the worker to the polling thread as immutable
     * one-entry snapshots, so {@code commitSync} never iterates a map the
     * worker is still mutating. Currently disabled in favour of
     * {@link #batchConsume()}.
     */
//    @PostConstruct
    public void consume() {
        executorService = Executors.newSingleThreadExecutor();
        kafkaConsumer.subscribe(Collections.singletonList("audit-dev"));
        // Hand-off channel: worker enqueues committed-offset snapshots,
        // polling thread drains and commits them.
        ConcurrentLinkedQueue<Map<TopicPartition, OffsetAndMetadata>> concurrentLinkedQueue = new ConcurrentLinkedQueue<>();
        // Content type is constant for every request — set it once up front
        // instead of re-setting it per record from the worker thread.
        // (APPLICATION_JSON_UTF8 is deprecated; JSON is UTF-8 by definition.)
        headers.setContentType(MediaType.APPLICATION_JSON);
        long startTime = System.nanoTime();
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            Set<TopicPartition> partitions = records.partitions();

            future = executorService.submit(new Runnable() {
                @Override
                public void run() {
                    for (TopicPartition partition : partitions) {
                        List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                        for (ConsumerRecord<String, String> record : partitionRecords) {
                            Map<String, String> data = new HashMap<>();
                            data.put("topic", record.topic());
                            data.put("message", record.value());
                            HttpEntity<Map<String, String>> requestEntity = new HttpEntity<>(data, headers);
                            if (send(requestEntity)) {
                                count++;
                                // Publish an immutable snapshot for the polling
                                // thread. The committed offset is the NEXT record
                                // to consume, hence offset + 1.
                                concurrentLinkedQueue.add(Collections.singletonMap(
                                        partition, new OffsetAndMetadata(record.offset() + 1)));
                            }
                        }
                    }
                }

                /**
                 * Delivers one record to the downstream service, retrying with a
                 * fixed back-off until it is acknowledged or this thread is
                 * interrupted. Iterative on purpose: the previous recursive
                 * retry could overflow the stack during a long outage.
                 *
                 * @param requestEntity JSON body + headers for the POST
                 * @return true when the service acknowledged the record
                 */
                private boolean send(HttpEntity<Map<String, String>> requestEntity) {
                    while (!Thread.currentThread().isInterrupted()) {
                        try {
                            ResponseEntity<ResultUtil> responseEntity =
                                    restTemplate.exchange(url, HttpMethod.POST, requestEntity, ResultUtil.class);
                            if (responseEntity.getStatusCodeValue() == 200) {
                                ResultUtil body = responseEntity.getBody();
                                // Delivery counts only when the application-level
                                // status is also 200.
                                if (body != null && body.getStatus() == 200) {
                                    return true;
                                }
                            }
                            LOGGER.error("消费发送失败，正在重试....");
                        } catch (Exception e) {
                            LOGGER.error("消费发送失败，正在重试....");
                            LOGGER.error(e.getMessage());
                            try {
                                Thread.sleep(RETRY_BACKOFF_MS);
                            } catch (InterruptedException e1) {
                                // Restore the interrupt flag so the retry loop
                                // (and the executor) can shut down cleanly.
                                Thread.currentThread().interrupt();
                            }
                        }
                    }
                    return false;
                }
            });

            LOGGER.info("record.size:" + records.count() + "  ---  Queue.size:" + concurrentLinkedQueue.size());
            // Drain until poll() returns null: size() on a ConcurrentLinkedQueue
            // is O(n) and racy against the worker thread.
            Map<TopicPartition, OffsetAndMetadata> offsets;
            while ((offsets = concurrentLinkedQueue.poll()) != null) {
                kafkaConsumer.commitSync(offsets);
            }
            if (future != null && future.isDone()) {
                LOGGER.info(System.nanoTime() - startTime);
            }
        }
    }

    /**
     * Batch consume loop (active entry point). Polls "audit-dev" and hands
     * whole batches to a dedicated {@link BatchConsumeTask} through
     * {@code recordsQueue}; pauses the assigned partitions whenever the
     * backlog reaches 10 batches (resuming once the worker catches up) and
     * commits whatever offsets the worker reports via {@code offsetQueue}.
     * All KafkaConsumer calls stay on this thread — the client is not
     * thread-safe.
     */
    @PostConstruct
    public void batchConsume() {
        executorService = Executors.newSingleThreadExecutor();
        kafkaConsumer.subscribe(Collections.singletonList("audit-dev"));
        ConcurrentLinkedQueue<Map<TopicPartition, OffsetAndMetadata>> offsetQueue = new ConcurrentLinkedQueue<>();
        ConcurrentMap<TopicPartition, OffsetAndMetadata> offsetMap = new ConcurrentHashMap<>();
        recordsQueue = new ConcurrentLinkedQueue<>();
        boolean isPolling = true;
        executorService.submit(new BatchConsumeTask(recordsQueue, restTemplate, offsetQueue, offsetMap));

        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            Set<TopicPartition> assignment = kafkaConsumer.assignment();
            if (!records.isEmpty()) {
                recordsQueue.offer(records);
            }
            LOGGER.info("recordsQueue:" + recordsQueue.size() + "  records.size:" + records.count() +
                    "  offsetQueue.size:" + offsetQueue.size());
            if (recordsQueue.size() < 10) {
                if (!isPolling) {
                    // Backlog drained — resume fetching from all assigned partitions.
                    kafkaConsumer.resume(assignment);
                    isPolling = true;
                }
            } else {
                if (isPolling) {
                    // Backlog full — pause fetching; poll() keeps the consumer
                    // alive (heartbeats) without returning records.
                    kafkaConsumer.pause(assignment);
                    isPolling = false;
                }
            }
            // Drain until poll() returns null instead of checking size():
            // size() is O(n) on ConcurrentLinkedQueue and racy versus the
            // worker, and the check-then-poll pattern could hand commitSync
            // a null map.
            Map<TopicPartition, OffsetAndMetadata> offsets;
            while ((offsets = offsetQueue.poll()) != null) {
                kafkaConsumer.commitSync(offsets);
            }
        }
    }
}
