package com.bigseller.study.consumer;

import com.bigseller.study.model.ImageObj;
import com.bigseller.study.utils.JSONUtil;
import com.sun.deploy.net.HttpUtils;

import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;

import sun.misc.IOUtils;
import sun.security.provider.MD5;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * Kafka consumer that pulls image descriptors from the {@code topic-pics} topic and
 * downloads each image to {@link #BASE_OUT_DIR} using a fixed-size worker pool.
 *
 * <p>Offsets are committed manually (async) only after every record of a poll batch
 * has been processed, so a crash mid-batch re-delivers the batch rather than losing it.
 */
public class SimpleConsumer {

    /** Fixed pool of 10 download workers; unbounded queue absorbs bursts within a poll batch. */
    private static final ThreadPoolExecutor orderPackPool =
            new ThreadPoolExecutor(10, 10, 2000, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());

    /**
     * Single shared HTTP client. OkHttp clients own a connection pool and dispatcher
     * threads, so they must be reused — creating one per request leaks resources.
     */
    private static final OkHttpClient HTTP_CLIENT = new OkHttpClient();

    /** Local directory the downloaded images are written into. */
    private static final String BASE_OUT_DIR = "D:\\youdaoyun\\workspace_kafka\\kafka-study\\kafka-test\\src\\main\\resources\\out";

    public static void main(String[] args) {

        Properties properties = getConsumerProperties("bigseller.pic.group", 10);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);

        consumer.subscribe(Collections.singletonList("topic-pics"));
        while (true) {
            try {
                // Duration-based poll; the poll(long) overload is deprecated.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                // Each batch must be fully processed before the next poll, and the total
                // time must stay below max.poll.interval.ms — hence the worker pool plus
                // the latch below that waits for all downloads before committing.
                /*
                 * Broker-side liveness rules for this consumer:
                 *  - max.poll.interval.ms: maximum allowed gap between two poll() calls
                 *    (i.e. processing + commit time per batch); exceeding it marks the
                 *    consumer dead.
                 *  - session.timeout.ms: if no heartbeat arrives within this window the
                 *    broker also considers the consumer dead.
                 *
                 * Either condition evicts the consumer from the group and triggers a
                 * rebalance. Rebalances are triggered by any of:
                 *  - a change in group membership,
                 *  - a change in the set of subscribed topics,
                 *  - a change in the partition count of a subscribed topic.
                 */

                int recordCount = records.count();
                System.out.println("拉取:数据量>" + recordCount);
                if (recordCount > 0) {
                    // One permit per record; commit only after every download finished.
                    CountDownLatch countDownLatch = new CountDownLatch(recordCount);
                    for (ConsumerRecord<String, String> record : records) {
                        String value = record.value();
                        long id = System.currentTimeMillis();
                        ImageObj imageObj = JSONUtil.jsonToObject(value, ImageObj.class);
                        System.out.println("id" + id + "获取数据:" + imageObj.getRegular());
                        try {
                            orderPackPool.execute(() -> {
                                try {
                                    downloadFile(imageObj, id);
                                } catch (Exception e) {
                                    e.printStackTrace();
                                }
                                // Always count down, even on failure, or await() hangs forever.
                                countDownLatch.countDown();
                            });
                        } catch (Exception e) {
                            // e.g. RejectedExecutionException — record is skipped, but the
                            // latch must still be released for this record.
                            e.printStackTrace();
                            countDownLatch.countDown();
                        }
                    }
                    // Block until the whole batch is downloaded (or failed).
                    try {
                        countDownLatch.await();
                        System.out.println("___end---------------------------------------------------------------------------------");
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag so the outer loop can observe it.
                        Thread.currentThread().interrupt();
                        e.printStackTrace();
                    }
                    consumer.commitAsync();
                }
            } catch (Exception e) {
                // Back off briefly on unexpected errors, then keep consuming.
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ignore) {
                    Thread.currentThread().interrupt();
                }
                e.printStackTrace();
            }
        }
    }

    /**
     * Downloads the "regular"-size image referenced by {@code imageObj} into
     * {@link #BASE_OUT_DIR}, named {@code <id>_<random>.png} to avoid collisions
     * between records sharing the same millisecond id.
     *
     * @param imageObj descriptor holding the image URL (from {@code getRegular()})
     * @param id       caller-supplied id used as the filename prefix
     */
    private static void downloadFile(ImageObj imageObj, Long id) {
        String regularUrl = imageObj.getRegular();
        Request request = new Request.Builder()
                .url(regularUrl)
                .build();
        // Random suffix prevents filename collisions within one poll batch.
        long suffix = ThreadLocalRandom.current().nextLong(100_000_000L);
        File target = new File(BASE_OUT_DIR + "/" + id + "_" + suffix + ".png");
        try (Response response = HTTP_CLIENT.newCall(request).execute()) {
            // Guard against HTTP errors / empty bodies before touching the filesystem;
            // the previous code would NPE or write an error page as a .png.
            if (!response.isSuccessful() || response.body() == null) {
                System.err.println("download failed: HTTP " + response.code() + " for " + regularUrl);
                return;
            }
            try (FileOutputStream fileOutputStream = new FileOutputStream(target)) {
                fileOutputStream.write(response.body().bytes());
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds the SASL/PLAIN consumer configuration.
     *
     * @param groupId consumer group this instance joins (members share the partition load)
     * @param maxPoll max records per poll; values &le; 0 fall back to a default of 30
     * @return fully populated consumer {@link Properties}
     */
    private static Properties getConsumerProperties(String groupId, int maxPoll) {

        Properties props = new Properties();
        // JAAS login config for SASL/PLAIN, resolved relative to this class on the classpath.
        // NOTE(review): getResource() returns null if the file is missing — this would NPE
        // at startup; confirm kafka_client_jaas.conf is always packaged.
        System.setProperty("java.security.auth.login.config", SimpleConsumer.class.getResource("../../../../kafka_client_jaas.conf").getPath());
        // Security protocol for the broker connection.
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        // SASL mechanism — keep as PLAIN.
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");

        // Broker endpoints. Use the ConsumerConfig constant here — the previous
        // ProducerConfig constant only worked because the key strings happen to match.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "124.70.208.68:9092,124.70.208.68:9093,124.70.208.68:9094");

        // Key/value deserializers for the String-encoded records.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Offsets are committed manually after each batch completes.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Heartbeat session timeout: the broker drops this consumer if no heartbeat
        // arrives within this window (this is NOT the poll-interval limit).
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 30000);
        // Max records per poll. Keep modest: an over-large batch that can't be processed
        // before the next poll triggers a rebalance. Guard <= 0 (not just == 0) so a
        // negative argument can't reach the broker config.
        if (maxPoll <= 0) {
            maxPoll = 30;
        }
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPoll);
        // Consumer group id; members of the same group share the partitions.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);

        return props;

    }
}
