package com.hb.resource.service.impl;

import com.fasterxml.jackson.databind.JsonNode;
import com.hb.common.utils.JsonUtil;
import com.hb.common.utils.http.HttpUtils;
import com.hb.resource.service.BusinessService;
import com.hb.resource.utils.KafkaUtil;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

import java.io.IOException;
import java.util.Iterator;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * Library (book) data-source processing service.
 *
 * @author hb
 * @date 2023/7/08
 */

@Service("libraryService")
public class LibraryServiceImpl implements BusinessService {

    /** Typed template to avoid raw-type usage; key and value are both serialized as strings. */
    @Autowired
    @Qualifier("kafkaTemplate")
    private KafkaTemplate<String, String> kafkaTemplate;

    // NOTE(review): "gupiao" means "stocks" — this looks copy-pasted into the library
    // service. Confirm whether a dedicated kafka.topic.library property should exist.
    @Value("${kafka.topic.gupiao}")
    private String topic;

    // Default points at the Open Library textbook browse endpoint.
    @Value("${data.resource.library-url: https://openlibrary.org/browse.json?q=&subject=textbooks&sorts=lending___last_browse+desc&limit=18&page=100}")
    private String url;

    /**
     * Fetches book data from the configured Open Library URL and returns a status string.
     *
     * <p>NOTE(review): the {@code url} and {@code requestWay} parameters are currently
     * ignored — the request always uses the configured {@code this.url}. Confirm intent.
     *
     * @param url        requested URL (currently unused; configured URL wins)
     * @param requestWay request method hint (currently unused)
     * @return a fixed Chinese status string ("book service processed")
     * @throws IOException          if the HTTP call or JSON parsing fails
     * @throws ExecutionException   declared for future Kafka send-result handling
     * @throws InterruptedException declared for future Kafka send-result handling
     */
    @Override
    public String detailProcess(String url, String requestWay) throws IOException, ExecutionException, InterruptedException {
        // Fetch the raw payload from the configured endpoint.
        String response = HttpUtils.sendGet(this.url, "");

        // Parse to validate the payload is well-formed JSON before any downstream use.
        JsonNode jsonNode = JsonUtil.toJsonNode(response);

        // TODO: publishing is currently disabled. Intended flow:
        //   KafkaUtil.autoCreateTopic(topic, 3, (short) 1);
        //   iterate jsonNode.elements() and kafkaTemplate.send(topic, element.toString()),
        //   logging success/failure via a ListenableFutureCallback.
        return "处理图书服务";
    }

    /**
     * Ad-hoc CLI demo: parses Kafka-related command-line options and prints the result.
     * Kept for manual experimentation; not part of the service's runtime behavior.
     */
    public static void main(String[] args) throws Exception {
        // Fall back to sample arguments only when none are supplied (previously the
        // real arguments were unconditionally overwritten — a dev leftover).
        if (args.length == 0) {
            args = new String[]{"-b", "bootstrap-servers", "-s", "asd", "-c", "sada"};
        }

        final Options opts = new Options();
        opts.addOption(Option.builder("b")
                        .longOpt("bootstrap-servers").hasArg().desc("Kafka cluster bootstrap server string").build()
                )
                .addOption(Option.builder("s")
                        .longOpt("schema-registry").hasArg().desc("Schema Registry URL").build()
                )
                .addOption(Option.builder("c")
                        .longOpt("config-file")
                        .hasArg().desc("Java properties file with configurations for Kafka Clients").build()
                )
                .addOption(Option.builder("h")
                        .longOpt("help").hasArg(false).desc("Show usage information").build());

        // Print the registered options; the Collection's toString shows the contents
        // (printing the raw array only showed its identity hash).
        System.out.println(opts.getOptions());

        final CommandLine cl = new DefaultParser().parse(opts, args);

        final String schemaRegistryUrl = cl.getOptionValue("schema-registry", "asd");
        System.out.println("schema-registry: " + schemaRegistryUrl);

        // A real null default keeps Optional.ofNullable meaningful — the previous
        // literal "null" string made the orElse branch unreachable.
        final Properties defaultConfig = Optional.ofNullable(cl.getOptionValue("config-file"))
                .map(path -> {
                    System.out.println("path: " + path);
                    // TODO: actually load the properties from the file at 'path'.
                    return new Properties();
                })
                .orElseGet(Properties::new);

        System.out.println(defaultConfig);
    }
}
