package org.apache.ignite.examples.datagrid;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import org.apache.ignite.*;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.examples.model.Stock;

import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;


/**
 * Created by xbkaishui on 16/8/7.
 */
public class CacheKafkaStreamTest {

    /** Name of the Ignite cache the Kafka messages are streamed into. */
    private static final String CACHE_NAME = "KLINE_DAY_CACHE";

    /**
     * Starts an Ignite node, (re)creates the target cache and streams
     * Kafka messages into it via an {@link IgniteDataStreamer}.
     *
     * @param args Command line arguments (unused).
     * @throws IgniteException If the Ignite node fails to start or the cache
     *         cannot be created.
     */
    public static void main(String[] args) throws IgniteException {
        try (Ignite ignite = Ignition.start("config/example-ignite.xml")) {
            System.out.println();
            System.out.println(">>> Cache data streamer example started.");

            // Drop any stale cache from a previous run so indexing config is fresh.
            ignite.destroyCache(CACHE_NAME);

            CacheConfiguration<String, Stock> stockCfg = new CacheConfiguration<>(CACHE_NAME);
            stockCfg.setCacheMode(CacheMode.PARTITIONED); // Default.
            stockCfg.setIndexedTypes(String.class, Stock.class);

            // Auto-close cache at the end of the example.
            try (IgniteCache<String, Stock> cache = ignite.getOrCreateCache(stockCfg)) {
                long start = System.currentTimeMillis();

                // IgniteDataStreamer is AutoCloseable: closing it flushes any
                // buffered entries to the cluster. The original code leaked it.
                try (IgniteDataStreamer<String, Stock> stmr = ignite.dataStreamer(CACHE_NAME)) {
                    // Configure loader buffering and per-node parallelism.
                    stmr.perNodeBufferSize(1024);
                    stmr.perNodeParallelOperations(8);

                    runKafkaStream(stmr);
                }

                long end = System.currentTimeMillis();

                System.out.println(">>> Loaded  keys in " + (end - start) + "ms.");
            } finally {
                // Distributed cache could be removed from cluster only by #destroyCache() call.
//                ignite.destroyCache(CACHE_NAME);
            }
        }
    }

    /**
     * Consumes a Kafka topic and feeds every message into the given data streamer.
     * Blocks (for up to 30 days) while the consumer threads run.
     *
     * @param stmr Streamer that the parsed {@link Stock} entries are added to.
     */
    private static void runKafkaStream(IgniteDataStreamer<String, Stock> stmr) {
        String topic = "topic.out.KLine_Day";
        int partitionsNum = 3;

        // Kafka / ZooKeeper consumer configuration.
        Properties kafkaConf = new Properties();
        kafkaConf.put("zookeeper.connect", "ZJ-HZ-BN01-KAFKA-02:2181");
        kafkaConf.put("group.id", "ig_kf_test4");
        kafkaConf.put("auto.offset.reset", "smallest");
        kafkaConf.put("auto.commit.enable", "true");
        kafkaConf.put("auto.commit.interval.ms", "60000");
        kafkaConf.put("zookeeper.connectiontimeout.ms", "1000000");

        ConsumerConfig config = new ConsumerConfig(kafkaConf);
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

        Map<String, Integer> topics = new HashMap<String, Integer>();
        topics.put(topic, partitionsNum);

        Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topics);

        // One consumer thread per partition stream.
        ExecutorService threadPool = Executors.newFixedThreadPool(partitionsNum);

        List<KafkaStream<byte[], byte[]>> partitions = streams.get(topic);

        for (KafkaStream<byte[], byte[]> partition : partitions)
            threadPool.execute(new MessageRunner(partition, stmr));

        try {
            // NOTE: shutdown() is deliberately not called — the consumer loops
            // never finish, so this simply blocks the main thread (max 30 days).
            threadPool.awaitTermination(30, TimeUnit.DAYS);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    /**
     * Consumer task that drains a single Kafka partition stream and pushes
     * each decoded {@link Stock} record into the shared data streamer.
     */
    static class MessageRunner implements Runnable {
        /** Kafka partition stream this task consumes. */
        private final KafkaStream<byte[], byte[]> partition;

        /** Shared streamer that loads entries into the Ignite cache. */
        private final IgniteDataStreamer<String, Stock> stmr;

        /**
         * @param partition Partition stream to consume.
         * @param stmr Target data streamer.
         */
        MessageRunner(KafkaStream<byte[], byte[]> partition, IgniteDataStreamer<String, Stock> stmr) {
            this.partition = partition;
            this.stmr = stmr;
        }

        /** Blocks on the partition iterator forever, streaming each message. */
        @Override public void run() {
            ConsumerIterator<byte[], byte[]> iterator = partition.iterator();

            while (iterator.hasNext()) {
                //connector.commitOffsets(); manual offset commit, used when auto.commit.enable=false
                MessageAndMetadata<byte[], byte[]> item = iterator.next();

                // StandardCharsets.UTF_8 cannot throw UnsupportedEncodingException,
                // unlike the string charset name the original code used.
                String data = new String(item.message(), StandardCharsets.UTF_8);

                Stock st = Stock.parseStock(data);

                // Key includes a timestamp so repeated quotes for the same stock
                // do not overwrite each other.
                stmr.addData(st.getsCode() + "_" + st.getMarketType() + System.currentTimeMillis(), st);
//                    System.out.println("added " + st);
            }
        }
    }

}
