package net.self.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;

import java.util.Arrays;
import java.util.Properties;
/**
 * Created by zhenyu on 17-5-10.
 */

/**
 * Kafka consumer that reads metadata commands from the {@code __metadata} topic
 * with auto-commit disabled, so offsets can be committed manually (via
 * {@code MyConsumerRebalancerListener}) for exactly-once style processing.
 *
 * <p>Configuration comes from environment variables: {@code A10_REDIS} /
 * {@code A10_REDIS_PASSWORD} for the Redis connection and
 * {@code A10_BROKER_LIST} for the Kafka bootstrap servers; each falls back to a
 * local/default value when unset.
 */
public class ExactlyOnceDynamicConsumer {

    private static final Logger logger = LoggerFactory.getLogger(ExactlyOnceDynamicConsumer.class);

    /** Redis connection pool used by the (currently disabled) record handlers. */
    private JedisPool jedisPool;
    /** Producer for publishing downstream command/response messages. */
    private static Producer<String, String> producer;

    /**
     * Initializes the Redis pool and the Kafka producer from the environment.
     * Falls back to an unauthenticated Redis on localhost when {@code A10_REDIS}
     * is unset.
     */
    private void init() {
        String redisHost = System.getenv("A10_REDIS");
        String redisPassword = System.getenv("A10_REDIS_PASSWORD");
        // Bug fix: the original condition required BOTH variables to be null before
        // falling back to localhost, so a set password with an unset host passed a
        // null host into JedisPool. The fallback must hinge on the host alone; a
        // null password in the 5-arg constructor simply means "no AUTH".
        if (redisHost == null) {
            jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379, 60);
        } else {
            jedisPool = new JedisPool(new JedisPoolConfig(), redisHost, 6379, 60, redisPassword);
        }
        producer = SelfSignedKafkaProducer.createProducer();
    }

    /**
     * Entry point of the consumer loop: initializes resources, subscribes to the
     * {@code __metadata} topic with a rebalance listener that manages offsets,
     * and processes records forever.
     *
     * @throws InterruptedException if the polling loop is interrupted
     */
    public void readMessages() throws InterruptedException {
        init();
        KafkaConsumer<String, String> consumer = createConsumer();
        consumer.subscribe(Arrays.asList("__metadata"), new MyConsumerRebalancerListener(consumer));
        processRecords(consumer);
    }

    /**
     * Builds a String/String KafkaConsumer with auto-commit disabled so that
     * offsets are committed explicitly by the rebalance listener.
     *
     * @return a configured consumer, not yet subscribed
     */
    private static KafkaConsumer<String, String> createConsumer() {
        Properties props = new Properties();
        // Prefer the broker list from the environment; fall back to the compiled-in default.
        String brokerList = System.getenv("A10_BROKER_LIST");
        props.put("bootstrap.servers",
                brokerList == null ? KafkaProducerConfig.BOOTSTRAP_SERVERS : brokerList);

        // Key setting for exactly-once style processing: turn off auto commit.
        props.put("group.id", "my-a10-app-18");
        props.put("enable.auto.commit", "false");
        props.put("heartbeat.interval.ms", "2000");
        props.put("session.timeout.ms", "6001");

        // Caps data fetched per partition per poll; must exceed the largest single record.
        props.put("max.partition.fetch.bytes", "1400");

        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<String, String>(props);
    }

    /**
     * Handles a single metadata record (key = command name, value = payload).
     *
     * <p>The original command dispatch — account/pricing-policy handlers backed by
     * {@link #jedisPool} that published results via {@link #producer} — was
     * commented out and has been removed as dead code. This method is currently
     * an intentional no-op placeholder.
     *
     * @param record the consumed metadata record
     */
    private void process(ConsumerRecord<String, String> record) {
        // TODO(review): reinstate the command dispatch (AccountSLP /
        // TieredPricingPolicySLP / FixedPricingPolicySLP handlers) or delete
        // this hook and its Redis/producer plumbing entirely.
    }

    /**
     * Polls the consumer forever, delegating each record to {@link #process} and
     * logging its offset, key and value.
     *
     * @param consumer the subscribed consumer to drain
     */
    private void processRecords(KafkaConsumer<String, String> consumer) {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                process(record);
                // Use the class logger (parameterized) instead of System.out.printf.
                logger.info("offset = {}, key = {}, value = {}",
                        record.offset(), record.key(), record.value());
            }
        }
    }

    /**
     * CLI entry point: runs the consumer loop until the process is terminated.
     *
     * @param str unused command-line arguments
     * @throws InterruptedException if the consumer loop is interrupted
     */
    public static void main(String[] str) throws InterruptedException {
        ExactlyOnceDynamicConsumer consumer = new ExactlyOnceDynamicConsumer();
        consumer.readMessages();
    }
}

