package dev_ops.tools.kafka;

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import java.util.Random;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.dev_common.func.Functions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Manual performance / smoke-test tool for a Kafka producer: pushes small JSON
 * payloads at the {@code zhangxin-server} topic against a test broker and logs
 * send latency and offsets. Intended to be run from {@link #main(String[])},
 * not used as a library.
 */
public class KafkaProdPerformance {
    public static final Logger log = LoggerFactory.getLogger(KafkaProdPerformance.class);

    public static void main(String[] args) {
        send_Multi();
    }

    /**
     * Sends 1000 records, one every 20 ms, spread across 5 random keys
     * ({@code user0}..{@code user4}), then logs the total elapsed time and
     * sleeps so asynchronous callbacks can be observed before the JVM exits.
     */
    public static void send_Multi() {
        Properties props = Prop_env_Test();
        final Random snRandom = new Random();
        long start = System.currentTimeMillis();
        // try-with-resources: KafkaProducer holds network threads and buffer
        // memory; close() also flushes any batches still in flight.
        try (Producer<String, byte[]> producer = new KafkaProducer<String, byte[]>(props)) {
            int size = 0;
            while (size++ < 1000) {
                Functions.sleep(20);
                ProducerRecord<String, byte[]> record = newRecord_Test("zhangxin-server",
                        "user" + snRandom.nextInt(5),
                        "{\"bizType\":\"user\",\"versionCode\":138}".getBytes(StandardCharsets.UTF_8));
                try {
                    producer.send(record, new Callback() {
                        public void onCompletion(RecordMetadata metadata, Exception e) {
                            // Per the Callback contract, metadata is null when the
                            // send failed — only read the offset on success (the
                            // previous code dereferenced metadata after an error).
                            if (e != null) {
                                log.error("send failed", e);
                            } else {
                                log.info("The offset is: {}", metadata.offset());
                            }
                        }
                    });
                } catch (Exception e) {
                    // A synchronous send failure (e.g. buffer exhausted,
                    // serialization error) aborts the whole run.
                    log.error("send aborted", e);
                    break;
                }
            }
            log.info("cost:{}", System.currentTimeMillis() - start);
            // Keep the process alive long enough to see broker-side effects
            // and any late callbacks before the producer is closed.
            Functions.sleep(120_000);
        }
    }

    /**
     * Sends two identical fire-and-forget records and logs the producer
     * metrics snapshot after each send.
     */
    public static void send_more() {
        Properties props = Prop_env_Test();
        try (Producer<String, byte[]> producer = new KafkaProducer<String, byte[]>(props)) {
            for (int i = 0; i < 2; i++) {
                ProducerRecord<String, byte[]> record = newRecord_Test("zhangxin-server", "user",
                        "{\"bizType\":\"user\",\"bizAction\":\"headImg\",\"m\":\"test msg\",\"request_time\":\"2017-10-19 18:40:39,258\",\"versionCode\":138}"
                                .getBytes(StandardCharsets.UTF_8));
                try {
                    producer.send(record);
                } catch (Exception e) {
                    log.error("send failed", e);
                }
                log.info("{}", producer.metrics());
            }
        }
    }

    /**
     * Producer properties pointing at the test-environment broker.
     *
     * @return common settings plus the test bootstrap server address
     */
    public static Properties Prop_env_Test() {
        Properties props = newCommonEnv();
        props.put("bootstrap.servers", "10.136.24.76:9091");
        // props.put("bootstrap.servers", "10.136.24.76:9092");
        return props;
    }

    /**
     * Common producer settings shared by all test scenarios; better not change.
     *
     * @return baseline producer configuration (acks=1, retries, batching,
     *         String key / byte[] value serializers)
     */
    public static Properties newCommonEnv() {
        Properties props = new Properties();
        props.put("acks", "1");
        props.put("retries", 2);
        props.put("request.timeout.ms", 10000);
        props.put("batch.size", 1024 * 1024 * 2);// 1024 * 1024 * 2
        props.put("linger.ms", 3);
        props.put("buffer.memory", 1024 * 1024 * 4);//1024 * 16
        props.put("max.request.size", 2097152);
        props.put("metadata.fetch.timeout.ms", 5000); // 15_000
        props.put("key.serializer", KafkaUtils.String_ser);
        props.put("value.serializer", KafkaUtils.ByteArray_ser);
        // props.put("partitioner.class", "dev_ops.tools.ForceRandomPartitioner");
        return props;
    }

    /**
     * Builds a record with the current wall-clock time as its timestamp and no
     * explicit partition (the partitioner chooses one from the key).
     *
     * @param topic destination topic
     * @param key   record key (drives partition assignment)
     * @param msg   record value
     * @return a timestamped {@link ProducerRecord} for the given topic
     */
    public static <R, S> ProducerRecord<R, S> newRecord_Test(String topic, R key, S msg) {
        return new ProducerRecord<R, S>(topic, null, System.currentTimeMillis(), key, msg);
    }

}
