package com.sailing.lianxi.kafka;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Properties;
import java.util.UUID;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

import com.sailing.lianxi.common.Constants;
import com.sailing.lianxi.common.ObjTool;
import com.sailing.lianxi.entity.UserInfo;
import com.sun.org.apache.bcel.internal.generic.NEW;
/**
 * Kafka producer demos: sends String messages and byte-array (Java-serialized
 * object) messages synchronously to the topic named by {@link Constants#TOPIC_NAME}.
 *
 * @author Administrator
 */
public class MyProducer {

	// Shared timestamp formatter. NOTE(review): SimpleDateFormat is NOT
	// thread-safe; acceptable here because this demo runs single-threaded.
	static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

	/**
	 * Builds the producer configuration shared by both send methods.
	 *
	 * @param keySerializer   fully qualified class name of the key serializer
	 * @param valueSerializer fully qualified class name of the value serializer
	 * @return producer {@link Properties}
	 */
	private static Properties buildProps(String keySerializer, String valueSerializer) {
		Properties props = new Properties();
		// Kafka broker address list
		props.put("bootstrap.servers", Constants.KAFKA_SERVERS);
		// Logical client id shown in broker-side logs/metrics
		props.put("client.id", "DemoProducer");
		/* acks=0          producer does not wait for any broker acknowledgement
		   acks=1          waits only for the leader partition's acknowledgement
		   acks=all / -1   waits for the leader and all in-sync follower replicas */
		props.put("acks", "all");
		// Number of retries after a failed send
		props.put("retries", 0);
		// Max bytes per batch -- 16 KB (the original comment claimed 16M); batching prerequisite 1/2
		props.put("batch.size", 16384);
		// Wait up to 10 ms for more records before sending a batch
		// (default 0 = send immediately); batching prerequisite 2/2
		props.put("linger.ms", 10);
		// Total producer buffer: 32 MB. When exhausted, send() blocks and
		// throws TimeoutException once max.block.ms is exceeded.
		props.put("buffer.memory", 33554432);
		// Key serializer: when no partition is given, the key's hash picks the partition
		props.put("key.serializer", keySerializer);
		// Value serializer: turns the payload object into a byte array
		props.put("value.serializer", valueSerializer);
		// Custom partitioner (disabled)
		// props.put("partitioner.class", "com.sailing.lianxi.kafka.MyPartition");
		return props;
	}

	/**
	 * Sends three String messages, assigning partitions round-robin over
	 * {@link Constants#KAFKA_PARTITION_NUM}. Each send blocks on {@code get()}
	 * until the broker acknowledges, so the calls are effectively synchronous.
	 */
	public static void sendStringData() {
		Properties props = buildProps(StringSerializer.class.getName(), StringSerializer.class.getName());
		// try-with-resources guarantees the producer is flushed and closed
		try (Producer<String, String> producer = new KafkaProducer<String, String>(props)) {
			for (int i = 0; i < 3; i++) {
				// If the topic does not exist yet, it is auto-created with the
				// broker-configured partition/replication counts.
				int partition = i % Constants.KAFKA_PARTITION_NUM;
				String msg = "a";
				ProducerRecord<String, String> message = new ProducerRecord<String, String>(
						Constants.TOPIC_NAME, partition, String.valueOf(i), msg + i);
				// Callback reports per-record outcome; get() blocks until acknowledged.
				producer.send(message, new Callback() {
					public void onCompletion(RecordMetadata metadata, Exception exception) {
						if (null != exception) {
							// metadata may be null when the send failed -- do not dereference it
							System.out.println("插入记录报错");
							System.out.println(exception.getMessage());
						} else {
							System.out.println("分区：" + metadata.partition() + ",offset:" + metadata.offset());
						}
					}
				}).get();
			}
			System.out.println("生产数据完成。。");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Sends ten Java-serialized {@link UserInfo} objects to partition 0 using
	 * byte-array serializers for both key and value, blocking on each send.
	 */
	public static void sendByteData() {
		Properties props = buildProps(
				"org.apache.kafka.common.serialization.ByteArraySerializer",
				"org.apache.kafka.common.serialization.ByteArraySerializer");
		// Generic types now match the configured ByteArraySerializer
		// (the original used Producer<String, String> plus raw types and a cast).
		try (Producer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(props)) {
			for (int i = 0; i < 10; i++) {
				ProducerRecord<byte[], byte[]> message = new ProducerRecord<byte[], byte[]>(
						Constants.TOPIC_NAME, 0, ObjTool.ObjectToByte(i), ObjTool.ObjectToByte(getUserInfo(i)));
				// Synchronous send: get() blocks until the broker acknowledges
				RecordMetadata recordMetadata = producer.send(message).get();
				System.out.println("分区：" + recordMetadata.partition() + "offset:" + recordMetadata.offset());
			}
			System.out.println("生产数据完成。。");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Builds a sample {@link UserInfo} whose fields are derived from the index.
	 *
	 * @param index used in the uuid/name and as the age
	 * @return a populated demo user
	 */
	public static UserInfo getUserInfo(int index) {
		UserInfo userInfo = new UserInfo();
		userInfo.setUuid("XS_" + index + "" + UUID.randomUUID());
		userInfo.setUserName("张三" + index);
		userInfo.setBirth(sdf.format(new Date()));
		userInfo.setAge(index);
		return userInfo;
	}

	/** Entry point: runs the String-message demo. */
	public static void main(String[] args) {
		sendStringData();
	}
}
