package com.yc.streaming.app;

import java.util.HashMap;
import java.util.Map;

import kafka.common.TopicAndPartition;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

import com.yc.streaming.common.AppConfig;
import com.yc.streaming.common.AppConstants;

import scala.Tuple2;

/**
 * Spark Streaming job that consumes \001-delimited order lines from Kafka and
 * prints, per batch: the total message count, the count of UPDATE lines, the
 * count of DELETE lines, and a per-op_type breakdown.
 *
 * <p>Usage: an optional single CLI argument overrides the configured batch
 * duration (in seconds).
 */
public class KafkaMessageCount {

	// 0-based column index of the op_type field in a \001-delimited order line.
	private static final int OP_TYPE_INDEX = 86;

	// Partition count of the order topic; all offsets are seeded at 0 so the
	// job re-reads from the beginning. TODO(review): read this from config
	// instead of hard-coding — verify the topic really has 18 partitions.
	private static final int ORDER_TOPIC_PARTITIONS = 18;

	public static void main(String[] args) {
		String brokers = AppConfig.get(AppConstants.APP_KAFKA_BROKERS);
		String orderTopic = AppConfig.get(AppConstants.APP_KAFKA_TOPIC_ORDER);
		int duration = Integer.parseInt(AppConfig.get(AppConstants.APP_DURATION));
		// A single CLI argument overrides the configured batch duration (seconds).
		if (args != null && args.length == 1) {
			duration = Integer.parseInt(args[0]);
		}

		SparkConf conf = new SparkConf()
			.set("spark.streaming.unpersist", "true")  // let Spark decide which RDDs to unpersist (helps GC)
			.set("spark.default.parallelism", "8")     // default task count for reduceByKey etc.
			.set("spark.storage.memoryFraction", "0.5")
			.set("spark.shuffle.consolidateFiles", "true")
			.set("spark.streaming.kafka.maxRatePerPartition", "10000"); // throttle: max records/sec/partition

		JavaStreamingContext context = new JavaStreamingContext(conf, Durations.seconds(duration));

		Map<String, String> kafkaParams = new HashMap<String, String>();
		kafkaParams.put("metadata.broker.list", brokers);
		kafkaParams.put("auto.offset.reset", "smallest"); // start from earliest when no committed offset exists

		// Seed every partition of the order topic at offset 0.
		Map<TopicAndPartition, Long> offsets = new HashMap<TopicAndPartition, Long>();
		for (int i = 0; i < ORDER_TOPIC_PARTITIONS; i++) {
			offsets.put(new TopicAndPartition(orderTopic, i), 0L);
		}

		JavaDStream<String> messages = KafkaUtils.<String, String, StringDecoder, StringDecoder, String>createDirectStream(
				context,
				String.class,
				String.class,
				StringDecoder.class,
				StringDecoder.class,
				String.class,
				kafkaParams,
				offsets,
				new Function<MessageAndMetadata<String, String>, String>() {
					private static final long serialVersionUID = 1L;
					@Override
					public String call(MessageAndMetadata<String, String> v1) throws Exception {
						return v1.message(); // keep only the payload, drop the metadata
					}
				})
				// Persisted because three downstream pipelines re-read this stream.
				.persist(StorageLevel.MEMORY_AND_DISK_SER());

		// Lines whose (trimmed) op_type equals UPDATE / DELETE, keyed by record id.
		JavaPairDStream<String, String> orderLinesUpdate =
				messages.mapToPair(opTypeTagger("UPDATE")).filter(nonNullValue());
		JavaPairDStream<String, String> orderLinesDelete =
				messages.mapToPair(opTypeTagger("DELETE")).filter(nonNullValue());

		// Per-batch count of each raw (untrimmed) op_type value.
		JavaPairDStream<String, Long> opTypeCounts = messages.mapToPair(new PairFunction<String, String, Long>() {
			private static final long serialVersionUID = 1L;
			@Override
			public Tuple2<String, Long> call(String line) throws Exception {
				String[] items = line.split("\001");
				// Guard against short/malformed records: bucket them under a
				// sentinel key instead of crashing the batch with an
				// ArrayIndexOutOfBoundsException.
				String opType = items.length > OP_TYPE_INDEX ? items[OP_TYPE_INDEX] : "<malformed>";
				return new Tuple2<String, Long>(opType, 1L);
			}
		}).reduceByKey(new Function2<Long, Long, Long>() {
			private static final long serialVersionUID = 1L;
			@Override
			public Long call(Long a, Long b) throws Exception {
				return a + b;
			}
		});

		messages.count().print();
		orderLinesUpdate.count().print();
		orderLinesDelete.count().print();
		opTypeCounts.print();

		context.start();
		// Blocks until the streaming context is stopped (externally or by error);
		// explicit stop()/close() afterwards would be unreachable and is omitted.
		context.awaitTermination();
	}

	/**
	 * Builds a mapper that tags each line's id (column 0) with "1" when its
	 * trimmed op_type column equals {@code opType}, and with {@code null}
	 * otherwise (so non-matching and malformed lines can be filtered out).
	 */
	private static PairFunction<String, String, String> opTypeTagger(final String opType) {
		return new PairFunction<String, String, String>() {
			private static final long serialVersionUID = 1L;
			@Override
			public Tuple2<String, String> call(String line) throws Exception {
				String[] items = line.split("\001");
				String id = items[0]; // split always yields at least one element
				// Guard: a record too short to carry op_type is tagged null (dropped downstream).
				if (items.length <= OP_TYPE_INDEX) {
					return new Tuple2<String, String>(id, null);
				}
				if (opType.equals(items[OP_TYPE_INDEX].trim())) {
					return new Tuple2<String, String>(id, "1");
				}
				return new Tuple2<String, String>(id, null);
			}
		};
	}

	/** Predicate keeping only pairs whose value is non-null. */
	private static Function<Tuple2<String, String>, Boolean> nonNullValue() {
		return new Function<Tuple2<String, String>, Boolean>() {
			private static final long serialVersionUID = 1L;
			@Override
			public Boolean call(Tuple2<String, String> t) throws Exception {
				return t._2() != null;
			}
		};
	}
}
