package com.aotain.coeus.spark;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

import scala.Tuple2;

/**
 * CU raw access-log file import: consumes records from Kafka via Spark
 * Streaming and keys each record by its access time ("yyyyMMdd/HH").
 * Created 2016/02/26.
 * @author Administrator
 */
public class CUFileImport {

	/**
	 * Entry point. Expects exactly two arguments:
	 * args[0] = ZooKeeper quorum ("host:port,..."), args[1] = comma-separated Kafka topic list.
	 */
	public static void main(String[] args) {
		if (args.length != 2) {
			// BUGFIX: the original called printf("Usage: %s <ZServer><Topic>") with a
			// "%s" but no argument, which throws MissingFormatArgumentException at
			// runtime instead of printing the usage line.
			System.err.printf("Usage: %s <ZServer> <Topic>%n", CUFileImport.class.getName());
			System.exit(1);
		}

		SparkStreaming(args);
	}

	/**
	 * Builds and runs the streaming job: reads raw access-log lines from Kafka and
	 * re-keys each record by the "yyyyMMdd/HH" bucket of its access timestamp.
	 *
	 * @param args args[0] = ZooKeeper quorum, args[1] = comma-separated topic list
	 * @return 0 (in practice unreachable: awaitTermination() blocks until the job stops)
	 */
	public static int SparkStreaming(final String[] args) {

		String zkQuorum = args[0];
		String topicList = args[1];

		// HBase target table / column family; hard-coded pending real arguments
		// (the original notes these were meant to come from args[3]/args[4]).
		String tableName = "test";
		String columnFamily = "f";

		String group = "test-consumer-group";
		int numThreads = 2; // Kafka consumer threads per topic

		// Streaming context with a 60-second batch interval.
		SparkConf conf = new SparkConf().setAppName("CU Access Log Import");
		JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(60));

		// NOTE(review): these broadcasts are never read anywhere in this file —
		// presumably intended for a (missing) HBase write stage; confirm before removing.
		final Broadcast<String> broadcastTableName =
		        jssc.sparkContext().broadcast(tableName);
		final Broadcast<String> broadcastColumnFamily =
		        jssc.sparkContext().broadcast(columnFamily);

		Map<String, Integer> topicMap = new HashMap<String, Integer>();
		for (String topic : topicList.split(",")) {
			topicMap.put(topic, numThreads);
		}

		JavaPairReceiverInputDStream<String, String> messages =
		        KafkaUtils.createStream(jssc, zkQuorum, group, topicMap);

		// Re-key each record by "yyyyMMdd/HH" derived from its access time.
		// Record layout (pipe-separated), per the original comments:
		//   houseid | sourceip | destip | protocol | sourceport | destport |
		//   domainname | url | duration | accesstime
		JavaPairDStream<String, String> lines =
				messages.mapToPair(new PairFunction<Tuple2<String, String>, String, String>() {
		          @Override
		          public Tuple2<String, String> call(Tuple2<String, String> tuple2) {
		        	  // accesstime is field 10 (index 9), a Unix epoch in seconds.
		        	  // NOTE(review): a record with fewer than 10 fields or a
		        	  // non-numeric timestamp will fail the batch — confirm the
		        	  // upstream feed guarantees well-formed lines.
		        	  String[] items = tuple2._2().split("\\|");
		        	  long accessTime = Long.parseLong(items[9]);

		        	  // SimpleDateFormat is not thread-safe, so instances stay
		        	  // local to each call. (Renamed locals: the original declared
		        	  // "String Date", shadowing java.util.Date.)
		        	  SimpleDateFormat dayFormat = new SimpleDateFormat("yyyyMMdd");
		        	  SimpleDateFormat hourFormat = new SimpleDateFormat("HH");
		        	  Date accessDate = new Date(accessTime * 1000L);

		        	  String key = dayFormat.format(accessDate) + "/" + hourFormat.format(accessDate);
		        	  return new Tuple2<String, String>(key, tuple2._2());
		          }
		        });

		// NOTE(review): no output operation (print/saveAs.../foreachRDD) is ever
		// registered on "lines", so jssc.start() will fail with "no output operations
		// registered". The intended sink (HBase/HDFS write, given the broadcasts
		// above) appears to have been removed — TODO restore it.
		jssc.start(); // Start the computation.
		jssc.awaitTermination(); // Block until the streaming job terminates.

		return 0;
	}
}
