package org.eking.bigdata.spark;

import java.util.*;
import java.util.regex.Pattern;

import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.*;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.Time;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.streaming.dstream.ReceiverInputDStream;
import org.apache.spark.streaming.kafka.*;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import scala.Tuple2;

import scala.Tuple2;
import org.apache.spark.streaming.kafka.*;

/**
 * Experimental Spark Streaming word-count jobs fed either from a TCP socket
 * or from a Kafka receiver stream. Each job method builds its own streaming
 * context, wires a small pipeline, then blocks until termination.
 *
 * <p>NOTE(review): this uses the legacy receiver-based Kafka API
 * ({@code org.apache.spark.streaming.kafka.KafkaUtils.createStream}); the
 * ZooKeeper address, consumer group and topics are hard-coded for local
 * testing and should be externalized before real use.
 */
public class SparkStreamTest {
	/** Pre-compiled word separator, shared so it is not recompiled per record. */
	private static final Pattern SPACE = Pattern.compile(" ");

	public static void main(String[] args) {
		System.out.println("xxxxxxxxxxxxxxxxxxxxxxxxxx====================>");
		// Only the Kafka click-user count runs by default; SparkStream(),
		// KafkaSpark() and exp() are kept as alternative reference pipelines.
		CountStream();
	}

	/**
	 * Classic socket word count: reads lines from localhost:9999, splits on
	 * spaces, counts words per 1-second batch and prints the first results.
	 */
	static void SparkStream() {
		SparkConf conf = new SparkConf().setMaster("spark://127.0.0.1:7702").setAppName("NetworkWordCount");
		JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));

		JavaReceiverInputDStream<String> lines = jssc.socketTextStream("localhost", 9999);
		JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(x.split(" ")).iterator());

		// Count each word in each batch.
		JavaPairDStream<String, Integer> pairs = words.mapToPair(s -> new Tuple2<>(s, 1));
		JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey((i1, i2) -> i1 + i2);

		System.out.println("lele test begin");
		wordCounts.print();

		startAndAwait(jssc);
	}

	/**
	 * Consumes JSON click events from Kafka topic "test" (2 receiver threads,
	 * consumer group "test1"), extracts the {@code "ClickUser"} field from
	 * each message value and counts occurrences per 5-second batch.
	 */
	static void CountStream() {
		SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
		JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(5000));

		Map<String, Integer> topicMap = new HashMap<>();
		topicMap.put("test", 2);
		JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, "10.71.200.109:2182",
				"test1", topicMap);

		// Each record is (key, jsonPayload); keep only the ClickUser field.
		JavaDStream<String> userLine = messages.map(tuple2 -> {
			String jsonstr = tuple2._2();
			System.out.println("run here1 :" + jsonstr);
			System.out.println("run here2 :" + tuple2._1());
			JSONObject jobj = JSON.parseObject(jsonstr);
			return jobj.getString("ClickUser");
		});

		JavaPairDStream<String, Integer> userCount = userLine
				.mapToPair(s -> new Tuple2<>(s, 1))
				.reduceByKey((i1, i2) -> i1 + i2);
		userCount.print();

		// Placeholder sink: drains every partition without acting on the
		// elements. Kept so a real output (DB write, HTTP push, ...) can be
		// dropped in without re-plumbing the pipeline.
		userCount.foreachRDD((rdd, time) -> rdd.foreachPartition(it -> {
			while (it.hasNext()) {
				it.next();
			}
		}));

		startAndAwait(jssc);
	}

	/**
	 * Receiver-based Kafka skeleton: subscribes to topic "test" (1 thread,
	 * consumer group "test1") and prints the raw (key, value) pairs.
	 *
	 * <p>NOTE(review): {@code Durations.seconds(2000)} is a 2000-second batch
	 * interval; if a 2-second (or 2000 ms) batch was intended this should be
	 * {@code Durations.seconds(2)} — confirm before relying on this method.
	 */
	static void KafkaSpark() {
		SparkConf conf = new SparkConf().setMaster("spark://127.0.0.1:7702").setAppName("NetworkWordCount");
		JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(2000));

		Map<String, Integer> topicMap = new HashMap<>();
		topicMap.put("test", 1);
		JavaPairReceiverInputDStream<String, String> word = KafkaUtils.createStream(jssc, "10.71.200.109:2182", "test1",
				topicMap, StorageLevel.MEMORY_ONLY());
		word.print();

		startAndAwait(jssc);
	}

	/**
	 * Kafka-fed word count (the stock JavaKafkaWordCount example with
	 * hard-coded arguments): splits each message value on spaces and counts
	 * words per 5-second batch.
	 */
	static void exp() {
		SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
		JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(5000));

		Map<String, Integer> topicMap = new HashMap<>();
		topicMap.put("test", 2);
		JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, "10.71.200.109:2182",
				"test1", topicMap);

		// Keep only the message value; the Kafka key is ignored here.
		JavaDStream<String> lines = messages.map(Tuple2::_2);
		JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());

		JavaPairDStream<String, Integer> wordCounts = words
				.mapToPair(s -> new Tuple2<>(s, 1))
				.reduceByKey((i1, i2) -> i1 + i2);

		System.out.println("lele test begin");
		wordCounts.print();

		startAndAwait(jssc);
	}

	/**
	 * Starts the streaming context and blocks until it terminates.
	 *
	 * @param jssc the context to start and wait on
	 */
	private static void startAndAwait(JavaStreamingContext jssc) {
		try {
			jssc.start();
			jssc.awaitTermination();
		} catch (InterruptedException e) {
			// Restore the interrupt flag so callers can observe the
			// interruption instead of it being silently swallowed.
			Thread.currentThread().interrupt();
		}
	}
}
