package cn.spark.study.streaming;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;

import java.util.List;

/**
 * Real-time top-3 hot search word statistics over a sliding window,
 * implemented with Spark Streaming's reduceByKeyAndWindow.
 *
 * @author zhangj
 * @date 2020/11/19
 */
public class WindowHotWord {
	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("WindowHotWord");

		JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

		//日志格式
		//leo hello
		//tom world
		JavaReceiverInputDStream<String> searchLogsDStream = jssc.socketTextStream("ymm1", 9999);

		//将搜索日志转换成,只有一个搜索词
		JavaDStream<String> searchWordsDStream = searchLogsDStream.map(new Function<String, String>() {
			@Override
			public String call(String searchLog) throws Exception {
				return searchLog.split(" ")[1];
			}
		});

		//将搜索词映射为(searchWorld,1)的格式
		JavaPairDStream<String, Integer> searchWordPairDStream = searchWordsDStream.mapToPair(new PairFunction<String, String, Integer>() {
			@Override
			public Tuple2<String, Integer> call(String searchWorld) throws Exception {
				return new Tuple2<String, Integer>(searchWorld, 1);
			}
		});

		//针对(searchWord,1)的tuple格式的DStream,执行reduceByKeyAndWindow操作
		//第二个参数,是窗口长度,这里是60s
		//第三个参数,是滑动间隔,这里是10s
		//也就是说每隔10s,将最近60s的数据,作为一个窗口,进行RDD集合,然后统一对RDD进行后续计算
		//10秒钟到了,会将之前60s的RDD,因为一个batch是5s,所以之前60s就有12个RDD,给聚合起来,统一进行reduceByKey操作

		JavaPairDStream<String, Integer> searchWordCountsDStream = searchWordPairDStream.reduceByKeyAndWindow(new Function2<Integer, Integer, Integer>() {
			@Override
			public Integer call(Integer v1, Integer v2) throws Exception {
				return v1 + v2;
			}
		}, Durations.seconds(60), Durations.seconds(10));


		JavaPairDStream<String, Integer> finalDStream = searchWordCountsDStream.transformToPair(new Function<JavaPairRDD<String, Integer>, JavaPairRDD<String, Integer>>() {
			@Override
			public JavaPairRDD<String, Integer> call(JavaPairRDD<String, Integer> searchWordCountsRDD) throws Exception {
				//将搜索词和出现频率进行反转
				JavaPairRDD<Integer, String> countSearchWordsRDD = searchWordCountsRDD.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
					@Override
					public Tuple2<Integer, String> call(Tuple2<String, Integer> tuple) throws Exception {
						return new Tuple2<Integer, String>(tuple._2, tuple._1);
					}
				});

				//然后执行降序排序
				JavaPairRDD<Integer, String> sortedCountSearchWordsRDD = countSearchWordsRDD.sortByKey(false);

				//再次反转,变成(searchWord,count)的格式
				JavaPairRDD<String, Integer> sortedSearchWordCountsRDD = sortedCountSearchWordsRDD.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
					@Override
					public Tuple2<String, Integer> call(Tuple2<Integer, String> tuple) throws Exception {
						return new Tuple2<String, Integer>(tuple._2, tuple._1);
					}
				});

				//然后take取出前三的热点搜索词
				List<Tuple2<String, Integer>> hogSearchWordCounts = sortedSearchWordCountsRDD.take(3);

				for (Tuple2<String, Integer> wordCount : hogSearchWordCounts) {
					System.out.println(wordCount._1 + ": " + wordCount._2);
				}
				return searchWordCountsRDD;
			}
		});

		//这个无关紧要,只是为了触发job的执行,所以必须要有output操作
		finalDStream.print();

		jssc.start();
		jssc.awaitTermination();
		jssc.close();
	}
}
