package cn.lsh.spark;

import org.apache.commons.collections.IteratorUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import scala.Tuple2;

import java.io.Serializable;
import java.util.*;

/**
 * Top N per group.
 * Implementations: convert to a JavaPairRDD, group with groupByKey, then within each
 * group take the first N entries — either by sorting the whole group or by
 * maintaining a fixed-length array.
 */
public class Scores {

	/**
	 * Entry point: starts a local Spark context, loads the score file and runs the
	 * sortByKey + aggregateByKey top-N implementation.
	 *
	 * @param args optional; args[0] overrides the default input path
	 */
	public static void main(String[] args) {
		SparkConf conf = new SparkConf();
		conf.setMaster("local[4]").setAppName("scores");

		JavaSparkContext jsc = new JavaSparkContext(conf);

		// Allow the input path to be supplied on the command line instead of being hard-coded.
		String path = args.length > 0 ? args[0] : "file:/bigdata/hadoop-test/input/spark/scores.txt";
		JavaRDD<String> lines = jsc.textFile(path);
		// Split each line only once per record (the original split twice).
		JavaPairRDD<String, Integer> scoreRdd = lines.mapToPair(line -> {
			String[] parts = line.split(" ");
			return new Tuple2<>(parts[0], Integer.parseInt(parts[1]));
		});
		// error(scoreRdd);
		// normal(scoreRdd);
		reduceByKey(lines);

		jsc.stop();

	}

	/**
	 * Groups by key and sorts the entire value list of each group inside an
	 * Executor. With a very large group this materialises every value in memory
	 * and can cause an OutOfMemoryError — hence the method name.
	 *
	 * @param scoreRdd pair RDD of (group, score)
	 */
	public static void error(JavaPairRDD<String, Integer> scoreRdd) {
		JavaPairRDD<String, Iterable<Integer>> grouped = scoreRdd.groupByKey();
		grouped.map(o -> {
			// Runs on the Executor; the whole group is held in one list, so a
			// large group may OOM.
			List<Integer> list = IteratorUtils.toList(o._2().iterator());
			list.sort(Comparator.reverseOrder());
			// BUG FIX: guard against groups with fewer than 3 values —
			// subList(0, 3) would throw IndexOutOfBoundsException.
			return new Tuple2<>(o._1(), list.subList(0, Math.min(3, list.size())));
		}).foreach(o -> System.out.println(o));
	}

	/**
	 * Groups by key, then keeps the top N of each group via a fixed-length
	 * insertion buffer, so at most N values per group are retained at once.
	 * Note: groups with fewer than 3 values leave trailing {@code null} slots
	 * in the printed list (original behavior, preserved).
	 *
	 * @param scoreRdd pair RDD of (group, score)
	 */
	public static void normal(JavaPairRDD<String, Integer> scoreRdd) {
		JavaPairRDD<String, Iterable<Integer>> grouped = scoreRdd.groupByKey();
		grouped.map(o -> {
			// Fixed-length buffer holding the current top N, largest first.
			Integer[] top3 = new Integer[3];
			for (int v : o._2()) {
				for (int i = 0; i < top3.length; i++) {
					if (top3[i] == null) {
						// Empty slot: take it.
						top3[i] = v;
						break;
					} else if (v > top3[i]) {
						// Shift smaller values down one slot, then insert here.
						for (int j = top3.length - 1; j > i; j--) {
							top3[j] = top3[j - 1];
						}
						top3[i] = v;
						break;
					}
				}
			}
			return new Tuple2<>(o._1(), Arrays.asList(top3));
		}).foreach(o -> System.out.println(o));
	}

	/**
	 * Top N per group via aggregateByKey:
	 * 1. sortByKey on the composite (group, score) key performs a secondary sort;
	 * 2. aggregateByKey maintains a per-key list capped at 3 elements.
	 * Both the sequence function and the combiner re-sort and truncate, so the
	 * result is correct even if shuffle order is not preserved across stages.
	 *
	 * @param lines raw input lines of the form "group score"
	 */
	public static void reduceByKey(JavaRDD<String> lines) {
		JavaPairRDD<BjScore, String> keyed = lines.mapToPair(line -> {
			String[] parts = line.split(" ");
			return new Tuple2<>(new BjScore(parts[0], Integer.parseInt(parts[1])), "");
		});
		JavaPairRDD<String, Integer> scores = keyed.sortByKey(false)
				.mapToPair(o -> new Tuple2<>(o._1().getBj(), o._1().getScore()));
		JavaPairRDD<String, ArrayList<Integer>> top3 = scores.aggregateByKey(new ArrayList<Integer>(),
				(list, score) -> {
					// Keep at most the 3 largest scores seen so far in this partition.
					list.add(score);
					list.sort(Comparator.reverseOrder());
					if (list.size() > 3) {
						list.subList(3, list.size()).clear();
					}
					return list;
				},
				(left, right) -> {
					// BUG FIX: the original combiner ((t1, t2) -> t1) silently
					// discarded the second partition's partial result; merge both
					// lists and keep the overall top 3.
					left.addAll(right);
					left.sort(Comparator.reverseOrder());
					if (left.size() > 3) {
						left.subList(3, left.size()).clear();
					}
					return left;
				});
		top3.foreach(o -> System.out.println(o));
	}

	/**
	 * Composite sort key of (class name, score). Natural order is ascending by
	 * class name, then ascending by score. Serializable because Spark ships keys
	 * across the wire during the shuffle.
	 */
	public static class BjScore implements Comparable<BjScore>, Serializable {
		private String bj;

		private int score;

		public BjScore() {

		}

		public BjScore(String bj, int score) {
			this.bj = bj;
			this.score = score;
		}

		public String getBj() {
			return bj;
		}

		public void setBj(String bj) {
			this.bj = bj;
		}

		public int getScore() {
			return score;
		}

		public void setScore(int score) {
			this.score = score;
		}

		@Override
		public String toString() {
			return bj + " " + score;
		}

		/**
		 * Orders by class name first, then by score.
		 * BUG FIX: the original used Integer.compareUnsigned, which mis-orders
		 * negative scores (they compare as huge unsigned values); the signed
		 * comparison is what a score ordering needs.
		 */
		@Override
		public int compareTo(BjScore o) {
			int i = this.bj.compareTo(o.getBj());
			if (i == 0) {
				i = Integer.compare(this.score, o.getScore());
			}
			return i;
		}

		/** Consistent with compareTo — required for reliable use as a shuffle key. */
		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			if (!(obj instanceof BjScore)) {
				return false;
			}
			BjScore other = (BjScore) obj;
			return score == other.score && Objects.equals(bj, other.bj);
		}

		@Override
		public int hashCode() {
			return Objects.hash(bj, score);
		}
	}
}
