package wordWount

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}
import org.junit.{After, Before, Test}

import scala.collection.mutable.ArrayBuffer

/**
 * RDD operator exercises: word-count variants and Spark transformation/action demos,
 * each runnable as an individual JUnit test.
 */
class WordCount {
	val conf: SparkConf = new SparkConf()
	// Assigned in init(); a fresh SparkContext is created before and stopped after every test.
	var sc: SparkContext = _
	var outpath: String = "out"
	import util.MyPredef._

	@Before
	def init(): Unit = {

		conf.setAppName("WordCount")
		conf.setMaster("local")
		sc = new SparkContext(conf)

		// Remove output left over from a previous run (MyPredef enriches String with delete).
		outpath.delete
	}

	@After
	def after(): Unit = {
		sc.stop()
	}


	/**
	 * reduceByKey: the same function is used both within a partition and across partitions.
	 */
	@Test
	def wc1(): Unit = {
		val text = sc.textFile("in")
		// Fixed: the old println("...", n) passed a tuple and printed "(label,n)".
		println(s"分区数量: ${text.getNumPartitions}")
		val split = text.flatMap(_.split(" "))

		val res: RDD[(String, Int)] = split.map((_, 1)).reduceByKey(_ + _)
		res.foreach(println)
	}

	/**
	 * combineByKey: user-defined intra-partition and inter-partition functions.
	 */
	@Test
	def wc2(): Unit = {
		val text = sc.textFile("in")
		val split = text.flatMap(_.split(" "))


		val value: RDD[(String, Int)] = split.map((_, 1))

		// createCombiner, mergeValue (within a partition), mergeCombiners (across partitions)
		val res: RDD[(String, Int)] = value.combineByKey(x => x, (a: Int, b: Int) => a + b, (m: Int, n: Int) => m + n)

		res.foreach(println)
	}

	/**
	 * groupByKey: no map-side combine, so all values are shuffled — poor performance.
	 */
	@Test
	def wc3(): Unit = {
		val text = sc.textFile("in")
		val split = text.flatMap(_.split(" "))
		val value: RDD[(String, Int)] = split.map((_, 1))

		val group: RDD[(String, Iterable[Int])] = value.groupByKey()
		// Take size directly on the Iterable; no need to materialize a List first.
		val unit: RDD[(String, Int)] = group.mapValues(_.size)
		unit.foreach(println)
	}

	/**
	 * aggregateByKey: zero value is applied once per partition; separate
	 * intra-partition and inter-partition functions (it DOES map-side combine).
	 * With zeroValue = 6 each partition's per-key result starts at 6,
	 * which shows how the zero is added per partition, not once globally.
	 */
	@Test
	def wc4(): Unit = {
		val text = sc.textFile("in")
		val value: RDD[(String, Int)] = text.flatMap(_.split(" ")).map((_, 1))


		val res1: RDD[(String, Int)] = value.aggregateByKey(0)((a: Int, b: Int) => a + b, (m: Int, n: Int) => m + n)
		val res2: RDD[(String, Int)] = value.aggregateByKey(6)((a: Int, b: Int) => a + b, (m: Int, n: Int) => m + n)

		res1.foreachPartition(x => println(x.toArray.mkString))
		println("------------")
		res2.foreachPartition(x => println(x.toArray.mkString))

	}

	/**
	 * foldByKey: a reduceByKey with a zero value; the zero is applied once per partition.
	 * (v: V) => cleanedFunc(createZero(), v)  -- zero value
	 * cleanedFunc  -- within a partition
	 * cleanedFunc  -- across partitions
	 * partitioner  -- decides the shuffle
	 * The SAME function aggregates both within and across partitions.
	 */
	@Test
	def wc5(): Unit = {
		val text = sc.textFile("in")
		val value: RDD[(String, Int)] = text.flatMap(_.split(" ")).map((_, 1))

		val res1 = value.foldByKey(0)(_ + _)
		val res2 = value.repartition(3).foldByKey(2)(_ + _)

		res1.foreachPartition(x => println(x.toArray.mkString))
		println("------------")
		res2.foreachPartition(x => println(x.toArray.mkString))
	}

	@Test
	def mapTest(): Unit = {
		val ListRDD: RDD[Int] = sc.parallelize(1 to 10)

		// map: ships each element to the executor individually — more network
		// overhead, less memory pressure; the function runs once per element.
		val mapRDD: RDD[Int] = ListRDD.map(_ * 2)

		// mapPartitions: ships a whole partition at a time — less network
		// overhead, more memory pressure; the function runs once per partition.
		val mapPartitionsRDD: RDD[Int] = ListRDD.mapPartitions(datas => {
			datas.map(_ * 2)
		})

		mapRDD.foreach(println)
		mapRDD.foreachPartition(_.foreach(print))
		mapPartitionsRDD.foreach(println)
	}

	/**
	 * 1) textFile(""): partitioning follows hadoopFile's split rules; a minimum
	 *    partition count can be requested.
	 * 2) makeRDD(seq) / parallelize(seq): explicit partition count; slices are
	 *    computed roughly as:
	 *    (0 until numSlices).iterator.map { i =>
	 *      val start = ((i * length) / numSlices).toInt
	 *      val end = (((i + 1) * length) / numSlices).toInt
	 *      (start, end)
	 *    }
	 */
	@Test
	def createRDD(): Unit = {
		//		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		//		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))


		// 1) From a collection: makeRDD delegates to parallelize; partition count is explicit.
		val makeRDD: RDD[Int] = sc.makeRDD(Array(1, 2, 3, 4, 5), 2)
		println(s"makeRDD:${makeRDD.getNumPartitions}")

		val parallelizeRDD: RDD[Int] = sc.parallelize(Array(1, 2, 3, 4))
		println(s"parallelizeRDD:${parallelizeRDD.getNumPartitions}")

		// 2) From a local or distributed file system; the second argument is a
		//    MINIMUM partition count — actual splits follow Hadoop's rules.
		val fileRDD: RDD[String] = sc.textFile("testData/input/word/word.txt", 2)

	}

	@Test
	def mapPartitionsWithIndexTest(): Unit = {
		// val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[Int] = sc.parallelize(1 to 10, 2)

		// Pattern-match to destructure the (partitionIndex, iterator) pair.
		val indexRDD: RDD[String] = listRDD.mapPartitionsWithIndex {
			case (nums, datas) => {
				datas.map((data: Int) => s"分区号:${nums}数据：${data}")
			}
		}
		indexRDD.foreach(println)


		val res: RDD[(String, Int)] = sc.textFile(path = "in").flatMap(_.split(" ")).map((_, 1)).mapPartitions {
			case datas => {
				datas.map(x => (x._1, x._2 * 2))
			}
		}
		res.foreach(println)
	}

	/**
	 * glom: collects each partition's elements into one Array per partition.
	 */
	@Test
	def testGlom(): Unit = {
		// val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[Int] = sc.parallelize(1 to 10, 3)

		val glomRDD: RDD[Array[Int]] = listRDD.glom()

		glomRDD.collect().foreach(arr => println(arr.mkString(",")))

	}

	/**
	 * distinct: de-duplicates; the result partition count can be specified.
	 */
	@Test
	def testDistinct(): Unit = {

		//		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[Int] = sc.parallelize(Array(1, 2, 1, 2, 3, 4, 5), 4)

		// val distinctRDD: RDD[Int] = listRDD.distinct()
		val res: RDD[String] = sc.textFile("in").flatMap(_.split(" ")).distinct(2)
		//		res.foreach(println)
		res.saveAsObjectFile(outpath)
	}

	/**
	 * coalesce: reduces the partition count.
	 * By default it does NOT shuffle — partitions are simply merged.
	 */
	@Test
	def testCoalesce(): Unit = {

		//		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[Int] = sc.parallelize(1 to 10, 4)

		val coalesce: RDD[Int] = listRDD.coalesce(3)

		//		coalesce.collect().foreach(println)

		val glomRDD1: RDD[Array[Int]] = listRDD.glom()
		glomRDD1.foreach(arr => println(arr.mkString(",")))

		val glomRDD2: RDD[Array[Int]] = coalesce.glom()
		glomRDD2.foreach(arr => println(arr.mkString(",")))

	}

	@Test
	def testCoalesce2(): Unit = {

		val rdd1: RDD[String] = sc.textFile("in", 4).flatMap(_.split(" "))
		val rdd2: RDD[String] = rdd1.coalesce(2)

		rdd1.foreachPartition(x => {
			println(x.toList)
		})
		println("=-=-=-=-=-=-=-=-=-=-=-=-")
		rdd2.foreachPartition(x => {
			println(x.toList)
		})

	}

	/**
	 * repartition = coalesce(numPartitions, shuffle = true).
	 * It uses a HashPartitioner whose key is a random number in [0, numPartitions).
	 */
	@Test
	def testRepartition(): Unit = {
		val rdd1: RDD[String] = sc.textFile("in", 6).flatMap(_.split(" "))
		val rdd2: RDD[String] = rdd1.repartition(3)

		rdd1.foreachPartition(x => {
			println(x.toList)
		})
		println("=-=-=-=-=-=-=-=-=-=-=-=-")
		rdd2.foreachPartition(x => {
			println(x.toList)
		})
	}

	/**
	 * sortBy: each partition is sorted AND the partitions themselves are ordered.
	 */
	@Test
	def testSortBy(): Unit = {

		// val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val rdd1: RDD[Int] = sc.parallelize(Array(1, 10, 8, 9, 4, 6), 2)
		val rdd2: RDD[Int] = rdd1.sortBy(x => x, numPartitions = 3)


		rdd1.foreachPartition(x => {
			println(x.toList)
		})
		println("=-=-=-=-=-=-=-=-=-=-=-=-")
		rdd2.foreachPartition(x => {
			println(x.toList)
		})


		val rdd3: RDD[String] = sc.textFile("in", 4).flatMap(_.split(" "))
		val rdd4: RDD[String] = rdd3.sortBy(x => x, numPartitions = 3)

		rdd3.foreachPartition(x => {
			println(x.toList)
		})
		println("=-=-=-=-=-=-=-=-=-=-=-=-")
		rdd4.foreachPartition(x => {
			println(x.toList)
		})

	}

	/**
	 * sortByKey: sorts by key, ascending or (here) descending.
	 */
	@Test
	def testSortByKey(): Unit = {
		//		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8), ("a", 5)), 2)
		listRDD.sortByKey(false).collect().foreach(println)

	}

	/**
	 * union: simple concatenation; the result partition count is the sum of both inputs'.
	 */
	@Test
	def testUnion(): Unit = {

		// val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val rdd1: RDD[String] = sc.textFile("in", 4).flatMap(_.split(" "))
		val rdd2: RDD[String] = rdd1.sortBy(x => x, numPartitions = 3)

		val res = rdd1.union(rdd2)

		res.foreachPartition(x => {
			println(x.toArray.mkString(", "))
		})
		println(res.getNumPartitions)

	}

	/**
	 * join: inner join — a key missing from either RDD is dropped.
	 * cogroup: full grouping — a key present in either RDD is kept;
	 *          values for the same key are gathered into Iterables.
	 */
	@Test
	def testJoin(): Unit = {
		//		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD1: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4)), 2)
		val listRDD2: RDD[(String, Int)] = sc.parallelize(Array(("a", 30), ("a", 20), ("b", 40)), 2)

		val tmp1: RDD[(String, (Int, Int))] = listRDD1.join(listRDD2)
		val res1: RDD[(String, Int, Int)] = tmp1.map {
			case (k1, (v1, v2)) => (k1, v1, v2)
		}

		val tmp2: RDD[(String, (Iterable[Int], Iterable[Int]))] = listRDD1.cogroup(listRDD2)
		val res2: RDD[(String, Int, Int)] = tmp2.flatMap {
			case (key, (values1, values2)) => {
				// Pair up both value sets positionally, padding the shorter side with 0.
				val res: ArrayBuffer[(String, Int, Int)] = new ArrayBuffer[(String, Int, Int)]()

				val it1 = values1.toIterator
				val it2 = values2.toIterator
				while (it1.hasNext || it2.hasNext) {
					if (it1.hasNext && it2.hasNext) res += ((key, it1.next(), it2.next()))
					if (it1.hasNext && it2.isEmpty) res += ((key, it1.next(), 0))
					if (it1.isEmpty && it2.hasNext) res += ((key, 0, it2.next()))
				}
				res
			}
		}

		val tmp3: RDD[(String, (Int, Option[Int]))] = listRDD1.leftOuterJoin(listRDD2)
		val res3: RDD[(String, Int, Int)] = tmp3.map {
			case (k1, (v1, Some(x))) => (k1, v1, x)
			case (k1, (v1, None)) => (k1, v1, 0)
		}

		val tmp4: RDD[(String, (Option[Int], Int))] = listRDD1.rightOuterJoin(listRDD2)
		val tmp5: RDD[(String, (Option[Int], Option[Int]))] = listRDD1.fullOuterJoin(listRDD2)
		println("=-=-=-=-=-= join -=-=-=-=-=-=-")

		res1.foreachPartition(x => {
			println(x.toList.mkString(", "))
		})

		println("=-=-=-=-=-= cogroup -=-=-=-=-=-=-")
		res2.foreachPartition(x => {
			println(x.toList.mkString(", "))
		})

		println("=-=-=-=-=-= left join -=-=-=-=-=-=-")
		res3.foreachPartition(x => {
			println(x.toList.mkString(", "))
		})
	}

	/**
	 * left join: values are (Int, Option[Int]).
	 * The result takes the larger partition count; without an explicit
	 * parallelism setting the partitioner is still HashPartitioner.
	 */
	@Test
	def testLeftJoin(): Unit = {
		//		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD1: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4)), 3)
		val listRDD2: RDD[(String, Int)] = sc.parallelize(Array(("a", 30), ("a", 20), ("b", 40)), 2)


		val tmp3: RDD[(String, (Int, Option[Int]))] = listRDD1.leftOuterJoin(listRDD2)

		println("=-=-=-=-=-= left join -=-=-=-=-=-=-")
		listRDD1.foreachPartition(x => {
			println(x.toList.mkString(", "))
		})

		println("=-=-=-=-=-= left join -=-=-=-=-=-=-")
		listRDD2.foreachPartition(x => {
			println(x.toList.mkString(", "))
		})

		println("=-=-=-=-=-= left join -=-=-=-=-=-=-")
		tmp3.foreachPartition(x => {
			println(x.toList.mkString(", "))
		})
		println("stop")
	}

	/**
	 * Action operators trigger sc.runJob:
	 * reduce, aggregate, ...
	 */
	@Test
	def testAction(): Unit = {
		val test: RDD[(String, Int)] = sc.textFile("in", 4).flatMap(_.split(" ")).map((_, 1))
		val tuple: (String, Int) = test.reduce {
			case ((k1, v1), (k2, v2)) => (k1 + " " + k2, v1 + v2)
		}
		println(tuple)

		val test1: RDD[String] = sc.textFile("in", 2).flatMap(_.split(" "))
		val str: String = test1.aggregate("")((x: String, y: String) => x + y, (x: String, y: String) => x + "\t" + y)
		println(str)

	}
}

object TestOperate {

	/**
	 * groupBy: groups elements by the function's return value.
	 * The partition count is unchanged.
	 */
	def testGroupBy(): Unit = {
		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[Int] = sc.parallelize(1 to 10, 4)
		val groupRDD: RDD[(Int, Iterable[Int])] = listRDD.groupBy(_ % 5)
		groupRDD.collect().foreach(println)
		println(s"makeRDD:${groupRDD.getNumPartitions}")
		val outPath = "testData/output/word"
		import util.MyPredef.delete
		// Clear the previous run's output before saving.
		outPath.delete()
		groupRDD.saveAsTextFile(outPath)

	}


	/**
	 * partitionBy: redistributes a pair RDD with an explicit Partitioner,
	 * contrasted with repartition's random redistribution.
	 */
	def testPartitionBy(): Unit = {
		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD1: RDD[(Int, String)] = sc.parallelize(Array((1, "a"), (2, "a"), (3, "a"), (4, "a"), (5, "a")), 2)

		listRDD1.repartition(3).glom().collect().foreach(x => println(x.mkString(",")))
		listRDD1.partitionBy(new HashPartitioner(3)).glom().collect().foreach(x => println(x.mkString(",")))
		//		listRDD1.partitionBy(new RangePartitioner[Int](3)).glom().collect().foreach(x => println(x.mkString(",")))


	}

	/**
	 * aggregateByKey(zeroValue: U)(seqOp: (U, V) => U, combOp: (U, U) => U)
	 *  - zeroValue: per-partition initial value (a single first value cannot be
	 *    combined pairwise, so the zero seeds each partition)
	 *  - seqOp:  combines values of the same key WITHIN a partition
	 *  - combOp: combines results of the same key ACROSS partitions
	 *
	 * Equivalent to combineByKey, just written in curried form.
	 *
	 * Exercise: take the max per key within each partition, then sum across partitions.
	 */
	def testAggregateByKey(): Unit = {
		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8), ("a", 5)), 2)
		listRDD.glom().collect().foreach(x => println(x.mkString("_")))
		val aggregateRDD: RDD[(String, Int)] = listRDD.aggregateByKey(100)(math.max(_: Int, _: Int), _ + _)
		aggregateRDD.glom().collect().foreach(x => println(x.mkString("_")))

		// With zero = 0 and _ + _ for both functions this is identical to reduceByKey.
		val reduceByKey: RDD[(String, Int)] = listRDD.aggregateByKey(0)(_ + _, _ + _)


	}

	/**
	 * foldByKey: a reduceByKey with a zero value.
	 * (v: V) => cleanedFunc(createZero(), v)  -- zero value
	 * cleanedFunc  -- within a partition
	 * cleanedFunc  -- across partitions
	 * partitioner  -- decides the shuffle
	 *
	 * The SAME function aggregates both within and across partitions.
	 */
	def testFoldByKey(): Unit = {
		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8), ("a", 5)), 2)

		listRDD.foldByKey(0)(_ + _).collect().foreach(println)
	}

	/**
	 * combineByKey: has an initial value whose STRUCTURE may differ from V.
	 *  - createCombiner: V => C        builds the initial combiner
	 *  - mergeValue: (C, V) => C       within a partition
	 *  - mergeCombiners: (C, C) => C   across partitions
	 *
	 * Exercise: compute the mean value per key.
	 */
	def combineByKey(): Unit = {
		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8), ("a", 5)), 2)

		// Combiner is (sum, count) so the mean can be derived afterwards.
		val combineRDD: RDD[(String, (Int, Int))] = listRDD.combineByKey(
			(_, 1),
			(c: (Int, Int), v) => (c._1 + v, c._2 + 1),
			(m1: (Int, Int), m2: (Int, Int)) => (m1._1 + m2._1, m1._2 + m2._2)
		)
		combineRDD.map {
			case (k, v) => (k, v._1.toDouble / v._2.toDouble)
		}.collect().foreach(println)


		val combineRDD2: RDD[(String, Int)] = listRDD.combineByKey(x => x, (a1: Int, a2: Int) => a1 + a2, (a1: Int, a2: Int) => a1 + a2)

		combineRDD2.collect().foreach(println)

	}

	/**
	 * reduceByKey: func: (V, V) => V — input and output types are the same;
	 * uses the default HashPartitioner. Internally it is combineByKeyWithClassTag(
	 *   (v: V) => v, func, func, partitioner).
	 */
	def testReduceByKey(): Unit = {
		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8), ("a", 5)), 2)

		listRDD.glom().collect().foreach(x => println(x.mkString("_")))
		val reduceRDD: RDD[(String, Int)] = listRDD.reduceByKey(_ + _)

		reduceRDD.glom().collect().foreach(x => println(x.mkString("_")))
	}


	/**
	 * Action operator notes:
	 *  - takeOrdered(): first n elements after sorting (cf. take)
	 *  - aggregate(): zero value used both within AND across partitions
	 *    (aggregateByKey only uses it within partitions)
	 *  - saveAsTextFile("") / saveAsSequenceFile("") / saveAsObjectFile("")
	 *  - countByKey(): occurrences per key
	 *  - foreach(func): iterates over the data on each executor
	 *  - foreachPartition(): runs once per partition — higher memory use, may
	 *    OOM; analogous to mapPartitions
	 *
	 * Every action triggers one sc.runJob(); the TaskScheduler submits the job.
	 */
	def testAction(): Unit = {
		val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local[*]"))
		val listRDD: RDD[(String, Int)] = sc.parallelize(Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8), ("a", 5)), 2)
		//		listRDD.foreachPartition()

	}

	def main(args: Array[String]): Unit = {

		// createTest()
		// testMap()
		// testMapPartitionsWithIndex()
		// testGlom()
		// testGroupBy()
		// testDistinct()
		// testCoalesce()
		// testRepartition()
		// testSortBy()
		// testUnion()

		// Pair-RDD operators
		// testPartitionBy()
		// testAggregateByKey()
		// testFoldByKey()
		// combineByKey()
		// testReduceByKey()
		// testSortByKey()
		// testJoin()
		// Action operators
		testAction()

	}
}

object CheckpointTest {
	/**
	 * Demonstrates cache() + checkpoint(): nothing is materialized until an
	 * action runs, at which point the lineage shown by toDebugString reflects
	 * the checkpoint.
	 */
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setAppName("test").setMaster("local[*]")
		val sc = new SparkContext(conf)
		sc.setCheckpointDir("cp")

		val pairs = Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8), ("a", 5))
		val source: RDD[(String, Int)] = sc.parallelize(pairs, 2)

		// Identity map that logs each element, to show when recomputation happens.
		val traced: RDD[(String, Int)] = source.map { kv =>
			println(kv)
			kv
		}
		traced.cache()
		traced.checkpoint()

		val summed: RDD[(String, Int)] = traced.reduceByKey(_ + _)
		summed.collect()
		// The checkpoint only takes effect once an action has been executed.
		println(summed.toDebugString)
	}
}

