package spark.core

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of common pair-RDD transformations on two small local RDDs:
  * join, leftOuterJoin, reduceByKey, aggregateByKey, groupByKey,
  * cogroup, mapValues, flatMapValues, keys, values and combineByKey.
  */
object SparkTest {
	/**
	  * Runs a sequence of pair-RDD transformation examples against a local
	  * Spark master and prints each result. The expected output for each
	  * step is noted in the inline comments.
	  *
	  * @param args unused command-line arguments
	  */
	def main(args: Array[String]): Unit = {
		val conf: SparkConf = (new SparkConf).setMaster("local").setAppName("TTT")
		val sc = new SparkContext(conf)
		// Ensure the SparkContext is released even if a job fails
		// (the original code never called sc.stop()).
		try {
			val l1 = List(("012","大毛"),("013","二毛"),("014","三毛"))
			val l2 = List(("012",10),("012",0),("012",30),("013",40),("013",40))
			val rdd1: RDD[(String,String)] = sc.parallelize(l1)
			val rdd2: RDD[(String,Int)] = sc.parallelize(l2)

			// join: inner join on key, sorted here by the numeric value
			val rdd3: RDD[(String, (String, Int))] = rdd1.join(rdd2).sortBy(_._2._2)//ArrayBuffer((012,(大毛,0)), (012,(大毛,10)), (012,(大毛,30)), (013,(二毛,40)), (013,(二毛,40)))
			val arr: Array[(String, (String, Int))] = rdd3.collect()
			println(arr.toBuffer)

			// leftOuterJoin: keys missing on the right side get None
			val rdd4: RDD[(String, (String, Option[Int]))] = rdd1.leftOuterJoin(rdd2)//ArrayBuffer((012,(大毛,Some(10))), (012,(大毛,Some(0))), (012,(大毛,Some(30))), (013,(二毛,Some(40))), (013,(二毛,Some(40))), (014,(三毛,None)))
			val rdd5: Array[(String, (String, Option[Int]))] = rdd4.collect()
			println(rdd5.toBuffer)

			// reduceByKey: merges the values that share the same key
			val rdd6: RDD[(String, Int)] = rdd2.reduceByKey(_+_) // List((012,40), (013,80))
			println(rdd6.collect.toList) // List((012,40), (013,80))

			// aggregateByKey: a reduce with an initial (zero) value per key
			val rdd8: RDD[(String, Int)] = rdd2.aggregateByKey(0)(_+_,_+_)
			println(rdd8.collect().toList) // List((012,40), (013,80))

			// groupByKey: groups values sharing a key; each key maps to one iterable
			val rdd7: RDD[(String, Iterable[Int])] = rdd2.groupByKey()
			println(rdd7.collect().toList) //List((012,CompactBuffer(10, 0, 30)), (013,CompactBuffer(40, 40)))

			// cogroup: for each key, returns the iterables of values from both RDDs
			val rdd14: RDD[(String, (Iterable[String], Iterable[Int]))] = rdd1.cogroup(rdd2) // List((012,(CompactBuffer(大毛),CompactBuffer(10, 0, 30))), (013,(CompactBuffer(二毛),CompactBuffer(40, 40))), (014,(CompactBuffer(三毛),CompactBuffer())))
			println(rdd14.collect().toList)

			// mapValues: keys unchanged, only the values are transformed
			val rdd9: RDD[(String, String)] = rdd2.mapValues(_+"被加了")
			println(rdd9.collect().toList)

			// flatMapValues: each value yields a collection, flattened into
			// multiple (key, value) tuples with the original key
			val rdd11: RDD[(String, Int)] = rdd2.flatMapValues(x => x to x+5)
			println(rdd11.collect().toList)

			// keys: an RDD containing only the keys
			val rdd12: RDD[String] = rdd1.keys
			println(rdd12.collect().toList)
			// values: an RDD containing only the values
			val rdd13: RDD[String] = rdd1.values
			println(rdd13.collect().toList)

			// combineByKey signature, for reference:
			//		combineByKey[C](
			//			createCombiner: V => C,  // initializes C from the first value seen in a partition
			//			mergeValue: (C, V) => C, // folds another value into the per-partition combiner
			//			mergeCombiners: (C, C) => C): RDD[(K, C)] // merges combiners across partitions

			// Computes (sum, count) per key, printing each step to show
			// when the three functions are invoked.
			val rdd15: RDD[(String, (Int, Int))] = rdd2.combineByKey(
				(v) => {println(v);(v, 1)},
				(acc: (Int, Int), v) => {println("acc:"+acc);println("v:"+v);(acc._1 + v, acc._2 + 1)},
				(acc1: (Int, Int), acc2: (Int, Int)) => {println("acc1:"+acc1);println("acc2:"+acc2);(acc1._1 + acc2._1, acc1._2 + acc2._2)}
			)

			val rdd16: List[(String, (Int, Int))] = rdd15.collect().toList
			println(rdd16)
		} finally {
			sc.stop()
		}
	}
}

/**
  * Shared variables (broadcast variables) and per-partition actions.
  */
object ScalaTest1 {
	/**
	  * Demonstrates foreachPartition, broadcast variables and keyBy on a
	  * local Spark master, printing each result.
	  *
	  * @param args unused command-line arguments
	  */
	def main(args: Array[String]): Unit = {
		val conf: SparkConf = (new SparkConf).setMaster("local").setAppName("TTT")
		val sc = new SparkContext(conf)
		// Ensure the SparkContext is released even if a job fails
		// (the original code never called sc.stop()).
		try {
			val rdd2 = sc.parallelize(List("大毛","二毛","三毛","四毛","五毛","六毛","七毛"), 3)
			val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9), 3)

			// foreachPartition is an action executed once per partition.
			// Print each partition's sum — the original computed the reduce
			// and silently discarded the result.
			rdd1.foreachPartition(part => println(part.reduce(_ + _)))

			// Broadcast a read-only array to the executors; tasks would read
			// it via broadcasted.value instead of capturing `arr` per task.
			val arr = Array(1, 2, 4, 5, 6)
			val broadcasted: Broadcast[Array[Int]] = sc.broadcast(arr)
			println(broadcasted.value.toList)

			// keyBy: pairs each element with a key derived from itself.
			// Collect before printing — println on the RDD itself would only
			// show its toString, not its contents.
			val keyed: RDD[(String, String)] = rdd2.keyBy(x => x)
			println(keyed.collect().toList)
		} finally {
			sc.stop()
		}
	}
}