package com.offcn.spark.p2

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates common Spark RDD transformations (mapPartitions, sample,
 * union, the join family, and coalesce/repartition) on a local SparkContext.
 *
 * @author BigData-LGW
 * @since 2020/12/6 16:25
 * @version 1.0
 */
object TransformationApp {

    /**
     * Entry point: builds a local SparkContext, runs the currently selected
     * transformation demo, then stops the context.
     */
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setAppName("TransformationApp")
            .setMaster("local[*]")
        val sc = new SparkContext(conf)
        // Uncomment exactly one demo to run it:
//        mapPartition(sc)
//        sample(sc)
//        union(sc)
//        join(sc)
        coalesce(sc)
        sc.stop()
    }

    /**
     * Demonstrates `mapPartitions` and `mapPartitionsWithIndex`: the supplied
     * function is invoked once per partition (receiving an Iterator) rather
     * than once per element, which is useful for amortizing per-partition
     * setup cost (connections, buffers, ...).
     */
    def mapPartition(sc: SparkContext) = {
        val array = 1 to 10
        val listRDD: RDD[Int] = sc.parallelize(array)
        // NOTE: the original code first assigned listRDD.map(num => num * 2) to a
        // var and immediately overwrote it; because RDDs are lazy that mapped RDD
        // was never evaluated, so the dead assignment is removed and the var is
        // now a val.
        val retRDD: RDD[Int] = listRDD.mapPartitions((partition: Iterator[Int]) => {
            // Materialize the partition so it can be both printed and transformed.
            val list = partition.toList
            println(s"分区中的数据为：${list.mkString("[", ", ", "]")}")
            list.map(_ * 2).toIterator
        })
        retRDD.foreach(println)
        println("-----------------------mapPartitionsWithIndex----------------------------------")
        // Same idea, but the function also receives the partition's index.
        // count() is only called to force evaluation of the lazy transformation.
        listRDD.mapPartitionsWithIndex((index, partition) => {
            val list = partition.toList
            println(s"分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
            list.map(_ / 2).toIterator
        }).count()
    }

    /**
     * Demonstrates `sample`: draws roughly `fraction` of the elements,
     * with replacement (sampled1) and without replacement (sampled2).
     * The returned counts are approximate — sample() does not guarantee
     * an exact fraction of the input size.
     */
    def sample(sc: SparkContext) = {
        val list = sc.parallelize(1 to 100000)
        val sampled1 = list.sample(true, 0.01)   // with replacement
        println("sampled1 count:" + sampled1.count())
        val sampled2 = list.sample(false, 0.01)  // without replacement
        println("sampled2 count:" + sampled2.count())
    }

    /**
     * Demonstrates `union`: concatenates two RDDs without deduplication.
     * The result keeps the partitions of both inputs (2 + 2 = 4 here),
     * which the per-partition printouts make visible.
     */
    def union(sc: SparkContext) = {
        val listRDD1 = sc.parallelize(1 to 5, 2)
            .mapPartitionsWithIndex((index, partition) => {
                val list = partition.toList
                println(s"-->listRDD1的分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
                list.toIterator
            })
        val listRDD2 = sc.parallelize(6 to 10, 2)
            .mapPartitionsWithIndex((index, partition) => {
                val list = partition.toList
                println(s"-->listRDD2的分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
                list.toIterator
            })
        val unionRDD = listRDD1.union(listRDD2)
        // count() forces evaluation so the partition printouts actually run.
        unionRDD.mapPartitionsWithIndex((index, partition) => {
            val list = partition.toList
            println(s"unionRDD的分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
            list.toIterator
        }).count()
    }

    /**
     * Demonstrates the pair-RDD join family: inner join, leftOuterJoin,
     * rightOuterJoin and fullOuterJoin, keyed by student id. The deliberately
     * unmatched ids (student 10086, score 10000) show which rows each join
     * variant keeps or drops.
     */
    def join(sc: SparkContext) = {
        case class Student(id: Int, name: String, province: String)
        case class Score(sid: Int, course: String, score: Double)
        val stuRDD = sc.parallelize(List(
            Student(1, "唐玉峰", "安徽·合肥"),
            Student(2, "李梦", "山东·济宁"),
            Student(3, "胡国权", "甘肃·白银"),
            Student(4, "陈延年", "甘肃·张掖"),
            Student(5, "马惠", "辽宁·葫芦岛"),
            Student(10086, "刘炳文", "吉林·通化")   // no matching score
        ))
        val scoreRDD = sc.parallelize(List(
            Score(1, "chinese", 95.5),
            Score(2, "english", 55.5),
            Score(3, "math", 20.5),
            Score(4, "pe", 32.5),
            Score(5, "physical", 59),
            Score(10000, "Chemistry", 99.5)         // no matching student
        ))
        // Joins operate on (key, value) pair RDDs, so key both sides by id.
        val id2Stu: RDD[(Int, Student)] = stuRDD.map(stu => (stu.id, stu))
        val id2Score: RDD[(Int, Score)] = scoreRDD.map(score => (score.sid, score))

        println("==============inner join================")
        // Inner join: only ids present on BOTH sides survive.
        val joinedRDD: RDD[(Int, (Student, Score))] = id2Stu.join(id2Score)
        joinedRDD.foreach {
            case (id, (stu, score)) =>
                println(s"id为${id}的学生信息为:${stu}，其考试成绩信息为：${score}")
        }

        println("==============left outer join================")
        // Left outer join: every student survives; missing scores become None.
        val leftJoin: RDD[(Int, (Student, Option[Score]))] = id2Stu.leftOuterJoin(id2Score)
        leftJoin.foreach {
            case (id, (stu, scoreOption)) =>
                println(s"id为${id}的学生信息为:${stu}，其考试成绩信息为：${scoreOption.getOrElse("UnKnow")}")
        }

        println("==============right outer join================")
        // Right outer join: every score survives; missing students become None.
        val rightJoin: RDD[(Int, (Option[Student], Score))] = id2Stu.rightOuterJoin(id2Score)
        rightJoin.foreach {
            case (id, (stuOption, score)) =>
                println(s"id为${id}的学生信息为:${stuOption.getOrElse("UnKnow")}，其考试成绩信息为：${score}")
        }

        println("==============full outer join================")
        // Full outer join: all ids from both sides; either side may be None.
        // (typo `fulloin` fixed to `fullJoin` — local name only)
        val fullJoin: RDD[(Int, (Option[Student], Option[Score]))] = id2Stu.fullOuterJoin(id2Score)
        fullJoin.foreach {
            case (id, (stuOption, scoreOption)) =>
                println(s"id为${id}的学生信息为:${stuOption.getOrElse("UnKnow")}，其考试成绩信息为：${scoreOption.getOrElse("UnKnow")}")
        }
    }

    /**
     * Demonstrates shrinking partitions with `coalesce` (no shuffle by
     * default) and growing them again with `repartition` (always shuffles).
     * The per-partition printouts at each stage show how the data moves.
     */
    def coalesce(sc: SparkContext) = {
        // (typo `lsitRDD` fixed to `listRDD` — local name only)
        val listRDD = sc.parallelize(1 to 20, 4)
            .mapPartitionsWithIndex((index, partition) => {
                val list = partition.toList
                println(s"-->listRDD的分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
                list.toIterator
            })
        // Shrink 4 -> 2 partitions; coalesce avoids a shuffle when reducing.
        val coalesceRDD = listRDD.coalesce(2).mapPartitionsWithIndex((index, partition) => {
            val list = partition.toList
            println(s"coalesceRDD的分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
            list.toIterator
        })

        // Grow 2 -> 4 partitions; repartition always performs a full shuffle.
        val rdd = coalesceRDD.repartition(4).mapPartitionsWithIndex((index, partition) => {
            val list = partition.toList
            println(s"增大分区之后rdd的分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
            list.toIterator
        })
        // Action to trigger the whole lazy pipeline.
        rdd.count
    }
}
