package com.ahaha.transformation

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo suite for common Spark RDD transformations, run locally.
 *
 * Each method builds a small RDD, applies one transformation family
 * (mapPartitions, sample, union, distinct, joins, repartition/coalesce,
 * groupByKey, reduceByKey/foldByKey) and prints the result via an action.
 * Only one demo is enabled in `main` at a time; the rest stay commented out.
 */
object transformationTest {

    /** Entry point: creates a local SparkContext, runs the selected demo,
      * and always stops the context afterwards. */
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
          .setMaster("local[*]")
          // getClass.getSimpleName on a Scala object yields a trailing '$'
          // (synthetic companion class); strip it for a clean app name.
          .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
        val sc = new SparkContext(conf)

        //mapPartitionsOps(sc)
        //mapPartitionsWithIndexOps(sc)
        //sampleTest(sc)
        //unionTest(sc)
        //distinctTest(sc)
        //joinTest(sc)
        //repartitionTest(sc)
        //groupByKeyTest(sc)
        reduceByKeyTest(sc)

        sc.stop()
    }

    /** Logs each partition's contents of `rdd` (prefixed with `label`) and
      * returns an RDD carrying the same elements. Lazy like any
      * transformation: the prints only happen when an action runs. */
    private def dumpPartitions(label: String, rdd: RDD[Int]): RDD[Int] =
        rdd.mapPartitionsWithIndex { (index, partition) =>
            // Materialize the one-shot partition iterator so it can be both
            // printed and returned.
            val elems = partition.toList
            println(s"${label}中partition<$index>中的数据为：" + elems.mkString("[", ",", "]"))
            elems.iterator
        }

    /** mapPartitions: the supplied function runs once per partition (not
      * once per element), receiving the partition's iterator. */
    def mapPartitionsOps(sc: SparkContext): Unit = {
        val listRDD: RDD[Int] = sc.parallelize(1 to 10)
        val res = listRDD.mapPartitions { partition =>
            // toList materializes the iterator so it can be printed and
            // then traversed again when returned downstream.
            val elems = partition.toList
            println("partitions :" + elems.mkString("[", ",", "]"))
            elems.iterator
        }

        res.foreach(println)
    }

    /** mapPartitionsWithIndex: like mapPartitions but the function also
      * receives the partition's index. Doubles each element here. */
    def mapPartitionsWithIndexOps(sc: SparkContext): Unit = {
        val listRDD: RDD[Int] = sc.parallelize(1 to 10)

        val res = listRDD.mapPartitionsWithIndex { (index, partition) =>
            val elems = partition.toList
            println(s"partition<${index}>:中的数据为：" + elems.mkString("[", ",", "]"))
            elems.map(_ * 2).iterator
        }

        res.foreach(println)
    }

    /** sample: draws roughly `fraction` of the elements, without and then
      * with replacement (the expected size, not an exact count). */
    def sampleTest(sc: SparkContext): Unit = {
        val listRDD: RDD[Int] = sc.parallelize(1 to 100000)

        // Without replacement: each element can appear at most once.
        val sampledNoReplacement = listRDD.sample(false, 0.01)
        println("无放回的抽样数据集：" + sampledNoReplacement.count())

        // With replacement: the same element may be drawn multiple times.
        val sampledWithReplacement = listRDD.sample(true, 0.01)
        println("有放回的抽样数据集：" + sampledWithReplacement.count())
    }

    /** union: concatenates two RDDs without a shuffle; the result keeps
      * every partition of both parents (2 + 2 = 4 partitions here). */
    def unionTest(sc: SparkContext): Unit = {
        val listRDD1 = dumpPartitions("listRDD1", sc.parallelize(1 to 10, 2))
        val listRDD2 = dumpPartitions("listRDD2", sc.parallelize(11 to 20, 2))

        val res = dumpPartitions("res", listRDD1.union(listRDD2))

        // foreach is the action that triggers all the lazy dumps above.
        res.foreach(println)
    }

    /** distinct: removes duplicates via a shuffle; count() is the action
      * that forces evaluation so the partition dumps are printed. */
    def distinctTest(sc: SparkContext): Unit = {
        val listRDD = dumpPartitions("listRDD", sc.parallelize(List(1, 2, 3, 4, 1, 5, 4, 3)))

        dumpPartitions("去重后listRDD", listRDD.distinct()).count()
    }

    /** Demonstrates the four pair-RDD joins (inner, left outer, right
      * outer, full outer) on student/score records keyed by student id.
      * The unmatched ids (10086 and 10010) show how each join treats
      * keys present on only one side. */
    def joinTest(sc: SparkContext): Unit = {
        case class Student(id: Int, name: String, age: Int)
        case class Score(sid: Int, course: String, score: Float)

        val stuRDD: RDD[Student] = sc.parallelize(List(
            Student(1, "张三", 15),
            Student(2, "李四", 16),
            Student(3, "王五", 17),
            Student(4, "赵六", 18),
            Student(10086, "田七", 28)
        ))
        val scoreRDD: RDD[Score] = sc.parallelize(List(
            Score(1, "语文", 70.5f),
            Score(2, "数学", 80.5f),
            Score(3, "英语", 30.5f),
            Score(4, "体育", 99f),
            Score(10010, "语文", 70.5f)
        ))

        // Joins require key-value RDDs, so key both sides by student id.
        val id2stu: RDD[(Int, Student)] = stuRDD.map(stu => (stu.id, stu))
        val id2Score: RDD[(Int, Score)] = scoreRDD.map(score => (score.sid, score))

        println("stuRDD和scoreRDD表进行inner join返回交集")
        val innerJoin: RDD[(Int, (Student, Score))] = id2stu.join(id2Score)
        innerJoin.foreach { case (id, (stu, score)) =>
            println(s"id: ${id}, student: ${stu}, score: ${score}")
        }

        // Left outer join: every student appears; missing scores are None.
        println("stuRDD和scoreRDD表进行leftOuterJoin返回交集")
        val leftJoin: RDD[(Int, (Student, Option[Score]))] = id2stu.leftOuterJoin(id2Score)
        leftJoin.foreach { case (id, (stu, scoreOpt)) =>
            println(s"id: ${id}, student: ${stu}, score: ${scoreOpt.getOrElse("unknown")}")
        }

        // Right outer join: every score appears; missing students are None.
        println("stuRDD和scoreRDD表进行rightOuterJoin返回交集")
        val rightJoin: RDD[(Int, (Option[Student], Score))] = id2stu.rightOuterJoin(id2Score)
        rightJoin.foreach { case (id, (stuOpt, score)) =>
            println(s"id: ${id}, student: ${stuOpt.getOrElse("unknown")}, score: ${score}")
        }

        // Full outer join: union of keys; either side may be None.
        println("stuRDD和scoreRDD表进行fullOuterJoin返回交集")
        val fullJoin: RDD[(Int, (Option[Student], Option[Score]))] = id2stu.fullOuterJoin(id2Score)
        fullJoin.foreach { case (id, (stuOpt, scoreOpt)) =>
            println(s"id: ${id}, student: ${stuOpt.getOrElse("unknown")}, score: ${scoreOpt.getOrElse("unknown")}")
        }
    }

    /** coalesce vs repartition: coalesce (no shuffle by default) can only
      * reduce the partition count; repartition (= coalesce with
      * shuffle = true) can also increase it. */
    def repartitionTest(sc: SparkContext): Unit = {
        val rdd = sc.parallelize(1 to 10, 4)
        println("起始的分区个数：" + rdd.getNumPartitions)

        val coalesced = rdd.coalesce(2)
        println("coalesce后分区个数：" + coalesced.getNumPartitions)

        val repartitioned = coalesced.repartition(100)
        println("repartition后分区个数：" + repartitioned.getNumPartitions)
    }

    /** groupByKey: shuffles all values for a key to one node and groups
      * them into an Iterable — here grouping students by province. */
    def groupByKeyTest(sc: SparkContext): Unit = {
        case class Student(id: Int, name: String, province: String)
        val stuRDD: RDD[Student] = sc.parallelize(List(
            Student(1, "张三", "山东"),
            Student(2, "李四", "日照"),
            Student(3, "王五", "五莲"),
            Student(4, "赵六", "洪凝"),
            Student(10086, "田七", "龙口")
        ))
        val province2stu = stuRDD.map(stu => (stu.province, stu)).groupByKey()

        province2stu.foreach { case (province, students) =>
            println(s"${province}地区的学生有： ${students.toList}")
        }
    }

    /** Word count via reduceByKey and foldByKey. Both combine map-side
      * before the shuffle (unlike groupByKey); foldByKey additionally
      * takes an explicit zero element. */
    def reduceByKeyTest(sc: SparkContext): Unit = {
        val lines = sc.parallelize(List(
            "hello you",
            "hello me",
            "hello lan lan"
        ))

        val pairs = lines.flatMap(_.split("\\s+")).map((_, 1))

        val reduced = pairs.reduceByKey(_ + _)
        println("reducebykey后的结果")
        reduced.foreach(println)

        val folded = pairs.foldByKey(0)(_ + _)
        println("foldbykey后的结果")
        folded.foreach(println)
    }

}
