package com.offcn.bigdata.spark.p1.p2

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * 学习Spark RDD的transformation操作
 * flatMap
 * filter
 *
 * ------------------------------------
 *
 * mapPartitions & mapPartitionsWithIndex
 * sample
 * union
 * distinct
 * groupByKey & groupBy
 * join
 * reduceByKey & foldByKey
 * repartition & coalesce
 * combineByKey & aggregateByKey
 *
 * sortByKey
 */
object _01SparkTransformationOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setMaster("local[*]")
            // getClass.getSimpleName on a Scala `object` returns the synthetic
            // class name with a trailing '$' (e.g. "_01SparkTransformationOps$");
            // strip it so the Spark UI shows a clean application name.
            .setAppName(_01SparkTransformationOps.getClass.getSimpleName.stripSuffix("$"))
        val sc = new SparkContext(conf)

//        repartitionOps(sc)
        rbkAndfbkOps(sc)
        sc.stop()
    }

    /**
     * Repartitioning:
     *   An RDD with N partitions can be reshaped into one with M partitions,
     *   where M may be larger or smaller than N.
     *
     *   coalesce(numPartitions, shuffle = false):
     *     normally used to REDUCE the partition count to the given number
     *     (ideally the old and new counts have an integer-multiple relationship).
     *     It is a narrow dependency: each new partition maps 1:N onto old ones.
     *   repartition(numPartitions):
     *     shorthand for coalesce(numPartitions, shuffle = true); a wide (shuffle)
     *     dependency, so it can also INCREASE the partition count.
     */
    def repartitionOps(sc: SparkContext): Unit = {
        val rdd = sc.parallelize(1 to 100000, 10)

        println("---------------------------过滤前---------------------------")
        // Materialize each partition once to report its size, then hand the
        // elements back so count() can trigger the job.
        rdd.mapPartitionsWithIndex((index, it) => {
            val data = it.toList
            println(s"rdd{${index}}分区中的数据个数：${data.size}")
            data.iterator
        }).count()

        println("---------------------------过滤后---------------------------")
        val filtered = rdd.filter(num => num % 3 != 0)
        filtered.mapPartitionsWithIndex((index, it) => {
            val data = it.toList
            println(s"过滤之后filtered{${index}}分区中的数据个数：${data.size}")
            data.iterator
        }).count()

        println("-------------------------分区合并(减小)---------------------------")
        // Narrow-dependency shrink: 10 partitions -> 5, no shuffle.
        val coalesced = filtered.coalesce(5)
        coalesced.mapPartitionsWithIndex((index, it) => {
            val data = it.toList
            println(s"减少分区之后rdd分区{${index}}中的数据个数：${data.size}")
            data.iterator
        }).count()

        println("-------------------------分区合并(增大)---------------------------")
        // Growing the partition count requires a shuffle:
        // repartition(15) == coalesce(15, shuffle = true).
        val repartitioned = filtered.repartition(15)
        repartitioned.mapPartitionsWithIndex((index, it) => {
            val data = it.toList
            println(s"增大分区之后rdd分区{${index}}中的数据个数：${data.size}")
            data.iterator
        }).count()
    }

    /**
     * reduceByKey vs foldByKey:
     *   reduceByKey((v1, v2) => v3)
     *   foldByKey(zeroValue)((v1, v2) => v3)
     * Their relationship mirrors reduce vs fold on plain Scala collections:
     * foldByKey additionally supplies an initial (zero) value per key.
     */
    def rbkAndfbkOps(sc: SparkContext): Unit = {
        val lines = sc.parallelize(List(
            "hello moto",
            "hello nokia",
            "hello xiaomi",
            "hello huawei",
            "shit sangsumg"
        ))
        // Classic word count: split each line on whitespace, pair every word with 1.
        val pairs: RDD[(String, Int)] = lines.flatMap(_.split("\\s+")).map((_, 1))

        pairs.reduceByKey((v1, v2) => v1 + v2).foreach(println)
        println("------------------------------------------------------------")
        pairs.foldByKey(0)(_ + _).foreach(println)
    }

    /**
     * join(other): inner-joins this RDD with `other`, analogous to SQL INNER JOIN.
     * Both RDDs must be key-value pairs; the key acts as the join column.
     *
     * SQL join refresher, with A(id) joined to B(aid):
     *   cross join:       A cross join B
     *                     no join condition => Cartesian product; avoid.
     *   inner join:       A a, B b where a.id = b.aid
     *                     rows present in both tables.
     *   left outer join:  A a left outer join B b on a.id = b.aid
     *                     every row of A; unmatched right side shows null.
     *   right outer join: A a right outer join B b on a.id = b.aid
     *                     the mirror image of the left outer join.
     *   full outer join:  A a full outer join B b on a.id = b.aid
     *                     union of left and right outer join results.
     *
     * NOTE(review): the demo body below is commented out and the leftOuterJoin
     * branch was never finished — kept as-is for reference.
     */
    def joinOps(sc: SparkContext): Unit = {
//    case class Student(id : Int , name : String , age : Int , gender : Int)
//    case class Score(sid : Int , course : String , score : Double )
//    val stus = sc.parallelize(List(
//        Student(1 , "赵一" , 18 , 0),
//        Student(2 , "钱二" , 19 , 1),
//        Student(3 , "孙三" , 20 , 0),
//        Student(4 , "李四" , 21 , 0),
//        Student(5 , "周五" , 22 , 1)
//    ))
//    val scores = sc.parallelize(List(
//        Score(2 , "语文" , 60),
//        Score(3 , "数学" , 70),
//        Score(1 , "英语" , 80),
//        Score(4 , "体育" , 90),
//        Score(105 , "信息技术" , 100)
//    ))
//
//    // join student records with their exam scores
//    val id2Stu :RDD[(Int , Student)] = stus.map(stu => (stu.id , stu))
//    val id2Score:RDD[(Int , Score)] = scores.map(score => (score.sid , score))
//    println("-----------------------join结果---------------------")
//    val joinedRDD:RDD[(Int , (Student,Score))] = id2Stu.join(id2Score)
//    joinedRDD.foreach{case (id , (stu , score)) =>{
//        println(s"学生id：${id}, name: ${stu.name}, 性别：${stu.gender}, 科目：${score.course}, 成绩: ${score.score}")
//    }}
//    println("----------------------left join结果------------------")
//    val leftjoinedRDD:RDD[(Int , (Student , Option[Score]))] = id2Stu.leftOuterJoin(id2Score)
//    leftjoinedRDD.foreach{case (id , (StuOption , score)) => {
//
//    }}
    }
}
