package com.shujia.core.transformations

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demonstrates `mapPartitions` as an optimization over `map` when each element's
 * processing requires expensive setup (e.g. a database connection): with
 * `mapPartitions` the setup runs once per partition instead of once per record.
 */
object PartitionByOpt {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    // Fixed: the app name previously said "union合并" — a leftover from the union demo.
    conf.setAppName("PartitionByOpt")

    val sc = new SparkContext(conf)

    try {
      // Each line is "id,name,age,gender,clazz".
      // NOTE: the Array(...) pattern is non-exhaustive — a malformed line (wrong
      // field count) throws scala.MatchError at runtime. Acceptable for a demo
      // with clean input; production code should filter or use `collect`.
      val studentTupleRDD: RDD[(String, String, Int, String, String)] = sc.textFile("spark/data/students.txt")
        .map(_.split(","))
        .map {
          case Array(id, name, age, gender, clazz) => (id, name, age.toInt, gender, clazz)
        }.repartition(2)

      println(s"studentTupleRDD的分区为：${studentTupleRDD.getNumPartitions}")

      /**
       * `map` processes each record independently; if the body opened a database
       * connection, every single record would pay that connection cost.
       */
      //    val resRDD: RDD[(String, String)] = studentTupleRDD.map {
      //      case (id, name, age, gender, clazz) => {
      //        println("================== 与数据库建立连接 ========================") // connection would be created per record — wasteful
      //        (name, "数加: " + clazz)
      //      }
      //    }

      /**
       * `mapPartitions` receives one Iterator per partition, so per-partition
       * setup (the "connect to database" println below) runs once per partition
       * rather than once per record.
       */
      val resRDD: RDD[(String, String)] = studentTupleRDD.mapPartitions {
        (element: Iterator[(String, String, Int, String, String)]) =>
          // Runs once per partition — this is where a real DB connection would be opened.
          println("================== 与数据库建立连接 ========================")
          element.map {
            case (id, name, age, gender, clazz) => (name, "数加: " + clazz)
          }
      }

      resRDD.foreach(println)
    } finally {
      // Fixed: the SparkContext was never stopped, leaking the driver's resources.
      sc.stop()
    }
  }

}
