import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

/**
 * TransformationOp: examples of Spark RDD transformation operators
 * (mapPartitions, mapPartitionsWithIndex, combineByKey, aggregateByKey).
 *
 * date: 2020/8/4 11:29
 *
 * @author Hesion
 * @since JDK 1.8
 */
class TransformationOp {
  // Shared local-mode Spark context for every test in this class.
  // (vals, not vars: neither is ever reassigned.)
  val conf = new SparkConf().setMaster("local[*]").setAppName("transformation_op")
  val sc = new SparkContext(conf)

  /**
   * mapPartitions vs map: mapPartitions receives one Iterator per partition,
   * while map receives one element at a time. Useful when per-partition setup
   * (e.g. opening a connection) would be too expensive to do per element.
   *
   * (Method name keeps its original typo — it is the public test identifier.)
   */
  @Test
  def mapPatitions(): Unit = {
    sc.parallelize(Seq(1, 2, 3, 4, 5, 6), 2)
      // iter is a plain Scala Iterator over this partition's elements
      .mapPartitions(iter => iter.map(_ * 10))
      .collect()
      .foreach(println)
  }

  /**
   * Same as mapPartitions, but the function also receives the partition index.
   *
   * BUG FIX: the original body ran `iter.foreach(println)` and then returned
   * the same `iter`. A Scala Iterator can only be traversed once, so the
   * returned iterator was already exhausted and the resulting RDD was empty.
   * Materializing the partition into a List first lets us both print the
   * elements and return them.
   */
  @Test
  def mapPartitionsWithIndex(): Unit = {
    sc.parallelize(Seq(1, 2, 3, 4, 5, 6), 2)
      .mapPartitionsWithIndex((index, iter) => {
        println("index:" + index)
        val items = iter.toList // materialize: an Iterator is single-use
        items.foreach(println)
        items.iterator
      }).collect()
  }

  /**
   * combineByKey example: computes the average score per person.
   * The accumulator is a (count, totalScore) pair per key.
   *
   * (Method name keeps its original spelling — it is the public test identifier.)
   */
  @Test
  def combinerByKey(): Unit = {
    val initialScores = Array(("Fred", 88.0), ("Fred", 95.0), ("Fred", 91.0), ("Wilma", 93.0), ("Wilma", 95.0), ("Wilma", 98.0))
    val d1 = sc.parallelize(initialScores)

    // Type alias: (count of scores seen so far, running total of scores).
    type MVType = (Int, Double)
    d1.combineByKey(
      // createCombiner: first score for a key becomes (1, score)
      score => (1, score),
      // mergeValue: fold another score for the same key into the accumulator
      (c1: MVType, newScore) => (c1._1 + 1, c1._2 + newScore),
      // mergeCombiners: combine per-partition accumulators for the same key
      (c1: MVType, c2: MVType) => (c1._1 + c2._1, c1._2 + c2._2)
    ).map {
      case (name, (num, score))
      => (name, score / num) // average = total / count
    }.collect.foreach(println)
  }

  /**
   * aggregateByKey example: applies a 0.8 multiplier (20% discount) to each
   * price, then sums per key.
   *
   * NOTE(review): the seqOp multiplies each item by the accumulator, which
   * starts at the zero value 0.8. This is only correct because every key here
   * is unique within a partition; with repeated keys the accumulated price
   * (not 0.8) would be used as the multiplier. Left as-is to preserve the
   * original behavior for this dataset.
   */
  @Test
  def aggregateByKey(): Unit = {
    val rdd: RDD[(String, Int)] = sc.parallelize(Seq(("手机", 80), ("电脑", 80), ("平板", 100)))
    rdd.aggregateByKey(0.8)((acc, item) => item * acc, (curr, agg) => curr + agg)
      .collect()
      .foreach(println(_))
  }
}
