package com.atguigu.bigdata.test

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author: yqb
 * @Date: 2022/6/4 22:21 
 * @Description: Demo
 * @Version: 1.0
 * */
object Need02 {
    /**
     * Example: compute the maximum value of each RDD partition.
     *
     * `mapPartitions` invokes the supplied function once per partition,
     * passing an iterator over that partition's elements, so a per-partition
     * aggregate (here: max) can be produced with a single pass.
     */
    def main(args: Array[String]): Unit = {

        // Local-mode Spark configuration using all available cores.
        val need02: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Need02")
        val context = new SparkContext(need02)

        // Distribute the list across 2 partitions: [1, 2] and [3, 4].
        val value: RDD[Int] = context.makeRDD(List(1, 2, 3, 4), 2)

        // Emit the maximum of each partition.
        // reduceOption is used instead of iter.max so an empty partition
        // (possible when partitions outnumber elements, or with skewed data)
        // yields no element rather than throwing UnsupportedOperationException.
        val value1: RDD[Int] = value.mapPartitions(
            iter => iter.reduceOption(_ max _).iterator
        )

        // Expected output (one max per partition): 2, 4
        value1.collect().foreach(println)

        context.stop()

    }

}
