package com.niit.spark.rdd.test

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Date: 2025/4/28
 * Author: Ys
 * Description: Exercise showing how to use RDD.mapPartitions to parse
 * access-log lines into (user, date) pairs, one partition at a time.
 */
object MapPartitionsExercise {

  /**
   * Entry point: parses a small set of access-log lines with
   * `mapPartitions`, extracting the user id and date from each record,
   * then prints the resulting (user, date) pairs.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("MapPartitionsExercise")
    val sc   = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    // Sample access-log records: "<user> <date> <time> <method> <path>".
    // parallelize spreads these records across the local partitions.
    val accessLogs = sc.parallelize(Seq("User123 2025-04-27 10:00:00 GET /page1",
      "User456 2025-04-27 10:00:01 POST /page2", "User789 2025-04-27 10:00:02 GET /page3"))

    // mapPartitions invokes the function once per partition; `records` is an
    // iterator over just that partition's lines (not the whole dataset).
    // Returning an iterator keeps the transformation lazy — no per-record
    // function-call overhead beyond the inner map.
    val userDates: RDD[(String, String)] = accessLogs.mapPartitions { records =>
      records.map { record =>
        // Whitespace-delimited fields: index 0 is the user id, index 1 the date.
        val fields = record.split(" ")
        (fields(0), fields(1))
      }
    }

    userDates.collect().foreach(println)

    sc.stop()
  }
}