package com.shujia.youhua

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import java.text.SimpleDateFormat
import java.util.Date

// Demo: replace a per-record `map` with `mapPartitions` so expensive setup runs once per partition
object MapPartitionsDemo {
  /**
   * Demonstrates replacing a per-record `map` with `mapPartitions` so that
   * expensive setup (here: constructing two SimpleDateFormat instances)
   * happens once per partition instead of once per record.
   */
  def main(args: Array[String]): Unit = {
    val ss: SparkSession = SparkSession.builder()
      .master("local")
      // Fix: appName previously read "缓存，避免使用重复的RDD" (a caching-demo
      // title), which did not describe this mapPartitions example.
      .appName("使用mapPartitions替代map")
      .getOrCreate()
    val sc: SparkContext = ss.sparkContext

    val lineRDD: RDD[String] = sc.textFile("spark/data/shebao.txt")

    // Naive alternative (removed dead commented-out code): a plain `map` would
    // build two SimpleDateFormat objects for EVERY record, which is wasteful.

    val rdd2: RDD[String] = lineRDD.repartition(2)

    // mapPartitions runs the setup code once per partition: with 2 partitions
    // the two formatters below are created 2 times in total, not once per record.
    val resRDD: RDD[(String, String, String)] = rdd2.map(_.split(","))
      .mapPartitions((itr: Iterator[Array[String]]) => {
        println("======================我来了！================================")
        // Per-partition, not per-record: SimpleDateFormat is costly to build
        // (and not thread-safe, so it must not be shared across partitions).
        val sdf1: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
        val sdf2: SimpleDateFormat = new SimpleDateFormat("yyyy年MM月dd日")
        // The partition's records arrive as a lazy iterator. Fix: `collect`
        // with a partial function skips malformed lines (fewer/more than 3
        // fields), which previously aborted the task with a MatchError.
        itr.collect {
          case Array(id, name, d1) =>
            // Reformat "2020-11-01 00:00:00" -> "2020年11月01日".
            val s1: String = sdf2.format(sdf1.parse(d1))
            (id, name, s1)
        }
      })

    resRDD.foreach(println)

    // Fix: release Spark resources before the JVM exits.
    ss.stop()
  }
}
