package com.shujia.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import java.text.SimpleDateFormat
import java.util.Date

object Demo3MapPartition {

  /**
   * Demo comparing `map` with `mapPartitions` for per-element setup cost.
   *
   * `map` runs its function once per record, so any expensive setup inside it
   * (here, constructing a `SimpleDateFormat`) is repeated for every record.
   * `mapPartitions` runs its function once per partition, letting us build the
   * formatter a single time and reuse it for every record in that partition.
   *
   * Reads `spark/data/ant_user_low_carbon.txt` (tab-separated), converts the
   * second field from a `yyyy/MM/dd` date string to an epoch-millis timestamp,
   * and prints the resulting triples.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()
    val sparkContext: SparkContext = sparkSession.sparkContext

    // Ensure the SparkSession is always released, even if the job throws.
    try {
      val dataRDD: RDD[String] = sparkContext.textFile("spark/data/ant_user_low_carbon.txt")

      // Split each line on tab into a 3-field tuple.
      // NOTE(review): assumes every line has at least 3 tab-separated fields;
      // a malformed line would throw ArrayIndexOutOfBoundsException — confirm
      // the input file is clean or add a filter upstream.
      val kvRDD: RDD[(String, String, String)] = dataRDD.mapPartitions((itr: Iterator[String]) => {
        itr.map((line: String) => {
          val infos: Array[String] = line.split("\t")
          (infos(0), infos(1), infos(2))
        })
      })

      //    map processes the RDD one record at a time
      //    val resRDD: RDD[(String, Long, String)] = kvRDD.map((kv: (String, String, String)) => {
      //      // This line is inside map, so a new SimpleDateFormat is created for EVERY record
      //      val sdf = new SimpleDateFormat("yyyy/MM/dd")
      //      println("----------------创建了一个SimpleDateFormat对象----------------")
      //
      //      val dateObj: Date = sdf.parse(kv._2)
      //      val ts: Long = dateObj.getTime
      //      (kv._1, ts, kv._3)
      //    })
      //
      //    resRDD.foreach(println)

      // mapPartitions processes one whole partition per invocation: the
      // SimpleDateFormat is created once per partition instead of once per
      // record. This is safe here because SimpleDateFormat is not thread-safe
      // but each partition's iterator is consumed by a single task thread.
      val resRDD2: RDD[(String, Long, String)] = kvRDD.mapPartitions((itr: Iterator[(String, String, String)]) => {
        /**
         * Convert the date field to an epoch-millis timestamp.
         */
        val sdf = new SimpleDateFormat("yyyy/MM/dd")
        println("----------------创建了一个SimpleDateFormat对象----------------")
        itr.map((kv: (String, String, String)) => {
          val dateObj: Date = sdf.parse(kv._2)
          val ts: Long = dateObj.getTime
          (kv._1, ts, kv._3)
        })
      })

      resRDD2.foreach(println)
    } finally {
      // Fix: the original never stopped the session, leaking the local
      // Spark context and its resources on every run.
      sparkSession.stop()
    }
  }

}
