package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import java.text.SimpleDateFormat
import java.util.Date

/**
 * Demo: `mapPartitions` as an optimization over `map`.
 *
 * Reads tab-separated user/date/carbon records from
 * `data/ant_user_low_carbon.txt`, parses the date column into an epoch-millis
 * timestamp, and prints the resulting tuples. Shows that per-partition
 * initialization (here, a `SimpleDateFormat`) should live inside
 * `mapPartitions` so one instance is created per partition instead of one
 * per record.
 */
object Demo3MapPartition {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    // Ensure the SparkSession is always released, even if the job fails.
    try {
      val dataRDD: RDD[String] = sc.textFile("data/ant_user_low_carbon.txt")

      val kvRDD: RDD[(String, String, String)] = dataRDD.mapPartitions(iter => {
        iter.map(line => {
          // For a simple record split there is no practical difference
          // between map and mapPartitions — no per-partition state is needed.
          val split: Array[String] = line.split("\t")
          (split(0), split(1), split(2))
        })
      })

      val resultRDD: RDD[(String, Long, String)] = kvRDD.mapPartitions(iter => {
        /**
         * Initialization code can be placed inside mapPartitions to reduce
         * memory usage: the object below is created once per partition,
         * not once per record.
         */
        // Convert the date field ("yyyy/MM/dd") into an epoch-millis timestamp.
        // NOTE: SimpleDateFormat is not thread-safe, which is exactly why a
        // fresh instance is built per partition rather than shared.
        val format = new SimpleDateFormat("yyyy/MM/dd")

        iter.map {
          case (id, sdate, p) =>
            val dateObj: Date = format.parse(sdate)
            val ts: Long = dateObj.getTime
            (id, ts, p)
        }
      })

      resultRDD.foreach(println)
    } finally {
      // Previously missing: without stop() the local Spark context (and its
      // UI/threads) leaks until JVM exit.
      spark.stop()
    }
  }

}
