package com.fwmagic.spark.core.cases.threadsafe

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object TaskThreadSafe4 {
    /**
      * Demonstrates a thread-safe way to use a non-serializable, non-thread-safe
      * helper class (DateUtilsNoSerClass) inside Spark tasks: the helper is
      * instantiated on the Executor side, once per partition.
      *
      * Expected arguments:
      *   args(0): "true" to run with a local master (local[*]), anything else for cluster mode
      *   args(1): input path for the text file to process
      */
    def main(args: Array[String]): Unit = {
        // Fail fast with a clear usage message instead of an opaque
        // ArrayIndexOutOfBoundsException when arguments are missing.
        require(args.length >= 2, "Usage: TaskThreadSafe4 <isLocal:true|false> <inputPath>")

        val isLocal = args(0).toBoolean

        val conf: SparkConf = new SparkConf().setAppName(this.getClass.getSimpleName)
        if (isLocal) {
            conf.setMaster("local[*]")
        }

        val sc = new SparkContext(conf)

        try {
            val lines: RDD[String] = sc.textFile(args(1))

            /**
              * The DateUtilsNoSerClass object is created on the Executor side:
              * 1. DateUtilsNoSerClass does not need to implement Serializable.
              * 2. DateUtilsNoSerClass is a regular class, not a singleton; creating a
              *    new instance for every record would waste resources.
              *
              * Optimization: use mapPartitions so that each partition shares a single
              * instance. A partition is consumed by one task through an iterator, one
              * record at a time, so there is no thread-safety issue.
              */
            val mapRDD: RDD[(String, Long)] = lines.mapPartitions(iter => {
                // One helper instance per partition, reused for every record.
                val dateUtils = new DateUtilsNoSerClass
                iter.map(line => (line, dateUtils.getTime(line)))
            })

            /*
            // Naive alternative: one DateUtilsNoSerClass per record — correct but wasteful.
            val mapRDD: RDD[(String, Long)] = lines.map(line => {
                val dateUtils = new DateUtilsNoSerClass
                val time: Long = dateUtils.getTime(line)
                (line, time)
            })
            */

            val tuples = mapRDD.collect()

            tuples.foreach(println)
        } finally {
            // Always release the SparkContext, even if the job fails above.
            sc.stop()
        }
    }
}
