package cn.hnzycfc.spark.datagen.youtube

import cn.hnzycfc.spark.datagen.youtube.command.DataGenCli
import cn.hnzycfc.spark.datagen.youtube.util.{RandomYoutubeDataUtil, SparkAppHelper}
import org.apache.log4j.Logger

import java.util.UUID

/**
 * Youtube data generator.
 *
 * Produces synthetic YouTube video records modeled on the Kaggle dataset:
 * https://www.kaggle.com/datasnaek/youtube-new?select=USvideos.csv
 *
 * Parallelism is driven by CLI options: `numPartition` partitions, each
 * emitting `numPerPartition` rows, then sunk via [[SparkAppHelper.sinkData]].
 */
object YoutubeDataGen {
    private val logger: Logger = Logger.getLogger(YoutubeDataGen.getClass)

    def main(args: Array[String]): Unit = {

        logger.info("YoutubeDataGen[Normal] started...")

        // Parse command-line options (partition count, rows per partition, sink config).
        val cmdEntity = DataGenCli.parse(args)
        val spark = SparkAppHelper.getSparkSession

        // Build a throwaway "seed" RDD whose only purpose is to carry the desired
        // partition layout; the seed values themselves are never read downstream.
        logger.info(s"开始生成RDD分区种子: (分区数量 ${cmdEntity.numPartition}、分区数据条数 ${cmdEntity.numPerPartition})")
        val partitionRDD = spark.sparkContext
            .makeRDD((1 to 10000).map(_ => UUID.randomUUID().toString))
            .repartition(cmdEntity.numPartition)

        // Generate the random entities inside each partition. The incoming seed
        // iterator is deliberately ignored — only the partition structure matters.
        logger.info(s"RDD开始执行数据生成...")
        val dataRDD = partitionRDD.mapPartitions { _ =>
            // Lazy iterator: rows are produced as Spark consumes them, instead of
            // materializing all numPerPartition entities in memory up front
            // (the previous `(1 to n).map(...).iterator` built the full Seq first).
            (1 to cmdEntity.numPerPartition)
                .iterator
                .map(_ => RandomYoutubeDataUtil.randomYoutubeEntity)
        }

        val originDF = spark.createDataFrame(dataRDD)
        // NOTE(review): the previous `originDF.repartition()` was removed — DataFrames
        // are immutable, repartition() returns a NEW DataFrame, and the result was
        // discarded, making the call a no-op. If a specific output partitioning is
        // needed before sinking, assign it: `val df = originDF.repartition(n)`.

        SparkAppHelper.sinkData(originDF, cmdEntity)
    }
}
