package cn.hnzycfc.spark.datagen.youtube

import cn.hnzycfc.spark.datagen.youtube.command.DataGenCli
import cn.hnzycfc.spark.datagen.youtube.util.{RandomYoutubeDataUtil, SparkAppHelper}
import org.apache.log4j.Logger

import java.util.{Random, UUID}

/**
 * Data-skew generation job: produces Youtube video records whose category_id
 * distribution is deliberately concentrated on a small set of values.
 */
object YoutubeSkewDataGen {
    private val logger: Logger = Logger.getLogger(YoutubeSkewDataGen.getClass)

    /**
     * Entry point. Parses the CLI options, builds an RDD with
     * `numPartition` partitions, maps each partition to one of three
     * pre-chosen "skewed" category ids, generates `numPerPartition`
     * records per partition with that category, prints per-category
     * counts, and sinks the resulting DataFrame.
     */
    def main(args: Array[String]): Unit = {

        // Fixed: the message previously said "[Normal]" although this is the skew generator.
        logger.info("YoutubeDataGen[Skew] started...")

        // Parse command-line arguments.
        val cmdEntity = DataGenCli.parse(args)
        val spark = SparkAppHelper.getSparkSession

        // Seed RDD, spread across the requested number of partitions. The seed
        // values themselves are thrown away later; only the partitioning matters.
        logger.info(s"开始生成RDD分区种子: (分区数量 ${cmdEntity.numPartition}、分区数据条数 ${cmdEntity.numPerPartition})")
        val partitionRDD = spark.sparkContext
            .makeRDD((1 to 10000).map(_ => UUID.randomUUID().toString))
            .repartition(cmdEntity.numPartition)

        // 1. Pick the skewed category ids: every partition maps to one of these
        //    three, so the generated data piles up on at most three categories.
        val skewCategoryIdArray = Array(
            RandomYoutubeDataUtil.randomCategory_id,
            RandomYoutubeDataUtil.randomCategory_id,
            RandomYoutubeDataUtil.randomCategory_id)
        logger.info(s"生成倾斜的CategoryID:${skewCategoryIdArray.toList}")

        // 2. Build the partition-id -> category-id mapping.
        val r = new Random
        val pIdCategoryIdMap = (0 until cmdEntity.numPartition).map { pId =>
            pId -> skewCategoryIdArray(r.nextInt(skewCategoryIdArray.length))
        }.toMap

        // 3. Broadcast the mapping so executors look it up locally instead of
        //    shipping the map inside every task closure.
        val broadcastSkewCategoryId = spark.sparkContext
            .broadcast(pIdCategoryIdMap)

        // Generate YoutubeVideo records inside each partition.
        logger.info(s"RDD开始执行数据生成...")
        val dataRDD = partitionRDD.mapPartitionsWithIndex((pId, _) => {
            // Look up this partition's category id from the broadcast map.
            val categoryId = broadcastSkewCategoryId.value.get(pId)

            // Executor-local logger: avoids serializing the driver's Logger.
            val executorLogger: Logger = Logger.getLogger("cn.hnzycfc.spark.datagen.youtube.YoutubeSkewDataGen")
            executorLogger.info(s"分区对应的CategoryID为:${categoryId}")

            categoryId match {
                case Some(category) =>
                    (1 to cmdEntity.numPerPartition)
                        .map(_ => RandomYoutubeDataUtil.randomSkewYoutubeEntity(category))
                        .iterator
                case None =>
                    // No mapping for this partition id; fall back to non-skewed data.
                    executorLogger.warn(s"未找到与分区(id:${pId}对应的CategoryID)")
                    (1 to cmdEntity.numPerPartition)
                        .map(_ => RandomYoutubeDataUtil.randomYoutubeEntity())
                        .iterator
            }
        })

        val originDF = spark.createDataFrame(dataRDD)
        // Removed: `originDF.repartition()` — its result was discarded and Spark
        // transformations are lazy, so the call had no effect.

        import spark.sqlContext.implicits._

        // Show per-category counts so the skew is visible in the output.
        // Fixed: `.as("total_cnt")` aliased the whole Dataset (a no-op for the
        // column name); withColumnRenamed actually renames the count column.
        originDF.groupBy($"category_id")
            .count()
            .withColumnRenamed("count", "total_cnt")
            .show(200)

        SparkAppHelper.sinkData(originDF, cmdEntity)
    }
}
