import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * 3、Spark面试题
 * 假设点击日志文件(click.log)中每行记录格式如下
 *    INFO 2019-09-01 00:29:53 requestURI:/click?app=1&p=1&adid=18005472&industry=469
 *    INFO 2019-09-01 00:30:31 requestURI:/click?app=2&p=1&adid=18005472&industry=469
 *    INFO 2019-09-01 00:31:03 requestURI:/click?app=1&p=1&adid=18005472&industry=469
 *    INFO 2019-09-01 00:31:51 requestURI:/click?app=1&p=1&adid=18005472&industry=469
 * 另有曝光日志(imp.log)格式如下：
 *    INFO 2019-09-01 00:29:53 requestURI:/imp?app=1&p=1&adid=18005472&industry=469
 *    INFO 2019-09-01 00:29:53 requestURI:/imp?app=1&p=1&adid=18005472&industry=469
 *    INFO 2019-09-01 00:29:53 requestURI:/imp?app=1&p=1&adid=18005472&industry=469
 * 3.1、用Spark-Core实现统计每个adid的曝光数与点击数，将结果输出到hdfs文件；
 *     输出文件结构为adid、曝光数、点击数。注意：数据不能有丢失（存在某些adid有imp，没有clk；或有clk没有imp）
 * 3.2、你的代码有多少个shuffle，是否能减少？（提示：仅有1次shuffle是最优的）
 */

object Subject3 {

  /**
   * Extracts the adid from a log line of the form
   *   INFO 2019-09-01 00:29:53 requestURI:/click?app=1&p=1&adid=18005472&industry=469
   * Splits on '&'; the adid key-value pair is the second-to-last field,
   * then splits on '=' to get the value.
   */
  private def parseAdId(line: String): String = {
    val fields = line.split("&")
    fields(fields.length - 2).split("=")(1)
  }

  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.WARN)
    // getCanonicalName of a Scala object ends in '$'; .init drops that trailing char
    val conf = new SparkConf()
      .setAppName(this.getClass.getCanonicalName.init)
      .setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Read click log: each record contributes (adid, (clicks = 1, impressions = 0))
    val clickRDD: RDD[(String, (Int, Int))] =
      sc.textFile("data/click.log").map(line => (parseAdId(line), (1, 0)))

    // Read impression log: each record contributes (adid, (clicks = 0, impressions = 1))
    val impRDD: RDD[(String, (Int, Int))] =
      sc.textFile("data/imp.log").map(line => (parseAdId(line), (0, 1)))

    // union is shuffle-free; reduceByKey is the single shuffle of the job.
    // Using union + reduceByKey (rather than a join) keeps adids that appear
    // in only one of the two logs, so no data is lost.
    val counts: RDD[(String, (Int, Int))] =
      clickRDD.union(impRDD).reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))

    // Two actions (debug print + save) consume this RDD; cache it so the
    // file reads and the shuffle are not recomputed for the second action.
    counts.cache()

    // NOTE: prints on the driver only because master is local[*]; on a real
    // cluster foreach(println) would print on the executors instead.
    counts.foreach(println)

    // Required output structure is: adid, impression count, click count.
    // The accumulated tuple is (clicks, impressions), so reorder when formatting.
    counts
      .map { case (adId, (clicks, impressions)) => s"$adId\t$impressions\t$clicks" }
      .saveAsTextFile("hdfs://linux121:9000/spark/test")

    sc.stop()
  }
}
