package part03

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object ExposeAndClick {

  /**
   * Counts impressions and clicks per adid from two log files, prints the
   * joined counts, and writes the combined result (adid, impressions, clicks)
   * to HDFS. Two implementations are shown:
   *
   *  1. Independent per-source aggregation followed by a `fullOuterJoin`
   *     (multiple shuffles).
   *  2. Tagging each record as an (impression, click) pair, unioning, and
   *     aggregating with a single `reduceByKey` (one shuffle) — preferred.
   */
  def main(args: Array[String]): Unit = {
    // `.init` drops the trailing '$' from the Scala object's class name.
    val conf = new SparkConf().setAppName(this.getClass.getCanonicalName.init).setMaster("local[*]")
    val sc = new SparkContext(conf)

    val rdd1 = sc.textFile("data/click.log")
    val rdd2 = sc.textFile("data/imp.log")

    // 3.1 Use Spark Core to count impressions and clicks per adid and output
    // the result as (adid, impressions, clicks). Note: no data may be lost,
    // hence the full outer join — adids present in only one source are kept.
    // NOTE(review): field 5 of the '='-split line is assumed to be the adid —
    // confirm against the log format.
    val clickRDD = rdd1.map(_.split("=")(5))
      .map((_, 1))
      .reduceByKey(_ + _)

    rdd2.map(_.split("=")(5))
      .map((_, 1))
      .reduceByKey(_ + _)
      .fullOuterJoin(clickRDD)
      // fullOuterJoin yields (Option[impCount], Option[clickCount]); a missing
      // side simply means zero occurrences, so default with getOrElse instead
      // of enumerating all four Some/None combinations.
      .mapValues { case (impCount, clickCount) =>
        (impCount.getOrElse(0), clickCount.getOrElse(0))
      }
      .sortByKey()
      .collect.foreach(println)

    // 3.2 How many shuffles does the code above use — can it be reduced?
    // Answer: the join approach shuffles four times (two reduceByKey, the
    // fullOuterJoin, and sortByKey). Tagging each record as a partial
    // (impressions, clicks) pair lets a single reduceByKey do all the work.
    val impRDD = rdd2.map(_.split("=")(5))
      .map((_, (1, 0))) // (adid, (1 impression, 0 clicks))
    val clickRDD1 = rdd1.map(_.split("=")(5))
      .map((_, (0, 1))) // (adid, (0 impressions, 1 click))

    // union is shuffle-free; one reduceByKey sums both counters element-wise.
    val result: RDD[(String, (Int, Int))] = impRDD.union(clickRDD1)
      .reduceByKey { case ((imp1, clk1), (imp2, clk2)) => (imp1 + imp2, clk1 + clk2) }

    // Write the combined counts to HDFS.
    result.saveAsTextFile("hdfs://centos7-1:9000/data/result")
    sc.stop()
  }
}
