import org.apache.spark.{SparkConf, SparkContext}

/**
 * Homework3: aggregates per-ad click counts and impression counts from two
 * log files and writes the joined (adId, (clicks, impressions)) result to HDFS.
 *
 * date: 2021/7/9 8:46
 *
 * @author Hesion
 * @since JDK 1.8
 */
object Homework3 {

  /**
   * Entry point: reads ad click and impression logs, counts clicks and
   * impressions per ad id, combines the two counts per ad, and saves the
   * result to HDFS as (adId, (clickCount, impressionCount)).
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("HomeWork3").setMaster("local")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // try/finally so the SparkContext is stopped even if a stage fails.
    try {
      // Each log line is whitespace-separated; the 4th field ends with "...=<adId>".
      val clickLog = sc.textFile("data/click.log")
      val impLog = sc.textFile("data/imp.log")

      // (adId, (clickCount, 0)) — one record per click, summed per ad.
      // BUG FIX: reducer was (x._1 + x._1, 0), which doubled the left
      // accumulator and ignored y, yielding wrong click totals.
      val clickRDD = clickLog.map { line =>
        val arr = line.split("\\s+")
        val adid = arr(3).substring(arr(3).lastIndexOf("=") + 1)
        (adid, (1, 0))
      }.reduceByKey((x, y) => (x._1 + y._1, 0))

      // (adId, (0, impressionCount)) — one record per impression, summed per ad.
      val impRDD = impLog.map { line =>
        val arr = line.split("\\s+")
        val adid = arr(3).substring(arr(3).lastIndexOf("=") + 1)
        (adid, (0, 1))
      }.reduceByKey((x, y) => (0, x._2 + y._2))

      // union + reduceByKey gives full-outer-join semantics: an ad present in
      // only one log keeps 0 for the missing count.
      val resultRDD = clickRDD.union(impRDD)
        .reduceByKey((x, y) => (x._1 + y._1, x._2 + y._2))

      // Persist the combined counts to HDFS.
      resultRDD.saveAsTextFile("hdfs://linux121:9000/data/")
    } finally {
      sc.stop()
    }
  }
}
