package homework3

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/** Spark batch job: per-ad click and impression counts.
  *
  * Reads click and impression event logs, counts events per ad id, joins the
  * two counts (keeping ads present in only one log), prints the result and
  * writes it to HDFS. Output records are (adId, (clicks, impressions)).
  */
object ADStat {

  /** Extracts the ad id from a "key=value" log field.
    *
    * Takes the text AFTER the last '=': the original used `lastIndexOf("=")`
    * without `+ 1`, which kept the '=' in every key (e.g. "=42").
    */
  private def parseAdId(field: String): String =
    field.substring(field.lastIndexOf("=") + 1)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("ADStat")
    val sc = new SparkContext(conf)

    // 1. Read the logs: one (adId, 1) pair per event.
    //    NOTE(review): assumes the ad id lives in the 4th space-separated
    //    field as "...=<id>" — confirm against the log format.
    val clickRDD: RDD[(String, Int)] = sc.textFile("data/click.log").map { line =>
      (parseAdId(line.split(" ")(3)), 1)
    }

    val impRDD: RDD[(String, Int)] = sc.textFile("data/imp.log").map { line =>
      (parseAdId(line.split(" ")(3)), 1)
    }

    // 2. Reduce per key BEFORE joining. Joining the raw per-event RDDs (as the
    //    original did) yields a cartesian product per key — an ad with c clicks
    //    and i impressions produced c*i joined rows, so the later aggregation
    //    summed inflated counts. Reducing first makes each key unique per side.
    val clickCounts: RDD[(String, Int)] = clickRDD.reduceByKey(_ + _)
    val impCounts: RDD[(String, Int)] = impRDD.reduceByKey(_ + _)

    // 3. Full outer join keeps ads that appear in only one of the two logs;
    //    the missing side defaults to 0. No further aggregation is needed.
    val result: RDD[(String, (Int, Int))] =
      clickCounts.fullOuterJoin(impCounts).mapValues { case (clicks, imps) =>
        (clicks.getOrElse(0), imps.getOrElse(0))
      }

    // Debug output: collect is safe only for small result sets.
    println(result.collect().toBuffer)

    // 4. Persist to HDFS.
    result.saveAsTextFile("hdfs:///tmp/homework")

    sc.stop()
  }

}
