package clickAndImp

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object clickAndImp {

  /** Extracts the ad id from a raw log line: the value after the last '='
    * in the 4th whitespace-separated field.
    * NOTE(review): assumes every line has at least 4 fields and the 4th
    * contains '=' — confirm against the actual log format.
    */
  private def parseAdId(line: String): String = {
    val field = line.split("\\s+")(3)
    field.substring(field.lastIndexOf("=") + 1)
  }

  /** Joins click and impression logs and writes per-ad (clicks, impressions)
    * counts to HDFS.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ClickImpLog").setMaster("local[*]")
    val sc = new SparkContext(conf)
    // Canonical Spark log levels are upper-case; older versions reject "warn".
    sc.setLogLevel("WARN")

    try {
      val clickLogs = sc.textFile("data/click.log")
      val impLogs = sc.textFile("data/imp.log")

      // Tag each click as (clicks = 1, impressions = 0).
      val clickLog: RDD[(String, (Int, Int))] =
        clickLogs.map(line => (parseAdId(line), (1, 0)))

      // Tag each impression as (clicks = 0, impressions = 1).
      val impLog: RDD[(String, (Int, Int))] =
        impLogs.map(line => (parseAdId(line), (0, 1)))

      // union is a narrow dependency, so merging the two RDDs first and then
      // aggregating with a single reduceByKey keeps the job to one shuffle.
      clickLog
        .union(impLog)
        .reduceByKey { case ((c1, i1), (c2, i2)) => (c1 + c2, i1 + i2) }
        .saveAsTextFile("hdfs://linux121:9000/data/clickAndImp")
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
