package cn.lagou.spark.ads

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
 * Ad statistics batch job: counts clicks and impressions per ad id from two
 * whitespace-delimited log files, prints the totals sorted by ad id, and
 * writes the result to HDFS.
 *
 * Input line format (both logs): the 4th field contains the ad id after the
 * last '=' (e.g. "... adid=17 ..."). Output records are (adid, (clicks, imps)).
 */
object AdsStatistics {

  /**
   * Extracts the ad id from a raw log line.
   *
   * Splits on runs of whitespace and takes the substring of the 4th field
   * after the last '='. Assumes every line has at least 4 fields — lines
   * violating this would throw ArrayIndexOutOfBoundsException, matching the
   * original behavior.
   */
  private def parseAdId(line: String): String = {
    val fields: Array[String] = line.split("\\s+")
    val rawField: String = fields(3)
    rawField.substring(rawField.lastIndexOf("=") + 1)
  }

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getCanonicalName)
      .master("local[*]")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("warn")

    // Click log: each line contributes (adid, (1 click, 0 impressions)).
    val clickLog: RDD[String] = sc.textFile("data/click.log")
    val clkRDD: RDD[(String, (Int, Int))] =
      clickLog.map(line => (parseAdId(line), (1, 0)))

    // Impression log: each line contributes (adid, (0 clicks, 1 impression)).
    val impLog: RDD[String] = sc.textFile("data/imp.log")
    val impRDD: RDD[(String, (Int, Int))] =
      impLog.map(line => (parseAdId(line), (0, 1)))

    // Union both streams and sum the (click, impression) pairs per ad id.
    // numPartitions = 1 so the HDFS output is a single part file.
    val resultRDD: RDD[(String, (Int, Int))] = clkRDD.union(impRDD)
      .reduceByKey((x, y) => (x._1 + y._1, x._2 + y._2), 1)

    // Print totals sorted by ad id (driver-side; result set is small).
    resultRDD.sortBy(_._1).collect.foreach(println)

    // Persist to HDFS. NOTE(review): saveAsTextFile fails if the target
    // directory already exists — delete it beforehand on reruns.
    resultRDD.saveAsTextFile("hdfs://linux121:9000/spark-result/")

    sc.stop()
  }
}
