package kk.learn.spark.work._3

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
 * <p>
 * Counts ad impressions and clicks per adid from raw access logs and writes
 * the result as (adid, impressions, clicks) triples.
 * </p>
 *
 * @author KK
 * @since 2021-03-05
 */
object ClickImp {

  // Default input/output locations, used when no CLI arguments are supplied.
  private val DefaultClickPath  = "code/data/click.log"
  private val DefaultImpPath    = "code/data/imp.log"
  private val DefaultOutputPath = "hdfs://linux121:9000//spark/clickImp/output/"

  /**
   * Extracts every `adid` query-parameter value from a single access-log line.
   *
   * The line is expected to be whitespace-separated with the request URI in
   * the fourth field (e.g. `... GET /ad?adid=1&x=y ...`). Malformed lines —
   * fewer than four fields, a URI without a query string, or an `adid`
   * parameter with no value — yield an empty array instead of throwing.
   *
   * @param log a single raw log line
   * @return all adid values found in the line's query string
   */
  def getAdids(log: String): Array[String] = {
    val queryOpt = for {
      uri   <- log.split("\\s+").lift(3)   // 4th field is the request URI
      query <- uri.split("\\?", 2).lift(1) // everything after the first '?'
    } yield query

    queryOpt.fold(Array.empty[String]) { query =>
      query.split("&")
        .map(_.split("=", 2)) // limit 2 keeps any '=' inside the value intact
        .collect { case Array("adid", value) => value } // skips valueless params safely
    }
  }

  /**
   * Unions the click and impression logs and writes, per adid, the impression
   * and click totals as `(adid, impressions, clicks)` tuples.
   *
   * Optional CLI arguments (each falls back to the original hard-coded path):
   * `args(0)` click-log path, `args(1)` impression-log path, `args(2)` output
   * directory.
   */
  def main(args: Array[String]): Unit = {

    val clickPath  = args.lift(0).getOrElse(DefaultClickPath)
    val impPath    = args.lift(1).getOrElse(DefaultImpPath)
    val outputPath = args.lift(2).getOrElse(DefaultOutputPath)

    val spark = SparkSession
      .builder()
      .appName("ClickImp")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("warn")

    try {
      // Read the raw log files.
      val clicks: RDD[String] = sc.textFile(clickPath)
      val imps: RDD[String] = sc.textFile(impPath)

      // Tag each adid occurrence with an (impressions, clicks) count pair so
      // a single reduceByKey both counts and pivots rows to columns — this
      // avoids the groupByKey shuffle of every individual record.
      val clickCounts = clicks.flatMap(getAdids).map(adid => (adid, (0L, 1L)))
      val impCounts   = imps.flatMap(getAdids).map(adid => (adid, (1L, 0L)))

      clickCounts.union(impCounts)
        .reduceByKey { case ((imp1, clk1), (imp2, clk2)) => (imp1 + imp2, clk1 + clk2) }
        .map { case (adid, (impCnt, clickCnt)) => (adid, impCnt, clickCnt) }
        .coalesce(1) // emit a single output file
        .saveAsTextFile(outputPath)
    } finally {
      // Always release the SparkSession, even if the job fails.
      spark.close()
    }
  }
}
