package com.lagou.no3

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object No3 {

    /**
      * Parses one log line into an (id, 1) pair for counting.
      *
      * The id is taken from the 4th whitespace-separated field, keeping
      * everything after the last '=' (e.g. "...?id=abc" -> "abc").
      * Shared by both the click and impression pipelines below.
      */
    private def toIdCount(line: String): (String, Int) = {
        val fields = line.split("\\s+")
        val raw = fields(3)
        (raw.substring(raw.lastIndexOf("=") + 1), 1)
    }

    def main(args: Array[String]): Unit = {
        // Quiet Spark's internal logging.
        Logger.getLogger("org").setLevel(Level.WARN)

        // Build the SparkSession (local mode, all cores).
        val spark = SparkSession
                .builder()
                .appName(s"${this.getClass.getCanonicalName}")
                .master("local[*]")
                .getOrCreate()
        val sc = spark.sparkContext

        // Per-id click counts from the click log.
        val clickRdd = sc.textFile("data/click.log")
                .map(toIdCount)
                .reduceByKey(_ + _)

        // Per-id impression counts from the impression log.
        val impRdd = sc.textFile("data/imp.log")
                .map(toIdCount)
                .reduceByKey(_ + _)

        /* Alternative: fullOuterJoin keeps click and impression counts as
         * separate columns, at the cost of a join shuffle:
         *
         *   val unionRdd: RDD[(String, (Option[Int], Option[Int]))] =
         *       clickRdd.fullOuterJoin(impRdd)
         *   val res = unionRdd.map(r =>
         *       (r._1, r._2._1.getOrElse(0), r._2._2.getOrElse(0)))
         *
         * NOTE(review): the original comments claimed "two shuffles" for BOTH
         * strategies, which contradicts choosing one over the other — confirm
         * the intended shuffle analysis against the actual DAG.
         */
        // union + reduceByKey. NOTE: unlike the join above, this SUMS the
        // click count and the impression count into a single total per id.
        val res: RDD[(String, Int)] = clickRdd.union(impRdd).reduceByKey(_ + _)

        // Collapse to a single partition so HDFS gets one output file.
        res.repartition(1).saveAsTextFile("hdfs://node01:9000/data/")

        // Stop the session (also stops the underlying SparkContext).
        spark.stop()
    }
}
