import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Counts, per advertisement id (`adid`), the number of impressions and clicks
 * found in two local log files, then writes one record per adid to a text
 * file in the form `(adid, impressions, clicks)`.
 *
 * A full outer join is used so that no adid is lost when it appears in only
 * one of the two logs (the missing side is rendered as "NULL").
 */
object StatClickLog {

  /** Marker that precedes the adid value inside the URL field. */
  private val AdidMarker = "&adid="
  /** Marker that terminates the adid value inside the URL field. */
  private val EndMarker = "&industry"

  /**
   * Extracts the `adid` value from the 4th whitespace-separated field of a
   * log line.
   *
   * @param line one raw log line
   * @return `Some(adid)` when both markers are present, `None` for malformed
   *         lines (fewer than 4 fields, or markers missing/misordered) so
   *         bad input is skipped instead of crashing the job.
   */
  private def extractAdid(line: String): Option[String] = {
    val fields = line.split("\\s+")
    if (fields.length < 4) None
    else {
      val url = fields(3)
      val start = url.indexOf(AdidMarker)
      val end = url.indexOf(EndMarker)
      // Original code called substring unguarded; indexOf == -1 would throw
      // (or silently extract garbage). Guard both markers explicitly.
      if (start >= 0 && end > start) Some(url.substring(start + AdidMarker.length, end))
      else None
    }
  }

  /**
   * Reads a log file and returns the per-adid occurrence count.
   * Shared by both the click and the impression pipelines (the original
   * duplicated this logic verbatim).
   */
  private def countByAdid(sc: SparkContext, path: String): RDD[(String, Int)] =
    sc.textFile(path)
      .flatMap(line => extractAdid(line).map(adid => (adid, 1)))
      .reduceByKey(_ + _)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[*]")
      // .init drops the trailing '$' of the Scala object's class name.
      .setAppName(this.getClass.getCanonicalName.init)
    val sc = new SparkContext(conf)
    // Split size for local-filesystem input (128 MB).
    sc.hadoopConfiguration.setLong("fs.local.block.size", 128 * 1024 * 1024)

    // Paths may be overridden on the command line:
    //   args(0) = click log, args(1) = impression log, args(2) = output dir.
    // Defaults preserve the original hard-coded locations.
    val clickPath = args.lift(0).getOrElse(
      "file:///D:\\BaiduNetdiskDownload\\lagoubigdata\\fourthPhrase\\大数据正式班第四阶段模块一\\scala编程\\0-讲义和代码\\代码\\sparkPartOne\\data\\click.log")
    val impPath = args.lift(1).getOrElse(
      "file:///D:\\BaiduNetdiskDownload\\lagoubigdata\\fourthPhrase\\大数据正式班第四阶段模块一\\scala编程\\0-讲义和代码\\代码\\sparkPartOne\\data\\imp.log")
    val outPath = args.lift(2).getOrElse(
      "file:///D:\\BaiduNetdiskDownload\\lagoubigdata\\fourthPhrase\\大数据正式班第四阶段模块一\\scala编程\\0-讲义和代码\\代码\\sparkPartOne\\data\\StatClickLog.log")

    val clickRDD: RDD[(String, Int)] = countByAdid(sc, clickPath)
    val impRDD: RDD[(String, Int)] = countByAdid(sc, impPath)

    println("用Spark-Core实现统计每个adid的曝光数与点击数，将结果输出到hdfs文件；输出文件结构为adid、曝光数、点击数。注意：数据不能有丢失")

    // fullOuterJoin keeps adids present in only one log ("no data loss").
    // NOTE: the required output order is (adid, impressions, clicks); the
    // original emitted clicks before impressions — fixed here.
    val result = clickRDD
      .fullOuterJoin(impRDD)
      .map { case (adid, (clickOpt, impOpt)) =>
        (adid,
          impOpt.map(_.toString).getOrElse("NULL"),
          clickOpt.map(_.toString).getOrElse("NULL"))
      }

    result.foreachPartition(_.foreach(println))
    // coalesce(1) so a single output part file is produced; saveAsTextFile
    // creates a directory at outPath containing part-00000.
    result.coalesce(1, shuffle = false).saveAsTextFile(outPath)
    sc.stop()
  }
}