package com.test.cn.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

object SparkQuestion {

  /**
   * Extracts ("adid-value", 1) pairs from one raw log line.
   *
   * Expected line shape (from the sample in the original comments):
   *   INFO 2019-09-01 00:29:53 requestURI:/click?app=1&p=1&adid=18005472&industry=469&adid=31
   *
   * The line is split on '&'; every segment containing "adid" contributes one
   * pair keyed by the text after '='. A line carrying "adid" twice therefore
   * yields two pairs — identical to the original loop's behavior.
   *
   * NOTE(review): a segment containing "adid" but no '=' would throw
   * ArrayIndexOutOfBoundsException here, exactly as in the original code;
   * confirm the log format guarantees "adid=value" before hardening.
   */
  def extractAdIdCounts(line: String): Seq[(String, Int)] =
    line.split("&").toSeq.collect {
      case segment if segment.contains("adid") => (segment.split("=")(1), 1)
    }

  /**
   * Reads the log file at `path` and returns (adid, occurrence count) pairs.
   * Shared by the click-log and impression-log pipelines, which previously
   * duplicated this logic verbatim.
   */
  def countAdIds(sc: SparkContext, path: String): RDD[(String, Int)] =
    sc.textFile(path)
      .flatMap(extractAdIdCounts)
      .reduceByKey(_ + _)

  def main(args: Array[String]): Unit = {
    // Create the SparkContext. `.init` strips the trailing '$' from the
    // object's canonical class name to produce a clean app name.
    val conf = new SparkConf().setAppName(this.getClass.getCanonicalName.init).setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // Set the local-filesystem split size (128 MB).
    sc.hadoopConfiguration.setLong("fs.local.block.size", 128 * 1024 * 1024)

    // Business logic: count ad ids per file through the shared pipeline.
    val clickRdd: RDD[(String, Int)] = countAdIds(sc, "spark_scala_home_work/src/data/click.log")
    val impRdd: RDD[(String, Int)]   = countAdIds(sc, "spark_scala_home_work/src/data/imp.log")

    // Full outer join keeps ad ids present in only one of the two logs:
    // (adid, (impressionCount, clickCount)) with Option for missing sides.
    val joined: RDD[(String, (Option[Int], Option[Int]))] = impRdd.fullOuterJoin(clickRdd)

    // Print results on the driver (unchanged: `print`, no newline separator).
    joined.collect().foreach(print)

    // NOTE(review): presumably keeps the driver (and Spark UI) alive for
    // manual inspection; it delays sc.stop() by ~28 hours — confirm intent.
    Thread.sleep(100000000)

    // Shut down the SparkContext.
    sc.stop()
  }

}
