package com.sunzm.spark.core.exercise

import com.sunzm.spark.core.exercise.SparkRDDExercise.method2
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Exercise: aggregate per-ad click and exposure counts from log files using Spark RDDs.
 *
 * NOTE(review): the object name is spelled `SparkRDDExeercise` (double "ee") while the
 * top-of-file import references `SparkRDDExercise.method2`. The local `method2` defined
 * here shadows that import (local definitions take precedence over imports in Scala),
 * so the import is dead — consider removing it and fixing the object name if no other
 * code depends on the current spelling.
 */
object SparkRDDExeercise {

  def main(args: Array[String]): Unit = {
    // Run in local mode on all available cores.
    val conf = new SparkConf().setAppName("RDD转换类算子示例")
      .setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)

    // Method 1:
    //method1(sc)
    method2(sc)

    // stop() is side-effecting; keep the parentheses per Scala convention.
    sc.stop()
  }

  /**
   * Reads click and exposure logs and prints (clickCount, exposureCount) per adId.
   *
   * Click lines contain the request path "/c", e.g.:
   *   INFO 2016-07-25 00:29:53 requestURI:/c?app=0&p=1&did=18005472&industry=469&adId=31
   * All other lines are treated as exposure lines. Each line is keyed by the text
   * following "adId=" (assumes adId is the final query parameter on the line —
   * TODO confirm against the real log format).
   *
   * @param sc active SparkContext used to read the log files
   */
  def method2(sc: SparkContext): Unit = {

    // Glob syntax loads both files into a single RDD of lines.
    val dataRDD: RDD[String] = sc.textFile("data/spark/rdd/{click.log,exposure.log}")

    val reduceRDD = dataRDD
      // Robustness: skip malformed lines without an "adId=" marker instead of
      // failing with ArrayIndexOutOfBoundsException inside the executor.
      .filter(line => StringUtils.contains(line, "adId="))
      .map { line =>
        // Split on the whole separator "adId="; element 1 is everything after it.
        val fields: Array[String] =
          StringUtils.splitByWholeSeparatorPreserveAllTokens(line, "adId=")
        val adId = fields(1)
        // "/c" marks a click line; anything else is an exposure line.
        if (StringUtils.contains(line, "/c")) (adId, (1, 0)) else (adId, (0, 1))
      }
      // Sum (clicks, exposures) component-wise per adId.
      .reduceByKey { case ((c1, e1), (c2, e2)) => (c1 + c2, e1 + e2) }

    // Print the aggregated counts (runs on the executors in local mode).
    reduceRDD.foreach {
      case (adId, (clickCount, exposureCount)) =>
        println(s"adId: ${adId},点击次数：${clickCount},曝光次数： ${exposureCount}")
    }

  }

}

