package com.headfirst.dmp.report.AppAnalyzeRpt

import com.headfirst.dmp.beans.LogBean
import com.headfirst.dmp.utils.AreaAnalyzeUtils
import org.apache.commons.lang.StringUtils
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * 媒体报表分析
  *
  * 使用工具：spark-core
  *
  * 注意点：因为需要匹配规则数据，且规则数据基本不需要改变，因此这里使用Broadcast去广播变量，减少数据加载的IO开销
  *
  */
object AppAnalyzeRPT_Broadcast {

  /**
    * Entry point. Expects exactly three arguments:
    * logInputPath, resultOutputPath, rulesFileInputPath.
    * Exits with status -1 and prints usage when the argument count is wrong.
    */
  def main(args: Array[String]): Unit = {

    // 1. Validate arguments.
    if (args.length != 3) {
      print(
        """
          |com.headfirst.dmp.report.AppAnalyzeRpt.AppAnalyzeRPT_Broadcast
          |需要参数：
          |       logInputPath
          |       resultOutputPath
          |       rulesFileInputPath
        """.stripMargin)
      sys.exit(-1)
    }

    // 2. Extract arguments.
    val Array(logInputPath, resultOutputPath, rulesFileInputPath) = args

    // 3. Build the SparkContext.
    val conf: SparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]") // local testing only; comment out when submitting to a cluster
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer") // Kryo instead of default Java serialization

    val context: SparkContext = new SparkContext(conf)

    try {
      // 4. Load the rules file and broadcast it.
      // collect() is mandatory here: the rules file is read as partitions, and
      // without collecting, only a partial slice of the rules would be broadcast.
      // A Driver-side mutable HashMap populated inside map() would NOT work either,
      // because map() runs on the executors, not on the driver.
      val appIdToName: Map[String, String] = context.textFile(rulesFileInputPath)
        .map { line =>
          val fields: Array[String] = line.split("\t", -1)
          (fields(4), fields(1)) // appid -> appname  (column layout per rules file)
        }
        .collect()
        .toMap

      val broadcastValue: Broadcast[Map[String, String]] = context.broadcast(appIdToName)

      // 5. Parse the log file and compute per-app metric vectors.
      val data: RDD[(String, List[Double])] = context.textFile(logInputPath)
        .map(_.split(",", -1))
        .filter(_.length >= 85) // drop malformed rows with too few columns
        .map(LogBean(_))
        .filter(log => !log.appid.isEmpty || !log.appname.isEmpty)
        .map { log =>
          // Resolve the app name: fall back to the broadcast rules when the
          // log row carries no name; unknown ids map to the sentinel value.
          val appname: String =
            if (StringUtils.isBlank(log.appname))
              broadcastValue.value.getOrElse(log.appid, "unknowAppName")
            else
              log.appname

          // Assemble the metric vector.
          // original requests, valid requests, ad requests
          val reqList: List[Double] = AreaAnalyzeUtils.caculateReq(log.requestmode, log.processnode)
          // bid participations, bids won
          val bidList: List[Double] = AreaAnalyzeUtils.caculateBid(log.adplatformproviderid, log.iseffective, log.isbilling, log.isbid, log.adorderid, log.iswin)
          // impressions, clicks
          val showList: List[Double] = AreaAnalyzeUtils.caculateShowAndClick(log.requestmode, log.iseffective)
          // ad spend, ad cost
          val costList: List[Double] = AreaAnalyzeUtils.caculateCost(log.adplatformproviderid, log.iseffective, log.isbilling, log.iswin, log.adorderid, log.adcreativeid, log.winprice, log.adpayment)

          (appname, reqList ++ bidList ++ showList ++ costList)
        }

      // 6. Aggregate per app name: element-wise sum of the metric lists
      //    (zip pairs positions, then each pair is summed).
      data.reduceByKey((list1, list2) => list1.zip(list2).map { case (a, b) => a + b })
        // BUG FIX: the previous version concatenated the raw List toString
        // ("appname,List(1.0, ...),1.0,...") before mkString; emit clean CSV only.
        .map { case (app, metrics) => app + "," + metrics.mkString(",") }
        .saveAsTextFile(resultOutputPath)
    } finally {
      // 7. Always release the SparkContext, even when a stage fails.
      context.stop()
    }
  }
}
