package com.headfirst.dmp.report.AreanalyzeRpt

import com.headfirst.dmp.utils.AreaAnalyzeUtils
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * 分析统计按照不同地域的各项指标
  *
  * 使用工具：Spark-Core
  */
object AreaAnalyzeRPT_V2 {

  def main(args: Array[String]): Unit = {

    // 1. Validate arguments: exactly two paths (input logs, output directory) are required.
    if (args.length != 2) {
      // Fully-qualified class name must include the enclosing sub-package "AreanalyzeRpt"
      // (the previous message omitted it and pointed to a non-existent class).
      println(
        """
          |com.headfirst.dmp.report.AreanalyzeRpt.AreaAnalyzeRPT_V2
          |需要参数：
          |       logInputPath
          |       resultOutputPath
        """.stripMargin)
      sys.exit(-1)
    }

    // 2. Destructure the two positional arguments.
    val Array(logInputPath, resultOutputPath) = args

    // 3. Build the SparkSession.
    val conf = new SparkConf()
    conf.setMaster("local[*]") // local testing only; comment out when submitting to a cluster
    conf.setAppName("AreaAnalyzeRPT")
    // Use Kryo serialization instead of the default Java serialization (faster, smaller payloads).
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val session: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // 4. Read the parquet-encoded ad log.
    val logFile: DataFrame = session.read.parquet(logInputPath)

    /**
      * 5. Process the data.
      *
      * Approach: this is a per-key aggregation, so reduceByKey is the natural fit.
      * Key   = (provincename, cityname)
      * Value = List[Double] of metric counters, concatenated from the four helper calls
      *         below so that position i of every list refers to the same metric.
      */
    // Needed for the Dataset encoder used by logFile.map below.
    import session.implicits._

    val data: RDD[((String, String), List[Double])] = logFile.map(row => {
      val provincename = row.getAs[String]("provincename")
      val cityname = row.getAs[String]("cityname")

      val adplatformproviderid = row.getAs[Int]("adplatformproviderid")
      val requestmode = row.getAs[Int]("requestmode")
      val processnode = row.getAs[Int]("processnode")
      val iseffective = row.getAs[Int]("iseffective")
      val isBilling = row.getAs[Int]("isbilling")
      val isbid = row.getAs[Int]("isbid")
      // NOTE(review): "isWin" is mixed-case while every other column is lowercase —
      // getAs is case-sensitive on the schema name; verify against the parquet schema.
      val isWin = row.getAs[Int]("isWin")
      // NOTE(review): same concern for "ADOrderid" — confirm exact column name.
      val ADOrderid = row.getAs[Int]("ADOrderid")
      val adcreativeid = row.getAs[Int]("adcreativeid")
      // NOTE(review): "winpirce" looks like a typo for "winprice" — confirm it matches
      // the actual column name in the source data before "fixing" it.
      val winPrice = row.getAs[Double]("winpirce")
      val adPayment = row.getAs[Double]("adPayment")

      // Original / valid / ad requests
      val reqList: List[Double] = AreaAnalyzeUtils.caculateReq(requestmode, processnode)
      // Bid participation and bid-win counts
      val bidList: List[Double] = AreaAnalyzeUtils.caculateBid(adplatformproviderid, iseffective, isBilling, isbid, ADOrderid, isWin)
      // Impressions and clicks
      val showList: List[Double] = AreaAnalyzeUtils.caculateShowAndClick(requestmode, iseffective)
      // Ad spend and ad cost
      val costList: List[Double] = AreaAnalyzeUtils.caculateCost(adplatformproviderid, iseffective, isBilling, isWin, ADOrderid, adcreativeid, winPrice, adPayment)

      // One record per log row: positional metric vector keyed by (province, city).
      ((provincename, cityname), reqList ++ bidList ++ showList ++ costList)

    }).rdd // must convert to RDD here: Dataset has no reduceByKey

    // Aggregate per (province, city) by element-wise list addition via zip:
    // zip of List(1,2,3) and List(2,4,6) gives List((1,2),(2,4),(3,6)),
    // then each pair is summed to produce the combined metric vector.
    data.reduceByKey((list1, list2) => {
      list1.zip(list2).map(t => t._1 + t._2)
    }).map(t => {
      // CSV line: province,city,metric1,metric2,...
      s"${t._1._1},${t._1._2},${t._2.mkString(",")}"
    }).saveAsTextFile(resultOutputPath)

    // Release Spark resources.
    session.stop()

  }

}
