package com.yanduo.report

import com.yanduo.beans.Log
import com.yanduo.utils.RptUtils
import org.apache.spark.sql.{DataFrame, Dataset, SQLContext, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * 广告在每个地域的投放情况统计
  * 地域报表实现- spark core 方式
  *
  * https://www.bilibili.com/video/BV1F4411i7jK?p=19
  *
  * @author Gerry chan
  * @version 1.0
  */
object AreaAnalyseRptV2 {

  /**
    * Entry point. Expects exactly two arguments:
    * args(0) = logInputPath (parquet / raw text log location),
    * args(1) = resultOutputPath (report output directory).
    */
  def main(args: Array[String]): Unit = {
    // 0. Validate argument count. We destructure exactly TWO arguments below,
    //    so the guard must check for 2 (the original checked `!= 1`, which
    //    contradicted both the destructuring and the usage text, and would
    //    crash with a MatchError on a single argument).
    if (args.length != 2) {
      println(
        """
          |com.yanduo.report.AreaAnalyseRptV2
          |参数：
          | logInputPath
          | resultOutputPath
        """.stripMargin)
      sys.exit(1) // non-zero exit code: signal invalid invocation to the caller
    }

    // 1. Receive program arguments
    val Array(logInputPath, resultOutputPath) = args

    // 2. SparkConf -> SparkSession. Use the builder API instead of the
    //    package-private `new SparkSession(sc)` constructor.
    val sparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      // Kryo serializer: RDD serialization to disk and worker-to-worker transfer
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc: SparkContext = spark.sparkContext

    // Encoders needed for the Dataset[((String, String), List[Double])]
    // produced by the typed `map` below.
    import spark.implicits._

    // ---- Method 1: read the parquet log file ----
    val parquetData: DataFrame = spark.read.parquet(logInputPath)

    // Each Row -> ((province, city), counters). The counter list is the
    // concatenation of three metric lists: request counts, RTB (bidding)
    // counts, and show/click counts.
    val results: Dataset[((String, String), List[Double])] = parquetData.map(row => {
      // original request / valid request / ad request flags
      val reqMode = row.getAs[Int]("requestmode")
      val prcNode = row.getAs[Int]("processnode")

      // bidding: participated in auction / won the auction
      val effTive = row.getAs[Int]("iseffective")
      val bill = row.getAs[Int]("isbilling")
      val bid = row.getAs[Int]("isbid")
      val orderId = row.getAs[Int]("adorderid")
      val win = row.getAs[Int]("iswin")
      val winPrice = row.getAs[Double]("winprice")
      val adPayMent = row.getAs[Double]("adpayment")

      val reqList: List[Double] = RptUtils.caculateReq(reqMode, prcNode)
      val rtbList: List[Double] = RptUtils.caulateRtb(effTive, bill, bid, orderId, win, winPrice, adPayMent)
      // ads: shows and clicks
      val showClickList: List[Double] = RptUtils.caculateShowClick(reqMode, effTive)

      // ((province, city), merged counter list) — `++` concatenates the lists.
      // NOTE(review): "provicename" looks like a typo for "provincename" (the
      // raw-log branch uses log.provincename), but it must match the actual
      // parquet column name — verify against the schema before renaming.
      ((row.getAs[String]("provicename"), row.getAs[String]("cityname")), reqList ++ rtbList ++ showClickList)
    })

    // Aggregate per (province, city): element-wise sum of the counter lists,
    // then format each record as "province,city,c1,c2,...".
    results.rdd
      .reduceByKey((l1, l2) => l1.zip(l2).map { case (a, b) => a + b })
      .map { case ((province, city), counters) => s"$province,$city,${counters.mkString(",")}" }
      .saveAsTextFile(resultOutputPath)

    // ---- Method 2: same report computed from the raw text log ----
    // Written to a sibling "<resultOutputPath>_v2" directory: saveAsTextFile
    // fails with FileAlreadyExistsException if the output directory exists,
    // so the original code (same path twice) always crashed on this step.
    sc.textFile(logInputPath)
      .map(_.split(",", -1))
      .filter(_.length >= 85) // drop malformed records with too few fields
      .map(arr => {
        val log = Log(arr)
        // BUG FIX: the second argument must be processnode — the parquet
        // branch passes (reqMode, prcNode); the original passed requestmode
        // twice. (Assumes Log exposes `processnode` — confirm against beans.Log.)
        val reqList: List[Double] = RptUtils.caculateReq(log.requestmode, log.processnode)
        val rtbList: List[Double] = RptUtils.caulateRtb(log.iseffective, log.isbilling, log.isbid, log.adorderid,
          log.iswin, log.winprice, log.adpayment)
        // ads: shows and clicks
        val showClickList: List[Double] = RptUtils.caculateShowClick(log.requestmode, log.iseffective)

        ((log.provincename, log.cityname), reqList ++ rtbList ++ showClickList)
      })
      .reduceByKey((l1, l2) => l1.zip(l2).map { case (a, b) => a + b })
      .map { case ((province, city), counters) => s"$province,$city,${counters.mkString(",")}" }
      .saveAsTextFile(resultOutputPath + "_v2")

    spark.stop()
  }

}
