package cn.dmp.report

import cn.dmp.beans.Log
import cn.dmp.utils.RptUItils
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}


/**
  * Regional distribution statistics for ad delivery.
  * Implementation (reading parquet files):
  *     (1) Spark SQL (via SparkSession).
  *     (2) Spark operators (Spark core, via SparkContext; SparkSession may still
  *         be used, just without SQL queries).
  */
object AreaAnalyseRpt_sparkcore {

  /**
    * Entry point. Expects two arguments: the parquet log input path and the
    * result output path. Computes, per (province, city), nine accumulated ad
    * metrics and writes them as CSV-style text lines.
    */
  def main(args: Array[String]): Unit = {

    /**
      * (2) Spark operator implementation (Spark core, via SparkContext).
      */

    // 0. Validate arguments.
    //    Bug fix: the original printed the usage message but kept running, so the
    //    destructuring `val Array(...)` below would throw a MatchError on bad
    //    input; exit explicitly instead.
    if (args.length != 2){
      println(
        """
          |cn.dmp.report.ProCityRpt_Version3
          |参数：
          |   logInputPath
          |   resultOutputPath
        """.stripMargin)
      sys.exit(1)
    }

    // 1. Bind the two program arguments.
    val Array(logInputPath, resultOutputPath) = args

    // 2. Create the SparkSession.
    //    Spark tuning: use Kryo serialization (faster/smaller than Java serialization).
    val sparkSession: SparkSession = SparkSession.builder().appName("ProCityRpt_Version3").master("local[*]")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .enableHiveSupport()
      .getOrCreate()

    // 3. Read the parquet input.
    val parquetData: DataFrame = sparkSession.read.parquet(logInputPath)

    // 4. Business logic: key each row by (province, city) and build a nine-element
    //    metric list, then sum the lists per key.
    import sparkSession.implicits._
    val res: RDD[String] = parquetData.map(row => {
      // Request classification: List[Double](originalRequest, validRequest, adRequest)
      // Show/click classification: List[Double](adShown, adClicked)
      val reqMode: Int = row.getAs[Int]("Requestmode")
      val proNode: Int = row.getAs[Int]("Processnode")
      // Bidding metrics: List[Double](participatedInBid, wonBid, spend, cost)
      val effective: Int = row.getAs[Int]("Iseffective")
      val billing: Int = row.getAs[Int]("Isbilling")
      val bid: Int = row.getAs[Int]("Isbid")
      val adorderid: Int = row.getAs[Int]("Adorderid")
      val win: Int = row.getAs[Int]("Iswin")
      val winPrice: Double = row.getAs[Double]("Winprice")
      val adPayment: Double = row.getAs[Double]("Adpayment")
      // RptUItils encapsulates the three marker computations (nine markers total)
      // to keep this mapper from getting cluttered.
      val reqList: List[Double] = RptUItils.caculateReq(reqMode, proNode)
      val rtbList: List[Double] = RptUItils.caculateRtb(effective, billing, bid, adorderid, win, winPrice, adPayment)
      val showClickList: List[Double] = RptUItils.caculateShowClick(reqMode, effective)

      // Return a (key, metrics) tuple.
      ((row.getAs[String]("Provincename"), row.getAs[String]("Cityname")), reqList ++ rtbList ++ showClickList)
    }).rdd.reduceByKey((list1, list2) => {
      // Element-wise sum of the two metric lists — the key line of the job:
      // List(1,2,3).zip(List(10,20,30)) == List((1,10), (2,20), (3,30)),
      // then .map(t => t._1 + t._2) yields List(11,22,33).
      list1.zip(list2).map(t => t._1 + t._2)
    }).map(t => t._1._1 + "," + t._1._2 + "," + t._2.mkString(","))
    // Note: Dataset has no reduceByKey; hence the .rdd conversion above before reducing.
    res.collect().foreach(println) // debug print of the result RDD
    res.saveAsTextFile(resultOutputPath)

    // Local test run: remember to set the program arguments (input and output dirs) first.

    // SparkContext variant: reads the raw comma-separated log file with textFile
    // instead of the parquet file.
    val sc: SparkContext = sparkSession.sparkContext
    val res2: RDD[String] = sc.textFile("E:\\大数据培训视频\\项目+spark面试+flink+项目\\项目\\DMP\\data\\2016-10-01_06_p1_invalid.1475274123982.log.FINISH")
      .map(_.split(",", -1))          // -1 keeps trailing empty fields
      .filter(_.length >= 85)         // drop malformed records
      .map(arr => {
        val log = Log(arr)
        val req: List[Double] = RptUItils.caculateReq(log.Requestmode, log.Processnode)
        val rtb: List[Double] = RptUItils.caculateRtb(log.Iseffective, log.Isbilling, log.Isbid, log.Adorderid, log.Iswin, log.Winprice, log.Adpayment)
        val showClick: List[Double] = RptUItils.caculateShowClick(log.Requestmode, log.Iseffective)

        ((log.Provincename, log.Cityname), req ++ rtb ++ showClick)
      }).reduceByKey((list1, list2) => {
        // Same element-wise list summation as above.
        list1.zip(list2).map(t => t._1 + t._2)
      }).map(t => t._1._1 + "," + t._1._2 + "," + t._2.mkString(","))
    res2.collect().foreach(println)
    res2.saveAsTextFile("E:\\大数据培训视频\\项目+spark面试+flink+项目\\项目\\DMP\\AreaAnalyseRpt_sparkCore_sparkContext")

    sparkSession.stop()
  }
}
