package club.monkeywood.ad.dmp.report

import club.monkeywood.ad.dmp.bean.Log
import club.monkeywood.ad.dmp.util.RptUtils
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}


/**
	* Regional ad-delivery report: aggregates the request / RTB / show-click
	* metrics per (province, city).
	*
	* Implementation: Spark Core (RDD API)
	* Input : comma-separated text log files
	* Output: text files, one "province,city,metric1,...,metric9" line per key
	*/
object AreaAnalyseRptByRDDWithTxt {

	def main(args: Array[String]): Unit = {

		// 0 Validate the argument count; exit non-zero so schedulers/callers see the failure.
		if (args.length != 2) {
			println(
				"""
					|club.monkeywood.ad.dmp.report.AreaAnalyseRptByRDDWithTxt
					|参数：
					| logInputPath
					| resultOutputPath
				""".stripMargin)
			sys.exit(1)
		}

		// 1 Bind the program arguments.
		val Array(logInputPath, resultOutputPath) = args

		// 2 Build SparkConf -> SparkSession.
		val sparkConf = new SparkConf()
		sparkConf.setAppName(this.getClass.getSimpleName)
		// Default to local mode for development, but let `spark-submit --master` override it.
		sparkConf.setIfMissing("spark.master", "local[*]")
		// Kryo is faster than Java serialization when shuffling RDD data between workers
		// or spilling RDDs to disk.
		sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

		val ss = SparkSession.builder()
			.config(sparkConf)
			.getOrCreate()

		// 3 Read the raw log lines (plain text, one comma-separated record per line).
		val rdd: RDD[String] = ss.sparkContext.textFile(logInputPath)

		// 4 Parse, compute the per-record metric lists, aggregate per (province, city)
		//   and write the result out as text.
		rdd
			.map(_.split(",", -1)) // limit -1 keeps trailing empty fields
			.filter(_.length >= 85) // drop malformed / truncated records
			.map { arr =>
				val log = Log(arr)

				val req = RptUtils.caculateReq(log.requestmode, log.processnode)
				val rtb = RptUtils.caculateRtb(log.iseffective, log.isbilling, log.isbid, log.adorderid, log.iswin, log.winprice, log.adpayment)
				val showClick = RptUtils.caculateShowClick(log.requestmode, log.iseffective)

				// key = (province, city), value = the 9 metric counters (req ++ rtb ++ showClick)
				((log.provincename, log.cityname), req ++ rtb ++ showClick)
			}
			// element-wise sum of the metric lists for each (province, city)
			.reduceByKey((left, right) => left.zip(right).map { case (a, b) => a + b })
			.map { case ((province, city), metrics) => s"$province,$city,${metrics.mkString(",")}" }
			.saveAsTextFile(resultOutputPath)

		ss.stop()
	}

}
