package cn.dmp.report

import cn.dmp.utils.{AdRptKpi, JedisPools}
import org.apache.commons.lang.StringUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

import scala.collection.mutable.ListBuffer

/**
  * App-dimension KPI report job.
  *
  * The app-id → app-name dictionary is resolved from Redis at runtime rather
  * than via a Spark broadcast variable, because a broadcast variable is
  * immutable once created and cannot reflect dictionary updates.
  */
object RptAppAnalysis_Redis {

    def main(args: Array[String]): Unit = {

        // Validate CLI arguments: an input parquet path and a report output path.
        if (args.length != 2) {
            println(
                """
                  |cn.dmp.report.RptAppAnalysis_Redis
                  |参数：dataInputPath, outputPath
                """.stripMargin)
            sys.exit()
        }

        val Array(dataInputPath, outputPath) = args

        // Build the SparkContext. Only fall back to local[*] when no master was
        // supplied externally (e.g. via spark-submit --master), so the job is
        // runnable both from an IDE and on a cluster.
        val sparkConf = new SparkConf().setAppName("媒体分析")
        if (!sparkConf.contains("spark.master")) sparkConf.setMaster("local[*]")
        val sc = new SparkContext(sparkConf)

        // Read the ad-log data as a DataFrame from parquet.
        val sqlc = new SQLContext(sc)
        val parquet = sqlc.read.parquet(dataInputPath)

        // Per partition: open ONE pooled Redis connection, resolve app names,
        // and emit (appName, kpiList) pairs. The iterator is materialized into
        // a buffer BEFORE the connection is returned, because mapPartitions is
        // lazy — a lazily mapped iterator would run against a closed connection.
        parquet.mapPartitions(itr => {
            val jedis = JedisPools.getJedis
            val lb = new ListBuffer[(String, List[Double])]() // runs on the executor
            try {
                itr.foreach(row => {
                    val appId = row.getAs[String]("appid")
                    var appName = row.getAs[String]("appname")

                    // Name fallback chain: missing appname -> look it up in the
                    // Redis "appdict" hash by appId; still missing -> use the raw
                    // appId; no appId at all -> the literal 未知 ("unknown").
                    if (StringUtils.isEmpty(appName)) {
                        if (StringUtils.isNotEmpty(appId)) {
                            appName = jedis.hget("appdict", appId)
                            if (StringUtils.isEmpty(appName)) appName = appId
                        } else appName = "未知"
                    }
                    lb.append((appName, AdRptKpi(row)._2))
                })
            } finally {
                // Always return the connection to the pool, even if a row fails;
                // the original leaked it on exception.
                jedis.close()
            }
            lb.toIterator
        }).reduceByKey((list1, list2) => list1.zip(list2).map(t => t._1 + t._2))
          // Persist the aggregated report; outputPath was validated above but
          // previously never used (results were only printed to stdout).
          .saveAsTextFile(outputPath)

        sc.stop()
    }

}
