package cn.sheep.violet.report

import cn.sheep.violet.config.ConfigHandler
import cn.sheep.violet.utils.{Jpools, RptKpi}
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

/** Media/app analysis report — Spark core (RDD-style) implementation.
  *
  * Reads log rows from Parquet, resolves missing app names via a Redis
  * dictionary, aggregates KPI lists per app name, sorts descending by the
  * first KPI and writes the result as text files.
  *
  * author: old sheep
  * QQ: 64341393
  * Created 2018/10/17
  */
object AppAnalysisCoreOptimize {

    def main(args: Array[String]): Unit = {

        val sparkConf = new SparkConf()
        sparkConf.setAppName("媒体分析-core实现方式")
        sparkConf.setMaster("local[*]")
        // Kryo is faster and more compact than Java serialization for shuffles.
        sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

        val sc = new SparkContext(sparkConf)
        val sqlContext = new SQLContext(sc)

        // Load the source data (one row per log record).
        val dataFrame = sqlContext.read.parquet(ConfigHandler.parquetFilePath)

        // Report dimension: app name -> KPI list.
        dataFrame.mapPartitions(iter => {
            // One Redis connection per partition (avoids per-row connection cost).
            val jedis = Jpools.getJedis
            try {
                // BUG FIX: iter.map is lazy, so the original code closed the
                // connection BEFORE Spark consumed the iterator, making every
                // jedis.hget run against a closed connection. Force evaluation
                // with toList while the connection is still open, then hand
                // Spark a fresh iterator over the materialized results.
                iter.map(row => {
                    // Dimension fields.
                    var appName = row.getAs[String]("appname")
                    val appId = row.getAs[String]("appid")

                    if (StringUtils.isEmpty(appName)) { // appname is missing
                        if (StringUtils.isNotEmpty(appId)) {
                            // Fall back to the app dictionary kept in Redis.
                            appName = jedis.hget("appdict", appId)
                            if (StringUtils.isEmpty(appName)) appName = "未知"
                        } else {
                            appName = "未知"
                        }
                    }
                    (appName, RptKpi(row))
                }).toList.iterator
            } finally {
                // Always release the connection, even if a row fails.
                jedis.close()
            }
        })
            // Element-wise sum of the KPI lists for each app name.
            .reduceByKey((list1, list2) => list1.zip(list2).map(tp => tp._1 + tp._2))
            // Sort descending by the first KPI.
            .sortBy(tp => tp._2.head, false)
            .saveAsTextFile("F:\\violet\\report\\app1")

        sc.stop()
    }

}
