package com.oracle.violet.first.report

import com.oracle.violet.first.config.ConfigHandler
import com.oracle.violet.first.until.{CaculateUntils, Jpools}
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Media-source analysis job ("媒体分析").
 *
 * Reads the preprocessed parquet log data, resolves each record's app display
 * name (falling back to a Redis dictionary lookup by appid), aggregates the
 * per-app metric vectors produced by `CaculateUntils.sourceData`, and writes
 * the result sorted by the first metric in descending order.
 *
 * Usage: an optional first CLI argument overrides the output directory;
 * with no arguments the original hard-coded path is used (backward compatible).
 */
object MediaAnysRedis {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("媒体分析")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Output directory: optional first CLI arg, else the original default path.
    val outputPath =
      if (args.nonEmpty && StringUtils.isNotEmpty(args(0))) args(0)
      else "H:\\甲骨文培训\\大数据\\Linux\\大纲\\项目二\\VIOLET\\appdict"

    // Read the source data from parquet.
    val dataFrame: DataFrame = sqlContext.read.parquet(ConfigHandler.parquetFilePath)

    dataFrame.mapPartitions { iter =>
      // One pooled Redis connection per partition.
      val jedis = Jpools.getJedis
      try {
        // NOTE: Iterator.map is lazy. The original code called jedis.close()
        // immediately after building the mapped iterator, returning the
        // connection to the pool BEFORE any hget ran. We force the partition
        // with toList inside the try so every lookup happens while the
        // connection is still live, then close in finally.
        iter.map { row =>
          val appid = row.getAs[String]("appid")
          val rawName = row.getAs[String]("appname")
          // Resolve a display name:
          //   1. use the record's own appname when present;
          //   2. otherwise, with no appid either, bucket as "其他" (other);
          //   3. otherwise look the appid up in the Redis hash "violet",
          //      falling back to "未知" (unknown) when absent/empty.
          val appname =
            if (StringUtils.isNotEmpty(rawName)) rawName
            else if (StringUtils.isEmpty(appid)) "其他"
            else Option(jedis.hget("violet", appid))
              .filter(StringUtils.isNotEmpty)
              .getOrElse("未知")
          (appname, CaculateUntils.sourceData(row))
        }.toList.iterator
      } finally {
        jedis.close()
      }
    }
      // Element-wise sum of the metric vectors for each app name.
      .reduceByKey((list1, list2) => list1.zip(list2).map(tp => tp._1 + tp._2))
      // Sort by the first (primary) metric, descending.
      .sortBy(tp => tp._2.head, ascending = false)
      .saveAsTextFile(outputPath)

    sc.stop()
  }
}
