package cn.dmp.report

import cn.dmp.util.{AdApi, JedisPools}
import org.apache.commons.lang.StringUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

import scala.collection.mutable.ListBuffer

/**
  * Created by Administrator on 2018/4/25.
  */
object RepAppRedis {

  /**
   * Offline report job: reads ad-log rows from a parquet dataset, resolves a
   * human-readable app name for each row (preferring the row's own `appname`,
   * then a Redis hash lookup keyed by `appid`, then the raw `appid`, and
   * finally the literal "未知" when both fields are empty), sums the per-app
   * metric vectors produced by `AdApi`, and prints the aggregated result.
   *
   * Expects exactly one program argument: the parquet input path.
   */
  def main(args: Array[String]): Unit = {

    if (args.length != 1) {
      // The job takes exactly one argument; the original message wrongly
      // advertised extra input/output paths.
      println(
        """
          |Usage: dataInputPath
          |参数不合法，请输入正确参数个数：dataInputPath
        """.stripMargin)
      sys.exit(1)
    }

    val conf = new SparkConf()
      .setMaster("local[*]").setAppName("RepAppRedis")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf) // entry point of the offline Spark job

    val Array(dataInputPath) = args

    val sQLContext = new SQLContext(sc)
    val dataFrame = sQLContext.read.parquet(dataInputPath)

    dataFrame.mapPartitions { it =>
      // One Jedis connection per partition; this closure runs on the executor.
      val jedis = JedisPools.getJedis
      val list = new ListBuffer[(String, List[Double])]()
      try {
        it.foreach { row =>
          val appid = row.getAs[String]("appid")
          var appname = row.getAs[String]("appname")

          if (StringUtils.isEmpty(appname)) {
            if (StringUtils.isNotEmpty(appid)) {
              // Resolve the readable name from the Redis "appdic" hash;
              // fall back to the raw appid when the dictionary misses.
              appname = jedis.hget("appdic", appid)
              if (StringUtils.isEmpty(appname)) appname = appid
            } else {
              // BUG FIX: the original wrote `else "未知"`, evaluating the
              // literal and discarding it, so appname stayed null/empty and
              // such rows aggregated under a null key.
              appname = "未知"
            }
          }
          list.append((appname, AdApi(row)._2))
        }
      } finally {
        // Always return the connection to the pool, even if a row fails.
        jedis.close()
      }
      list.toIterator
    }.reduceByKey((l1, l2) => l1.zip(l2).map(t => t._1 + t._2))
      .foreach(println)

    sc.stop()
  }
}
