package com.need3

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
import redis.clients.jedis.Jedis

import scala.collection.mutable.ListBuffer

/**
  * Created by zhuang on 2018/3/4.
  */
object DealAppNameByRedis {

  /**
    * Aggregates per-app advertising metrics.
    *
    * Step 1: loads a tab-separated "appId -> appName" dictionary file into the
    * Redis hash "AppName_AppId".
    * Step 2: reads the parquet request log, resolves each record's display name
    * (record's own appname, else a Redis lookup by appid, else the appid itself,
    * else the literal "未知"), computes the nine metric columns per record and
    * sums them by resolved name, finally printing the result rows.
    *
    * NOTE: a plain `main` is used instead of `extends App` because the App
    * trait's delayed initialization can leave object fields null when they are
    * captured inside Spark closures executed on executors.
    */
  def main(args: Array[String]): Unit = {
    // Uncomment to silence noisy framework logging.
    //Logger.getLogger("org").setLevel(Level.WARN)
    val load: Config = ConfigFactory.load()
    val conf = new SparkConf().setMaster("local[*]").setAppName(this.getClass.getSimpleName)
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    // SQLContext is needed both to read the parquet input and to build the result DataFrame.
    val context: SQLContext = new SQLContext(sc)

    // ---- Step 1: cache the app dictionary in Redis, one connection per partition. ----
    val file: RDD[String] = sc.textFile(load.getString("AppNameById"))
    file.foreachPartition { lines =>
      val redis: Jedis = GetRedis.getRedis()
      try {
        lines.foreach { line =>
          val split = line.split("\t", -1)
          // Only keep rows that carry both an id (column 4) and a name (column 1).
          if (split.length >= 5 && split(4).nonEmpty && split(1).nonEmpty) {
            redis.hset("AppName_AppId", split(4), split(1))
          }
        }
      } finally {
        // Always release the connection, even if a row fails.
        redis.close()
      }
    }

    // ---- Step 2: read the parquet log and compute per-record metrics. ----
    val parquet: DataFrame = context.read.parquet(load.getString("DataForParquet"))
    val partitions = parquet.mapPartitions { rows =>
      val collected = ListBuffer[(String, List[Double])]()
      val redis: Jedis = GetRedis.getRedis()
      try {
        rows.foreach { row =>
          val appid = row.getAs[String]("appid")
          val appname = row.getAs[String]("appname")
          // Resolve the display name. getAs may return null and Jedis.hget
          // returns null when the field is absent, so guard every String call.
          val appnamed: String =
            if (appname != null && appname.nonEmpty) {
              appname
            } else if (appid != null && appid.nonEmpty) {
              val cached = redis.hget("AppName_AppId", appid)
              // Use the dictionary value when present, otherwise fall back to the raw id.
              if (cached != null && cached.nonEmpty) cached else appid
            } else {
              "未知"
            }
          val requestmode = row.getAs[Int]("requestmode")
          val processnode = row.getAs[Int]("processnode")
          val iseffective = row.getAs[Int]("iseffective")
          val isbilling = row.getAs[Int]("isbilling")
          val isbid = row.getAs[Int]("isbid")
          val iswin = row.getAs[Int]("iswin")
          val adorderid = row.getAs[Int]("adorderid")
          // Prices used for the cost/spend columns (per-mille, hence / 1000).
          val winprice = row.getAs[Double]("winprice")
          val adpayment = row.getAs[Double]("adpayment")
          // Metric vector, positionally matching the schema below:
          // (original requests, valid requests, ad requests, bids entered,
          //  bids won, impressions, clicks, ad cost, ad spend)
          val metrics = List[Double](
            if (requestmode == 1 && processnode >= 1) 1 else 0,
            if (requestmode == 1 && processnode >= 2) 1 else 0,
            if (requestmode == 1 && processnode == 3) 1 else 0,
            if (iseffective == 1 && isbilling == 1 && isbid == 1 && adorderid != 1) 1 else 0,
            if (iseffective == 1 && isbilling == 1 && iswin == 1) 1 else 0,
            if (requestmode == 2 && iseffective == 1) 1 else 0,
            if (requestmode == 3 && iseffective == 1) 1 else 0,
            if (iseffective == 1 && isbilling == 1 && iswin == 1) winprice / 1000 else 0,
            if (iseffective == 1 && isbilling == 1 && iswin == 1) adpayment / 1000 else 0
          )
          collected.append((appnamed, metrics))
        }
      } finally {
        redis.close()
      }
      collected.toIterator
    }

    // Sum the nine metric columns element-wise per app name.
    val key = partitions.reduceByKey { (list1, list2) =>
      list1.zip(list2).map(t => t._1 + t._2)
    }
    val rows = key.map { t =>
      Row(t._1, t._2(0), t._2(1), t._2(2), t._2(3), t._2(4), t._2(5), t._2(6), t._2(7), t._2(8))
    }
    val schema =
      StructType(
        List(
          StructField("媒体类别", StringType),
          StructField("原始请求", DoubleType),
          StructField("有效请求", DoubleType),
          StructField("广告请求", DoubleType),
          StructField("参与竞价数", DoubleType),
          StructField("竞价成功数", DoubleType),
          StructField("展示量", DoubleType),
          StructField("点击量", DoubleType),
          StructField("广告成本", DoubleType),
          StructField("广告消费", DoubleType)
        ))
    val df = context.createDataFrame(rows, schema)
    df.rdd.foreach(println)
    sc.stop()
  }
}
