package com.need2

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}

import scala.collection.mutable.ListBuffer

/**
  * Implemented with Spark SQL.
  * Created by zhuang on 2018/3/2.
  */
object ComputeDataSQL {

  /**
    * Computes per-(province, city) aggregates of ad-log metrics
    * (raw/valid/ad requests, bid participations, bid wins, impressions,
    * clicks, ad cost, ad spend) from a Parquet file using Spark SQL.
    *
    * This object previously extended [[scala.App]]. The Spark quick-start
    * guide explicitly warns that applications should define a main() method
    * instead of extending scala.App, because App's delayed initialization
    * can leave fields unset when closures are serialized to executors.
    */
  def main(args: Array[String]): Unit = {
    val load: Config = ConfigFactory.load()
    val conf = new SparkConf().setMaster("local[*]").setAppName(this.getClass.getSimpleName)
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    // SQLContext provides Parquet reading and SQL execution on top of the SparkContext.
    val context: SQLContext = new SQLContext(sc)

    // Read the input file; the path comes from the application config.
    val df: DataFrame = context.read.parquet(load.getString("DataForParquet"))
    // Register a temp table so the data can be queried with SQL.
    df.registerTempTable("t_data")
    // Per-row 0/1 indicator columns (and per-row cost/spend amounts) for:
    // (raw requests, valid requests, ad requests, bid participations,
    //  bid wins, impressions, clicks, ad cost, ad spend).
    // NOTE(review): `zs` (impressions) and `dj` (clicks) use the exact same
    // condition (requestmode = 3 and iseffective = 1), so one of the two
    // metrics is almost certainly wrong -- confirm the requestmode code for
    // clicks against the log field dictionary before relying on `dj`.
    val sql: DataFrame = context.sql(
      """
select provincename,cityname,
if(requestmode=1 and processnode >=1,1,0)as ys,
if(requestmode = 1 and processnode >= 2,1,0)as yx,
if(requestmode = 1 and processnode = 3,1,0)as gg,
if(iseffective = 1 and isbilling = 1 and isbid = 1 and adorderid != 1,1,0)as cy,
if(requestmode = 2 and iseffective = 1,1,0)as jj,
if(requestmode = 3 and iseffective = 1,1,0)as zs,
if(requestmode = 3 and iseffective = 1,1,0)as dj,
if (iseffective = 1 and isbilling = 1 and iswin = 1,winprice / 1000,0)as cb,
if (iseffective = 1 and isbilling = 1 and iswin = 1,adpayment / 1000,0)as xf
from t_data
      """.stripMargin)

    // Register the flagged rows as a second temp table and sum the
    // indicator/amount columns per (province, city).
    sql.registerTempTable("t_dated")
    val sql1: DataFrame = context.sql(
      """
select provincename,cityname,
sum(ys)as 原始请求,
sum(yx)as 有效请求,
sum(gg)as 广告请求,
sum(cy)as 参与竞价数,
sum(jj)as 竞价成功数,
sum(zs)as 展示量,
sum(dj)as 点击量,
sum(cb)as 广告成本,
sum(xf)as 广告消费
from t_dated group by provincename,cityname order by provincename
      """.stripMargin)
    // Write the result out as JSON (disabled in the original; left as-is).
    // sql1.write.mode(SaveMode.Overwrite).json(load.getString("DataForJson"))

    sc.stop()
  }
}
