package com.kgc.bigdata.spark.sql

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of Spark SQL built-in aggregate functions.
  *
  * Requirement: from simulated user access logs ("date,userId" lines),
  * compute per-day page views (pv) and per-day distinct visitors (uv).
  */
object FunctionApp {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("FunctionApp")

    val sc = new SparkContext(sparkConf)
    try {
      val sqlContext = new org.apache.spark.sql.SQLContext(sc)
      // Required for the Symbol-to-Column conversion ('userId) used below.
      import sqlContext.implicits._

      // Simulated access log entries, one "date,userId" pair per element.
      val accessLog = Array(
        "2016-12-27,001",
        "2016-12-27,001",
        "2016-12-27,002",
        "2016-12-28,003",
        "2016-12-28,004",
        "2016-12-28,002",
        "2016-12-28,002",
        "2016-12-28,001"
      )

      // Build an RDD[Row] from the in-memory collection; each line is split
      // into (day: String, userId: Int). Note "001".toInt yields 1.
      val accessLogRDD = sc.parallelize(accessLog).map { line =>
        val fields = line.split(",")
        Row(fields(0), fields(1).toInt)
      }

      // Schema describing the DataFrame's columns (both nullable).
      val structTypes = StructType(Array(
        StructField("day", StringType, true),
        StructField("userId", IntegerType, true)
      ))

      // Create the DataFrame from the row RDD plus its schema.
      val accessLogDF = sqlContext.createDataFrame(accessLogRDD, structTypes)

      // Import Spark SQL's built-in functions (count, countDistinct, ...).
      import org.apache.spark.sql.functions._

      /**
        * Total page views (pv) per day.
        *
        * Expected output:
        * [2016-12-27,3]
        * [2016-12-28,5]
        */
      accessLogDF.groupBy("day").agg(count("userId").as("pv"))
        .select("day", "pv")
        .collect.foreach(println)

      /**
        * Distinct visitors (uv) per day.
        *
        * Expected output:
        * [2016-12-27,2]
        * [2016-12-28,4]
        */
      accessLogDF.groupBy("day").agg(countDistinct('userId).as("uv"))
        .select("day", "uv")
        .collect.foreach(println)
    } finally {
      // Release the SparkContext even if a job above throws.
      sc.stop()
    }
  }

}
