

package cn.spark.study.sql

import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.IntegerType


object StatFunc {

  def main(args: Array[String]): Unit = {
    val sc = new ContextUtil().getContextL("StatFunc")
    val sqlC = new SQLContext(sc)
    try {
      dailyUV(sc, sqlC)
    } finally {
      // Release cluster resources even if the job throws.
      sc.stop()
    }
  }

  /**
   * Computes daily UV (unique visitors) from hard-coded "date,userid" log lines
   * and prints one Row per date.
   *
   * UV means: each user is counted once per day no matter how many visits they
   * made, i.e. the number of DISTINCT user ids per date.
   *
   * @param sc   the active SparkContext used to parallelize the sample log
   * @param sqlC the SQLContext used to build the DataFrame and run aggregation
   */
  def dailyUV(sc: SparkContext, sqlC: SQLContext): Unit = {
    val log = Array(
      "2015-10-01,1122", "2015-10-01,1122", "2015-10-01,1123", "2015-10-01,1124", "2015-10-01,1124",
      "2015-10-02,1122", "2015-10-01,1121", "2015-10-01,1123", "2015-10-01,1123")

    // Build the initial RDD, then convert each line into a Row.
    // Split each line once instead of twice per record.
    val rowRDD = sc.parallelize(log, 1).map { line =>
      val fields = line.split(",")
      Row(fields(0), fields(1).toInt)
    }
    val structType = StructType(Array(
      StructField("date", StringType, true),
      StructField("userid", IntegerType, true)))

    // Importing the SQLContext implicits is required to use the Symbol
    // ('column) syntax with the built-in aggregate functions.
    import sqlC.implicits._

    // BUG FIX: the original used sumDistinct('userid), which ADDS the distinct
    // user ids together — that is not a visitor count. countDistinct gives the
    // deduplicated number of users per day, which is what UV means.
    // The grouping column "date" is already part of the groupBy result, so it
    // does not need to be repeated inside agg().
    sqlC.createDataFrame(rowRDD, structType)
      .groupBy("date")
      .agg(countDistinct('userid).as("uv"))
      .rdd
      .map { row => Row(row(0), row(1)) }
      .foreach { println }
  }
}