package com.xiaochao

import com.xiaochao.bigdata.constant.Constants
import com.xiaochao.bigdata.spark.accumulator.SessionAggrStatAccumulator
import org.apache.spark.sql.{SQLContext, SparkSession}

object SparkDriverDemo {

  /**
   * Demo driver showing how to register and use a custom Spark accumulator.
   *
   * Builds a local SparkSession with Hive support, registers a
   * [[SessionAggrStatAccumulator]] with the SparkContext, feeds it two
   * time-period constants through a one-partition RDD, and prints the
   * accumulated result on the driver.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .appName("SparkSessionZipsExample")
      .master("local")
      .enableHiveSupport()
      .getOrCreate()

    // Ensure Spark resources are always released, even if the job body
    // throws — the original version never called spark.stop().
    try {
      // Set new runtime options.
      spark.conf.set("spark.sql.shuffle.partitions", 6)
      spark.conf.set("spark.executor.memory", "2g")

      // Dataset of longs 5, 10, ..., 95 (created for demonstration;
      // not consumed further in this demo).
      val numDS = spark.range(5, 100, 5)

      val sparkContext = spark.sparkContext

      // Create the custom Accumulator and register it with the SparkContext
      // so that tasks running on executors can add values to it.
      val myAcc = new SessionAggrStatAccumulator()
      sparkContext.register(myAcc, "sessionAggrStatAccumulator")

      // Simulate using the custom Accumulator: each element of the RDD is
      // folded into the accumulator via add(); the merged result is then
      // readable on the driver.
      val arr = Array(Constants.TIME_PERIOD_1s_3s, Constants.TIME_PERIOD_4s_6s)
      val rdd = sparkContext.parallelize(arr, 1)
      rdd.foreach(myAcc.add(_))

      println(myAcc)
    } finally {
      // Release the session (and its SparkContext) deterministically.
      spark.stop()
    }
  }
}
