import org.apache.spark.{SparkConf, SparkContext}

/**
  * @author td
  * @date 2017/11/23
  */
object Demo {

  /**
    * Sequence operation for `aggregate`: folds a single element of a
    * partition into the running (sum, count) accumulator.
    *
    * NOTE(review): the name looks like a typo for `seqOp`; kept as-is so any
    * external callers are not broken.
    *
    * @param tuple1 running accumulator as (sum, count)
    * @param num    next element of the partition
    * @return accumulator with `num` added to the sum and the count incremented
    */
  def sqlOp(tuple1: (Int, Int), num: Int): (Int, Int) =
    (tuple1._1 + num, tuple1._2 + 1)

  /**
    * Combine operation for `aggregate`: merges the (sum, count) partial
    * results of two partitions, element-wise.
    *
    * @param tuple1 accumulator from one partition
    * @param tuple2 accumulator from another partition
    * @return element-wise sum of the two accumulators
    */
  def combOp(tuple1: (Int, Int), tuple2: (Int, Int)): (Int, Int) =
    (tuple1._1 + tuple2._1, tuple1._2 + tuple2._2)

  def main(args: Array[String]): Unit = {

    val numbers = List(1, 2, 3, 4, 5, 6, 7, 8, 9)

    // Parallel-collection aggregate: each chunk is folded with sqlOp
    // (starting from the zero value (0, 0)), partial results merged with combOp.
    val result = numbers.par.aggregate((0, 0))(sqlOp, combOp)

    println(result._1)
    println(result._2)

    // Use floating-point division: the original integer division silently
    // truncated any non-integral average.
    val avg = result._1.toDouble / result._2
    println("平均值是 " + avg)

    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      // histogramTest(sc)
      // meanTest(sc)
      // sampleStdevTest(sc)
      aggregateByKeyTest(sc)
    } finally {
      // The original leaked the SparkContext; always release it on exit.
      sc.stop()
    }
  }

  /**
    * Demonstrates RDD.histogram with explicit, evenly spaced bucket
    * boundaries; prints the per-bucket counts.
    *
    * @param sc active SparkContext
    */
  def histogramTest(sc: SparkContext): Unit = {

    val hgRdd = sc.parallelize(List(1.1, 1.2, 1.3, 2.0, 2.1, 7.4, 7.6, 8.8, 9.0), 3)
    // val tuple = hgRdd.histogram(5)  // alternative: let Spark compute 5 buckets
    // `evenBuckets = true` lets Spark use constant-time bucket lookup.
    val counts = hgRdd.histogram(Array(1.1, 2.68, 4.26, 5.84, 7.42, 9.0), true)

    counts.foreach(println)

    /*tuple._1.map(item => print(item+","));
    println("--------------------------")
    tuple._2.map(println(_))*/
  }

  /**
    * Computes the exact mean and an approximate mean of a numeric RDD.
    *
    * @param sc active SparkContext
    */
  def meanTest(sc: SparkContext): Unit = {
    val hgRdd = sc.parallelize(List(1.1, 1.2, 1.3, 2.0, 2.1, 7.4, 7.6, 8.8, 9.0), 3)
    val avg = hgRdd.mean()
    // meanApprox(timeoutMillis, confidence): returns a partial result within
    // the timeout at the requested confidence level.
    val avgApprox = hgRdd.meanApprox(100, 0.9)

    println(avg)
    avgApprox.map(println(_))
  }

  /**
    * Sample standard deviation / variance demo; standard deviation measures
    * the spread of the sample around its mean (commonly reported as M ± SD).
    * `stats()` bundles count, mean, stdev, max and min in one pass.
    *
    * @param sc active SparkContext
    */
  def sampleStdevTest(sc: SparkContext): Unit = {

    val hgRdd = sc.parallelize(List(1.1, 1.2, 1.3, 2.0, 2.1, 7.4, 7.6, 8.8, 9.0), 3)
    val stdev = hgRdd.sampleStdev()
    // Fixed misspelled local `varuabce` -> `variance`.
    val variance = hgRdd.sampleVariance()
    println("样本标准差" + stdev)
    println("样本偏差" + variance)
    val statCounter = hgRdd.stats()
    println("最大值" + statCounter.max)
    println("最小值" + statCounter.min)
    println("标准偏差" + statCounter.popStdev)
  }

  /**
    * Demonstrates aggregateByKey: collects, per key, all values into a List.
    * Within a partition each value is prepended to the accumulator; partition
    * results are concatenated, so the final per-key order is not guaranteed.
    *
    * @param sc active SparkContext
    */
  def aggregateByKeyTest(sc: SparkContext): Unit = {

    val hgRdd = sc.parallelize(List((1, "a"), (1, "b"), (1, "c"), (2, "b"), (2, "e"), (3, "f"), (3, "h")), 3)
    val aggRDD = hgRdd.aggregateByKey(Nil: List[String])((a, b) => b :: a, (la, lb) => la ::: lb)
    val arrCollect = aggRDD.collect()
    arrCollect.map(println(_))
  }

}
