package scala

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of core Spark RDD operations and MLlib basic statistics:
  * `parallelize`, `map`/`persist`, `mean`, `aggregate`, `combineByKey`,
  * `Statistics.colStats` column summaries, and Pearson/Spearman correlation.
  *
  * Project: spark-learn<br>
  * Created: 2019-03-15, updated: 2019-03-15<br>
  *
  * NOTE(review): the enclosing `package scala` shadows the standard-library
  * root package — consider renaming it (left unchanged to avoid breaking
  * the build's fully-qualified references).
  *
  * @author lds
  * @version v1.0
  * @since jdk1.8
  */
object ParallelizeDemo {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local").setAppName("parallelize")
    val sc = new SparkContext(conf)
    // Ensure the SparkContext is always released, even if a stage fails.
    try {
      val parallelizeRDD = sc.parallelize(List(1, 2, 3, 4))
      // Square each element; DISK_ONLY persistence so the RDD is materialized once.
      val caluRDD = parallelizeRDD.map(x => x * x).persist(StorageLevel.DISK_ONLY)
      // Arithmetic mean of the squared values.
      val mean = caluRDD.mean()
      println(mean)

      // aggregate: seqOp = max within the (single) partition, combOp = sum across partitions.
      val z = sc.parallelize(List(1, 2, 3, 4, 5, 6), 1)
      val aggregate = z.aggregate(0)(math.max, _ + _)
      println(aggregate)

      val rdd = sc.parallelize(
        Array((1, 1.0), (1, 2.0), (1, 3.0), (2, 3.0), (2, 4.0), (2, 5.0), (1, 6.0), (2, 7.0)), 2)

      // combineByKey: accumulate (sum, count) per key across 2 output partitions.
      val combineByKey = rdd.combineByKey(
        createCombiner = (v: Double) => {
          println("createCombiner:" + v)
          (v, 1) // first value seen for a key in a partition
        },
        mergeValue = (c: (Double, Int), v: Double) => {
          println("mergeValue:c:" + c + ",v:" + v)
          (c._1 + v, c._2 + 1) // fold another value into the partition-local combiner
        },
        mergeCombiners = (c1: (Double, Int), c2: (Double, Int)) => {
          println("mergeCombiners:c1:" + c1 + ",c2:" + c2)
          (c1._1 + c2._1, c1._2 + c2._2) // merge combiners from different partitions
        }, 2)
      println("combineByKey:")
      for (elem <- combineByKey.collect()) {
        println(elem)
      }

      // Column statistics over an RDD of dense vectors (one row per Array).
      val list = Array(Array(1, 2, 3, 4, 5), Array(6, 7, 8, 9, 10))
      val denses = sc.parallelize(list).map(v => Vectors.dense(v.map(i => i.toDouble)))
      val summary = Statistics.colStats(denses)
      // mkString so the collected rows print readably (Array.toString is opaque).
      println("data:" + denses.collect().mkString(", "))
      println("min:" + summary.min)
      println("max:" + summary.max)
      println("mean:" + summary.mean)
      println("count:" + summary.count)
      println("variance:" + summary.variance)
      println("normL1:" + summary.normL1)
      println("normL2:" + summary.normL2)

      // Correlation matrices between the columns of `denses`.
      val corr1 = Statistics.corr(denses, "pearson")
      val corr2 = Statistics.corr(denses, "spearman")
      println("corr1:" + corr1)
      println("corr2:" + corr2)

      // Pairwise series correlation requires RDD[Double]; these two series
      // are perfectly linear, so Pearson correlation is 1.0.
      val x1 = sc.parallelize(Array(1.0, 2.0, 3.0, 4.0, 5.0))
      val y1 = sc.parallelize(Array(6.0, 7.0, 8.0, 9.0, 10.0))
      println("seriesCorr:" + Statistics.corr(x1, y1, "pearson"))
    } finally {
      sc.stop()
    }
  }

}
