package com.shujia.mlib

import java.lang

import org.apache.spark.api.java.function.VoidFunction
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.random.RandomRDDs
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.{SparkConf, SparkContext}

object Demo2Sta {

  /**
   * Demo of basic MLlib statistics:
   *  1. column-wise summary statistics over an RDD of vectors,
   *  2. Pearson correlation between two numeric RDDs,
   *  3. generating a random RDD drawn from the standard normal distribution.
   *
   * Runs locally (`local` master); intended as a teaching example, not production code.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local").setAppName("Demo2Sta")

    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always stopped, even if an action throws,
    // so local resources (UI port, threads, temp dirs) are released.
    try {
      val rdd = sc.parallelize(Array(
        Array[Double](1, 2, 3, 4, 5, 6),
        Array[Double](1, 2, 3, 4, 5, 6),
        Array[Double](1, 2, 3, 4, 5, 6),
        Array[Double](1, 2, 3, 4, 5, 6)
      ))

      // Convert each row (Array[Double]) into a dense MLlib vector.
      val vData = rdd.map(arr => Vectors.dense(arr))

      vData.foreach(println)

      // Column-wise summary statistics across all row vectors.
      val info = Statistics.colStats(vData)

      println(info.count)    // number of rows
      println(info.max)      // per-column maximum
      println(info.min)      // per-column minimum
      println(info.mean)     // per-column mean
      println(info.variance) // per-column variance
      println(info.normL1)   // per-column L1 norm (sum of absolute values)
      println(info.normL2)   // per-column L2 norm (Euclidean)

      // Pearson correlation between two RDDs.
      // NOTE: both RDDs must have the same number of elements and identical partitioning
      // semantics for corr to pair values correctly.
      val rdd1 = sc.parallelize(Array[Double](1, 2, 3, 4, 5, 6, 7, 8, 9))
      val rdd2 = sc.parallelize(Array[Double](0, 0, 0, 0, 1, 0, 0, 0, 0))

      println(Statistics.corr(rdd1, rdd2, "pearson"))

      // 1000 samples from N(0, 1), spread over 10 partitions (Java-friendly RDD).
      val rdd4 = RandomRDDs.normalJavaRDD(sc, 1000L, 10)

      rdd4.foreach(new VoidFunction[lang.Double] {
        override def call(t: lang.Double): Unit = {
          println(t)
        }
      })
    } finally {
      sc.stop()
    }
  }

}
