package com.shujia.mllib

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConverters._

/** Demo of column-wise summary statistics with Spark MLlib's `Statistics.colStats`.
  *
  * Builds a tiny 4x4 dataset, converts each row to a dense vector, and prints
  * per-column max / min / mean / variance / L1 norm / L2 norm.
  */
object Demo1 {
  def main(args: Array[String]): Unit = {
    // Local single-threaded SparkContext — this is a standalone demo.
    val sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))

    try {
      // Raw samples: four rows of four doubles each.
      val data: RDD[Array[Double]] = sc.parallelize(
        Array(
          Array[Double](12, 3, 4, 5),
          Array[Double](67, 1, 5, 9),
          Array[Double](35, 6, 3, 1),
          Array[Double](31, 1, 5, 6))
      )

      // Convert each row into a dense vector so MLlib can consume it.
      val vdata = data.map(sample => Vectors.dense(sample))

      // Column-wise summary statistics over all rows.
      val stat = Statistics.colStats(vdata)
      println(stat.max)      // per-column maximum
      println(stat.min)      // per-column minimum
      println(stat.mean)     // per-column mean
      println(stat.variance) // per-column variance
      println(stat.normL1)   // per-column L1 norm (sum of absolute values)
      println(stat.normL2)   // per-column L2 norm (Euclidean)
    } finally {
      // Fix: the original never stopped the SparkContext, leaking the driver's
      // resources; stop it even if the computation above throws.
      sc.stop()
    }
  }
}
