package com.study.basic

import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.sql.SparkSession

/**
 * Summary-statistics demo.
 *
 * Builds a local Spark session, parallelizes three dense vectors, and prints
 * the column-wise mean, variance, and L1/L2 norms computed by
 * MLlib's [[Statistics.colStats]].
 *
 * @author stephen
 */
object SummaryStatisticsDemo {

  def main(args: Array[String]): Unit = {
    import linalg.Vectors

    val session = SparkSession
      .builder()
      .master("local[*]")
      .appName(this.getClass.getSimpleName)
      .getOrCreate()

    // Keep the console output readable; Spark's level parsing is case-insensitive.
    session.sparkContext.setLogLevel("warn")

    // Three sample observations, one dense vector per row.
    val observations = session.sparkContext.parallelize(Seq(
      Vectors.dense(-1, 2, 3),
      Vectors.dense(2, -4, 6),
      Vectors.dense(3, 6, -9)
    ))

    // Column-wise summary statistics over the RDD of vectors.
    val stats = Statistics.colStats(observations)

    println(s"平均值：${stats.mean}")
    println(s"方差：${stats.variance}")
    // L1 norm (Manhattan): per-column sum of absolute values.
    println(s"L1：${stats.normL1}")
    // L2 norm (Euclidean): per-column square root of the sum of squares.
    println(s"L2：${stats.normL2}")

    session.stop()
  }
}
