package com.itcast.spark.baseCount

import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.linalg.{Matrix, Vectors}
import org.apache.spark.mllib.random.RandomRDDs
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

/**
 * DESC: Random data generation with Spark MLlib.
 * Demonstrates:
 *  - generating a standard-normal RDD via RandomRDDs.normalRDD
 *  - reproducible 80/20 random splits (randomSplit with a fixed seed)
 *  - sampling with replacement (sample / takeSample)
 * NOTE(review): the previous header listed summary statistics (max, min,
 * non-zero count, mean, variance, stddev) that this file never computes;
 * the Statistics import above is currently unused.
 */
object _04RandomNumber {
  /**
   * Entry point: demonstrates MLlib's RandomRDDs utilities — normal-distribution
   * RDD generation, reproducible random splits, and sampling with replacement.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("_04RandomNumber").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    try {
      // Draw 100 values from the standard normal distribution; a fixed seed
      // makes the generated data reproducible across runs. Real-world data is
      // often approximately normally distributed.
      val data: RDD[Double] = RandomRDDs.normalRDD(sc, 100L, seed = 123L)
      data.foreach(println(_))

      // Randomly split the data 80/20 into training and test sets.
      // The seed guarantees the same split on every run.
      val split: Array[RDD[Double]] = data.randomSplit(Array(0.8, 0.2), seed = 123L)
      val trainingData: RDD[Double] = split(0)
      // Kept for illustration even though this demo never consumes it.
      val testData: RDD[Double] = split(1)
      trainingData.foreach(println(_))

      val dataSamples: RDD[Int] = sc.parallelize(1 to 10)
      // The seed makes the sampled subset identical on every run.
      //dataSamples.sample(true, 0.3, 133L).foreach(println(_))
      // takeSample with replacement=true may return duplicates; 7 > distinct
      // elements is fine because elements can repeat.
      val ints: Array[Int] = dataSamples.takeSample(true, 7, 65536L)
      println(ints.mkString(","))
      println("=" * 20)
      for (x <- ints) {
        print(x + " ")
      }
      // BUG FIX: terminate the space-separated sample line with a newline so
      // subsequent console output does not run onto the same line.
      println()
    } finally {
      // BUG FIX: the SparkSession was never stopped, leaking the local
      // cluster's resources; stop it even if a job above fails.
      spark.stop()
    }
  }
}
