package com.shujia.spark.mllib

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
import org.apache.spark.rdd.RDD

object Demo1DataType {

  /**
   * Demonstrates the core MLlib data types (dense/sparse [[Vector]]s and
   * [[LabeledPoint]]) and basic statistics: column-wise summaries and
   * Pearson correlation.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {

    // Dense vector: stores every Double entry, including zeros.
    val dv: Vector = Vectors.dense(Array(0.1, 0.2, 0.0, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
    println(dv)

    // Sparse vector: preferable when most entries are zero — only the
    // non-zero positions are recorded (total size, indices, values).
    val sv: Vector = Vectors.sparse(11, Array(0, 1, 3), Array(0.1, 0.2, 0.3))
    println(sv)

    // LabeledPoint: one training example = a label plus a feature vector.
    // Here: positive label with a dense feature vector.
    val pos = LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0))
    println(pos)

    // Negative label with a sparse feature vector.
    val neg = LabeledPoint(0.0, Vectors.sparse(3, Array(0, 2), Array(1.0, 3.0)))
    println(neg)

    val conf = new SparkConf().setMaster("local").setAppName("app")
    val sc = new SparkContext(conf)

    // Ensure the SparkContext is stopped even if any statistics call throws
    // (the original demo leaked the context by never calling sc.stop()).
    try {
      val rdd1: RDD[Vector] = sc.parallelize(
        List(
          Vectors.dense(Array(0.1, 0.2, 0.3, 0.4)),
          Vectors.dense(Array(0.2, 0.3, 0.3, 0.4)),
          Vectors.dense(Array(0.3, 0.4, 0.3, 0.4)),
          Vectors.dense(Array(0.4, 0.6, 0.3, 0.4)),
          Vectors.dense(Array(0.5, 0.9, 0.3, 0.4))
        )
      )

      // Column-wise summary statistics over the RDD of vectors.
      val summary: MultivariateStatisticalSummary = Statistics.colStats(rdd1)
      println(summary.mean)        // a dense vector containing the mean value for each column
      println(summary.variance)    // column-wise variance
      println(summary.numNonzeros) // number of nonzeros in each column

      val rdd2 = sc.parallelize(Array(1.0, 2.0, 3.0, 4.0))
      val rdd3 = sc.parallelize(Array(1.0, 1.0, 1.0, 1.0))

      // Pearson correlation, range [-1, 1]; NaN means "no correlation
      // computable" — here rdd3 is constant (zero variance), so expect NaN.
      val p = Statistics.corr(rdd2, rdd3, "pearson")
      println(p)

      // Pairwise correlation matrix between the columns of rdd1.
      println(Statistics.corr(rdd1))
    } finally {
      sc.stop()
    }
  }

}
