package mllib

import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.stat.Correlation
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * DESC: Demonstrates the spark.ml `Correlation` API: builds a DataFrame of
 * feature vectors (mixing sparse and dense representations), prints the
 * Pearson correlation matrix for the "features" column, and then shows how
 * to randomly split the same DataFrame into training/test subsets.
 */
object _03CorrlationDtaFrame {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("_03CorrlationDtaFrame").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc: SparkContext = spark.sparkContext
    import spark.implicits._

    sc.setLogLevel("WARN")

    // Four sample feature vectors, all of size 4 (two sparse, two dense).
    val vectors = Seq(
      Vectors.sparse(4, Seq((0, 1.0), (3, -2.0))),
      Vectors.dense(4.0, 5.0, 0.0, 3.0),
      Vectors.dense(6.0, 7.0, 0.0, 8.0),
      Vectors.sparse(4, Seq((0, 9.0), (3, 1.0)))
    )

    // Wrap each vector in a Tuple1 so toDF can assign the single column name.
    val featuresDF: DataFrame = vectors.map(Tuple1.apply).toDF("features")
    featuresDF.show()

    // Correlation.corr defaults to the Pearson method; the result is a
    // one-row DataFrame holding the full correlation matrix.
    val correlationDF: DataFrame = Correlation.corr(featuresDF, "features")
    correlationDF.show(false)

    // Randomly split the DataFrame roughly 80/20; the fixed seed keeps the
    // split reproducible across runs.
    val Array(trainingSet, testSet) = featuresDF.randomSplit(Array(0.8, 0.2), seed = 123L)

    trainingSet.show(10, false)
    testSet.show(10, false)

    spark.stop()
  }
}
