package com.itcast.spark.basePro

import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.stat.Correlation

/**
 * DESC:
 */
/**
 * Reads the iris dataset from CSV, assembles the four numeric measurements
 * into a single ML feature vector, and prints the Pearson correlation matrix
 * of those features.
 *
 * Expects `./datasets/mldata/iris.csv` with a header row and columns:
 * sepal_length, sepal_width, petal_length, petal_width, class.
 */
object _04IrisSparkSQlReader2 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("_04IrisSparkSQlReader2").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    try {
      // Load the CSV; "inferSchema" (documented camelCase spelling) makes the
      // four measurement columns doubles instead of strings.
      val dataDF: DataFrame = spark.read
        .format("csv")
        .option("header", "true")
        .option("inferSchema", true)
        .load("./datasets/mldata/iris.csv")
      dataDF.show()
      dataDF.printSchema()
      /* root
       |-- sepal_length: double (nullable = true)
       |-- sepal_width: double (nullable = true)
       |-- petal_length: double (nullable = true)
       |-- petal_width: double (nullable = true)
       |-- class: string (nullable = true)*/
      // Combine the four numeric columns into one Vector column, as required
      // by spark.ml statistics APIs.
      val assembler: VectorAssembler = new VectorAssembler()
        .setInputCols(Array("sepal_length", "sepal_width", "petal_length", "petal_width"))
        .setOutputCol("features")
      val vecDF: DataFrame = assembler.transform(dataDF)
      // Pearson correlation matrix over the assembled feature vector.
      val corrResult: DataFrame = Correlation.corr(vecDF, "features", "pearson")
      corrResult.show(false)
    } finally {
      // Release the session/context even if a Spark stage above fails.
      spark.stop()
    }
  }
}
