package com.zyx.sparkdemo.mllib.featurescaler

import org.apache.spark.SparkConf
import org.apache.spark.ml.feature.StandardScaler
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

/**
 * @author Yaxi.Zhang
 * @since 2021/8/27 14:17
 * reference: https://blog.csdn.net/neilron/article/details/75329973
 */
object StandardScalerDemo {

  /**
   * Demonstrates feature standardization with Spark ML's `StandardScaler`.
   *
   * `StandardScaler` operates column-wise: each feature dimension is scaled
   * to unit standard deviation and/or centered to zero mean, depending on
   * the `withStd` / `withMean` switches.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName("StandardScalerDemo")
    val session = SparkSession.builder().config(conf).getOrCreate()

    // Three sample rows, each carrying a 3-dimensional dense feature vector.
    val samples = Seq(
      (0, Vectors.dense(1.0, 0.5, -1.0)),
      (1, Vectors.dense(2.0, 1.0, 1.0)),
      (2, Vectors.dense(4.0, 10.0, 2.0))
    )
    val input = session.createDataFrame(samples).toDF("id", "features")

    // Configure the scaler: scale each column to unit standard deviation
    // (withStd defaults to true) but do not shift to zero mean
    // (withMean defaults to false).
    val standardScaler = new StandardScaler()
      .setInputCol("features")
      .setOutputCol("scaledFeatures")
      .setWithStd(true)
      .setWithMean(false)

    // fit() computes the per-column summary statistics and returns a
    // StandardScalerModel; transform() applies the standardization row by row.
    val scaled = standardScaler.fit(input).transform(input)
    scaled.show(100, false)

    /*
      +---+--------------+------------------------------------------------------------+
      |id |features      |scaledFeatures                                              |
      +---+--------------+------------------------------------------------------------+
      |0  |[1.0,0.5,-1.0]|[0.6546536707079772,0.09352195295828244,-0.6546536707079771]|
      |1  |[2.0,1.0,1.0] |[1.3093073414159544,0.1870439059165649,0.6546536707079771]  |
      |2  |[4.0,10.0,2.0]|[2.618614682831909,1.870439059165649,1.3093073414159542]    |
      +---+--------------+------------------------------------------------------------+
     */

    session.close()
  }
}
