package com.study.spark.ml.sample

import org.apache.spark.mllib.feature.{StandardScaler, StandardScalerModel}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Feature standardization sample.
 *
 * StandardScaler first computes each feature column's mean and standard
 * deviation from the training data, then standardizes every feature by
 * (optionally) subtracting the mean and dividing by the standard deviation.
 */
object StandardScalarSample {
  /**
   * Entry point.
   *
   * @param args an optional first argument overrides the default libsvm data path
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("StandardScalar")
    val sc = new SparkContext(conf)
    try {
      // Allow the data file to be passed on the command line; fall back to the
      // original hard-coded sample path for backward compatibility.
      val path = args.headOption.getOrElse(
        "/Users/stephen/Applications/spark-2.3.3-hadoop-2.6.0-cdh5.13.3/data/mllib/sample_libsvm_data.txt")
      val data = MLUtils.loadLibSVMFile(sc, path)

      // withMean: default false — center the data at the mean before scaling.
      // withStd:  default true  — scale the data to unit standard deviation.
      val scaler1 = new StandardScaler().fit(data.map(_.features))
      val scaler2 = new StandardScaler(withMean = true, withStd = true).fit(data.map(_.features))
      // scaler3 is identical to scaler2 and would perform the same transformation
      // (kept to demonstrate constructing a StandardScalerModel directly).
      val scaler3 = new StandardScalerModel(scaler2.std, scaler2.mean)

      // data1: unit standard deviation only (mean not subtracted).
      val data1 = data.map(x => (x.label, scaler1.transform(x.features)))
      println(data1.first())

      // Features must be converted to dense vectors first: sparse vectors cannot
      // be transformed when withMean = true, because centering destroys sparsity.
      // data2: unit standard deviation AND zero mean.
      val data2 = data.map(x => (x.label, scaler2.transform(Vectors.dense(x.features.toArray))))
      println(data2.first())
    } finally {
      // Release Spark resources even if the job fails.
      sc.stop()
    }
  }
}