package io.sqrtqiezi.spark.mlib

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.sql.SparkSession

/**
 * Minimal Spark ML pipeline sample: reads a JSON dataset, feature-engineers it
 * with `RFormula`, trains a logistic-regression model, and prints predictions.
 *
 * Expects the input data under `data/simple-ml` with columns `lab`, `color`,
 * `value1`, `value2` (inferred from the RFormula below — confirm against the
 * dataset).
 */
object MLibSample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName(this.getClass.getCanonicalName)
      .getOrCreate()

    // Ensure the session is always stopped, even if the pipeline throws.
    try {
      val df = spark.read.json("data/simple-ml")
      df.orderBy("value2").show()

      // Transformer stage: RFormula performs the feature engineering.
      // "lab ~ . + color:value1 + color:value2" means: predict `lab` from all
      // other columns plus the interaction terms color*value1 and color*value2.
      val supervised = new RFormula()
        .setFormula("lab ~ . + color: value1 + color: value2")

      // Fitting the formula produces `features` and `label` columns.
      val fittedRF = supervised.fit(df)
      val preparedDF = fittedRF.transform(df)
      preparedDF.show(truncate = false)

      // Randomly split into train/test sets. A fixed seed makes the sample
      // reproducible across runs.
      val Array(train, test) = preparedDF.randomSplit(Array(0.7, 0.3), seed = 42L)

      // Estimator stage: logistic regression over the RFormula output columns.
      val lr = new LogisticRegression().setLabelCol("label").setFeaturesCol("features")

      // Print all tunable parameters with their documentation and defaults.
      println(lr.explainParams())

      // Train the model.
      val fittedLR = lr.fit(train)

      // Show predictions on the training set (sample output only; real
      // evaluation should use `test`).
      fittedLR.transform(train)
        .select("label", "prediction")
        .show()
    } finally {
      spark.stop()
    }
  }
}
