package cn.itcast.tags.ml.classification

import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.classification.{DecisionTreeClassificationModel, DecisionTreeClassifier}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{StringIndexer, VectorIndexer}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._

/**
 * @author: xu
 * @desc: 管道模型
 */
/**
 * Decision-tree classification assembled as a Spark ML Pipeline:
 * label indexing -> categorical feature indexing -> decision-tree estimator,
 * followed by accuracy evaluation on a held-out test set.
 */
object PipelineClassification {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[4]")
      .getOrCreate()
    import spark.implicits._

    // 1. Load the sample data in LIBSVM format (columns: label, features).
    val dataframe: DataFrame = spark.read
      .format("libsvm")
      .load("datas/mllib/sample_libsvm_data.txt")

    // Split into training (80%) and testing (20%) sets.
    // FIX: pass an explicit seed so the split — and therefore the reported
    // accuracy — is reproducible across runs.
    val Array(trainingDF, testingDF) =
      dataframe.randomSplit(Array(0.8, 0.2), seed = 123L)

    // 2. Build the Pipeline stages.
    // a. Map raw label values to indices in [0, K-1].
    //    Fitted on the full dataset so every label value receives an index.
    val labelIndexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("index_label")
      .fit(dataframe)
    // b. Automatically index categorical features: any feature column with
    //    at most `maxCategories` distinct values is treated as categorical
    //    and re-encoded as category indices.
    val featureIndexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("index_features")
      .setMaxCategories(4)
      .fit(dataframe)
    // c. Decision-tree classifier with its hyper-parameters.
    val dtc: DecisionTreeClassifier = new DecisionTreeClassifier()
      .setLabelCol("index_label")
      .setFeaturesCol("index_features")
      .setImpurity("gini")
      .setMaxDepth(5)
      .setMaxBins(32) // must be >= the number of categories of any categorical feature

    // d. Chain the stages (transformers + estimator) into a single Pipeline.
    val pipeline: Pipeline = new Pipeline().setStages(
      Array(labelIndexer, featureIndexer, dtc)
    )

    // 3. Fit the whole pipeline on the training set only.
    val pipelineModel: PipelineModel = pipeline.fit(trainingDF)

    // Extract the fitted decision-tree model from the pipeline.
    // FIX: pattern-match over the stages instead of a blind positional
    // asInstanceOf cast, and fail with a clear message if the stage layout
    // ever changes.
    val dtcModel: DecisionTreeClassificationModel = pipelineModel.stages
      .collectFirst { case m: DecisionTreeClassificationModel => m }
      .getOrElse(throw new IllegalStateException(
        "Pipeline does not contain a DecisionTreeClassificationModel stage"))
    println(dtcModel.toDebugString)

    // 4. Evaluate on the held-out testing set.
    val predictionDF: DataFrame = pipelineModel.transform(testingDF)
    predictionDF.printSchema()
    predictionDF
      .select($"label", $"index_label", $"probability", $"prediction")
      .show(20, truncate = false)

    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("index_label")
      .setPredictionCol("prediction")
      .setMetricName("accuracy")
    val accuracy: Double = evaluator.evaluate(predictionDF)
    println(s"Accuracy = $accuracy")

    spark.stop()
  }
}
