package cn.itcast.tags.ml.classification

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.feature.{Normalizer, StringIndexer, StringIndexerModel, VectorAssembler}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel

/**
 * Reads the Iris dataset (CSV), indexes the string category into a numeric
 * "label" column, assembles the four measurements into a "features" vector,
 * then trains, evaluates, saves and reloads a logistic regression model.
 */
object IrisClassification {
  def main(args: Array[String]): Unit = {
    // Build the SparkSession instance via the builder pattern (local mode,
    // 3 threads, 3 shuffle partitions to keep the tiny job fast).
    val spark: SparkSession = {
      SparkSession
        .builder()
        .appName(this.getClass.getSimpleName.stripSuffix("$"))
        .master("local[3]")
        .config("spark.sql.shuffle.partitions", "3")
        .getOrCreate()
    }

    // For implicit conversions such as Seq -> DataFrame (.toDF below).
    import spark.implicits._

    try {
      // Explicit schema: iris.data has no header row, so name and type
      // each column up front instead of inferring.
      val irisSchema: StructType = StructType(
        Array(
          StructField("sepal_length", DoubleType, nullable = true),
          StructField("sepal_width", DoubleType, nullable = true),
          StructField("petal_length", DoubleType, nullable = true),
          StructField("petal_width", DoubleType, nullable = true),
          StructField("category", StringType, nullable = true)
        )
      )

      // 1. Load the raw Iris dataset (comma-separated CSV, no header).
      val rawIrisDF: DataFrame = spark.read
        .schema(irisSchema)
        .option("sep", ",")
        .option("header", "false")
        .option("inferSchema", "false")
        .csv("datas/iris/iris.data")
      // rawIrisDF.printSchema()
      // rawIrisDF.show(10, truncate = false)

      // 2. Feature engineering:
      //    a) index the string category into a numeric label column
      //    b) assemble the numeric columns into a single feature vector

      // 2.1 Label indexing with StringIndexer (IndexToString can invert
      //     the mapping later if the original category names are needed).
      val indexerModel: StringIndexerModel = new StringIndexer()
        .setInputCol("category")
        .setOutputCol("label")
        .fit(rawIrisDF)
      val df1: DataFrame = indexerModel.transform(rawIrisDF)
      // df1.printSchema()
      // df1.show(150, truncate = false)

      // 2.2 Assemble the four measurement columns (all but the trailing
      //     "category" column) into one raw feature vector.
      val assembler: VectorAssembler = new VectorAssembler()
        .setInputCols(rawIrisDF.columns.dropRight(1))
        .setOutputCol("raw_features")
      val df2: DataFrame = assembler.transform(df1)
      // df2.printSchema()
      // df2.show(150, truncate = false)

      // 2.3 Normalize each feature vector to unit L2 norm.
      val normalizer: Normalizer = new Normalizer()
        .setInputCol("raw_features")
        .setOutputCol("features")
        .setP(2.0) // p = 2.0 selects the L2 norm
      val featuresDF: DataFrame = normalizer.transform(df2)
      // featuresDF.printSchema()
      // featuresDF.show(10, truncate = false)

      // Cache the prepared dataset: logistic regression is iterative and
      // re-reads it on every iteration. count() forces materialization.
      featuresDF.persist(StorageLevel.MEMORY_AND_DISK).count()

      // 3. Train a logistic regression model on the prepared features.
      val lr: LogisticRegression = new LogisticRegression()
        .setLabelCol("label")       // y -> label
        .setFeaturesCol("features") // x -> features
        .setPredictionCol("prediction")
        .setMaxIter(10)             // maximum number of iterations
        .setRegParam(0.3)           // regularization strength
        .setElasticNetParam(0.8)    // elastic net: mixes L1 and L2 penalties
      val lrModel: LogisticRegressionModel = lr.fit(featuresDF)

      // 4. Predict on the dataset and show true label vs. prediction.
      val predictionDF: DataFrame = lrModel.transform(featuresDF)
      predictionDF
        .select("label", "prediction")
        .show(150, truncate = false)

      // 5. Evaluate: accuracy = correctly predicted samples / all samples.
      //    NOTE(review): this evaluates on the training set itself; use a
      //    held-out test split for an unbiased accuracy estimate.
      import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
      val evaluator: MulticlassClassificationEvaluator = new MulticlassClassificationEvaluator()
        .setLabelCol("label")
        .setPredictionCol("prediction")
        .setMetricName("accuracy")
      // ACCU = 0.9466666666666667
      println(s"ACCU = ${evaluator.evaluate(predictionDF)}")

      val summary = lrModel.summary
      // accuracy: 0.9466666666666667
      println(s"accuracy: ${summary.accuracy}")
      // precision by label: 1.0, 0.9565217391304348, 0.8888888888888888
      println(s"precision: ${summary.precisionByLabel.mkString(",")}")

      // 6. Hyper-parameter tuning omitted here.

      // 7. Model persistence: save, then reload and apply to a single sample.
      // nanoTime keeps the path unique so save() does not hit an existing dir.
      val saveModelPath = s"datas/models/lrModel-${System.nanoTime()}"
      lrModel.save(saveModelPath)

      // Reload the saved model and score one raw measurement vector.
      // NOTE(review): the model was trained on L2-normalized vectors, but this
      // sample is fed in un-normalized — confirm whether that is intended.
      val loadLrModel: LogisticRegressionModel = LogisticRegressionModel.load(saveModelPath)
      loadLrModel.transform(
        Seq(
          Vectors.dense(Array(5.1, 3.5, 1.4, 0.2))
        )
          .map(x => Tuple1.apply(x))
          .toDF("features")
      ).show(1, truncate = false)

      // FIX: release the cached dataset once the model work is finished
      // (previously persisted but never unpersisted).
      featuresDF.unpersist()
    } finally {
      // FIX: stop the SparkSession even if any stage above throws
      // (previously spark.stop() was skipped on failure).
      spark.stop()
    }
  }
}
