package cn.itcast.tags.models.ml

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.feature.{Normalizer, StringIndexer, StringIndexerModel, VectorAssembler}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
import org.apache.spark.storage.StorageLevel

object IrisClassification {
  def main(args: Array[String]): Unit = {
    // Build the SparkSession instance via the builder pattern.
    val spark: SparkSession = {
      SparkSession
        .builder()
        .appName(this.getClass.getSimpleName.stripSuffix("$"))
        .master("local[3]")
        .config("spark.sql.shuffle.partitions", "3")
        .getOrCreate()
    }

    // For implicit conversions like converting local Seqs to DataFrames.
    import spark.implicits._

    // Explicit schema for the headerless iris CSV file.
    val irisSchema: StructType = StructType(
      Array(
        StructField("sepal_length", DoubleType, nullable = true),
        StructField("sepal_width", DoubleType, nullable = true),
        StructField("petal_length", DoubleType, nullable = true),
        StructField("petal_width", DoubleType, nullable = true),
        StructField("category", StringType, nullable = true)
      )
    )
    val rawIrisDF: DataFrame = spark.read
      .schema(irisSchema)
      .option("sep", ",")
      .option("encoding", "UTF-8")
      .option("header", "false")
      .option("inferSchema", "false")
      .csv("datas/iris/iris.data")

    // 1. Feature engineering.
    // Index the string class column into a numeric "label" column.
    val indexerModel: StringIndexerModel = new StringIndexer()
      .setInputCol("category")
      .setOutputCol("label")
      .fit(rawIrisDF)
    val df1: DataFrame = indexerModel.transform(rawIrisDF)

    // Assemble the four numeric columns (all but trailing "category") into one vector.
    val assembler: VectorAssembler = new VectorAssembler()
      .setInputCols(rawIrisDF.columns.dropRight(1))
      .setOutputCol("raw_features")
    val df2: DataFrame = assembler.transform(df1)

    // L2-normalize the raw feature vector into the final "features" column.
    val normalizer: Normalizer = new Normalizer()
      .setInputCol("raw_features")
      .setOutputCol("features")
      .setP(2.0)
    val featuresDF: DataFrame = normalizer.transform(df2)

    // Cache before splitting so both splits reuse the materialized lineage;
    // count() forces materialization eagerly.
    featuresDF.persist(StorageLevel.MEMORY_AND_DISK).count()

    // 2. Split into training/testing sets (fixed seed for reproducibility).
    val Array(trainingDF, testingDF) =
      featuresDF.randomSplit(Array(0.9, 0.1), seed = 4869L)

    // 3. Train the model on the TRAINING split only.
    //    (Fitting on the full dataset, as before, leaks test data into training
    //    and inflates the reported accuracy.)
    val lr: LogisticRegression = new LogisticRegression()
      // Column wiring.
      .setLabelCol("label")
      .setFeaturesCol("features")
      .setPredictionCol("prediction") // column holding predicted labels
      // Optimizer settings.
      .setMaxIter(10)
      .setRegParam(0.3)        // regularization strength
      .setElasticNetParam(0.8) // elastic-net mixing: combines L1 and L2
    val lrModel: LogisticRegressionModel = lr.fit(trainingDF)

    // 4. Predict on the held-out TESTING split.
    val predictionDF: DataFrame = lrModel.transform(testingDF)
    predictionDF
      // Show true label vs. predicted label side by side.
      .select("label", "prediction")
      .show(150)

    // 5. Model evaluation: accuracy of the test-set predictions.
    import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("label")
      .setPredictionCol("prediction")
      .setMetricName("accuracy")
    println(s"ACCU = ${evaluator.evaluate(predictionDF)}")

    // 6. Model tuning omitted here.

    // 7. Persist the trained model, then load it back and score a sample row.
    val modelPath = s"datas/models/lrModel-${System.nanoTime()}"
    lrModel.save(modelPath)

    val loadLrModel = LogisticRegressionModel.load(modelPath)
    loadLrModel.transform(
      Seq(
        Vectors.dense(Array(5.1, 3.5, 1.4, 0.2))
      )
        .map(x => Tuple1.apply(x))
        .toDF("features")
    ).show(1, truncate = false)

    // Release the cached DataFrame and shut down the session.
    featuresDF.unpersist()
    spark.stop()
  }
}
