package cn.itcast.tags.ml.features

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel, LogisticRegressionTrainingSummary}
import org.apache.spark.ml.feature.{StandardScaler, StringIndexer, VectorAssembler}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.types.{DoubleType, StringType, StructType}

// Logistic regression demo: feature engineering + classification on the Iris dataset.
/**
 * Demonstrates a typical Spark ML feature-engineering pipeline on the Iris dataset:
 *   1. load the raw CSV with an explicit schema,
 *   2. assemble the four numeric columns into a feature vector,
 *   3. index the string class column into a numeric label,
 *   4. standardize the feature vector,
 *   5. train a multinomial logistic regression model and print training metrics.
 *
 * Expects the Iris data file at `datas/iris/iris.data` (comma-separated, no header).
 */
object IrisFeaturesDemo {
  def main(args: Array[String]): Unit = {

    // Local session; shuffle partitions lowered to 4 to match local[4] cores.
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[4]")
      .config("spark.sql.shuffle.partitions", 4)
      .getOrCreate()

    // Explicit schema: iris.data has no header, so inferSchema is disabled
    // and column names/types are declared up front.
    val irisSchema: StructType = new StructType()
      .add("sepal_length", DoubleType, nullable = true)
      .add("sepal_width", DoubleType, nullable = true)
      .add("petal_length", DoubleType, nullable = true)
      .add("petal_width", DoubleType, nullable = true)
      .add("class", StringType, nullable = true)

    // 1. Load the raw CSV.
    val irisDF: DataFrame = spark.read
      .option("sep", ",")
      .option("header", "false")
      .option("inferSchema", "false")
      .schema(irisSchema)
      .csv("datas/iris/iris.data")

    //irisDF.show(100,false)

    // 2. Assemble sepal/petal length & width columns into a single feature vector.
    //    dropRight(1) excludes the trailing "class" column.
    val assembler: VectorAssembler = new VectorAssembler()
      .setInputCols(irisDF.columns.dropRight(1))
      .setOutputCol("features")

    val df1: DataFrame = assembler.transform(irisDF)

    // 3. Index the class string into a numeric label column.
    //    (Fixed typo: output column renamed "lable" -> "label".)
    val indexer = new StringIndexer()
      .setInputCol("class")
      .setOutputCol("label")

    val df2: DataFrame = indexer
      .fit(df1)       // fit on the data to build the label index model
      .transform(df1) // append the indexed label column

    // 4. Standardize the feature vector (unit standard deviation, mean not centered —
    //    withMean(false) keeps sparse-friendly behavior).
    val scaler = new StandardScaler()
      .setInputCol("features")
      .setOutputCol("scale_features")
      .setWithStd(true)   // scale by standard deviation
      .setWithMean(false) // do not center by the mean

    val df3: DataFrame = scaler.fit(df2).transform(df2)

    // 5. Configure and train multinomial logistic regression.
    val lr: LogisticRegression = new LogisticRegression()
      .setFeaturesCol("scale_features") // x -> features
      .setLabelCol("label")             // y -> label
      // Hyperparameters: sensible settings improve model quality.
      .setMaxIter(20)             // training iterations
      .setFamily("multinomial")   // binomial vs multinomial classification
      // NOTE(review): features are already standardized by StandardScaler above,
      // so LR-internal standardization is redundant; kept true to preserve the
      // original fitted model — consider setting false and dropping the scaler.
      .setStandardization(true)
      .setRegParam(0)        // regularization strength (0 = none)
      .setElasticNetParam(0) // elastic-net mixing (0 = pure L2)

    // Fit the model on the prepared data.
    val lrmodel: LogisticRegressionModel = lr.fit(df3)

    // Evaluate: print coefficients and training-summary metrics.
    // (Fixed misleading message: coefficientMatrix is the coefficient matrix,
    //  not a confusion matrix.)
    println(s"Coefficient matrix:\n${lrmodel.coefficientMatrix}")
    val summary: LogisticRegressionTrainingSummary = lrmodel.summary
    println(s"Accuracy: ${summary.accuracy}")
    // Per-class precision.
    println(s"Precision by label: ${summary.precisionByLabel.mkString(",")}")

    spark.stop()
  }

}
