package cn.itcast.tags.ml.regression

import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel, LinearRegressionTrainingSummary}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.types.{DoubleType, StructType}
import org.apache.spark.storage.StorageLevel

// Linear regression on the Boston housing dataset (LibSVM-style whitespace data, 13 features + 1 label).
/**
 * Trains and evaluates a linear-regression model on the Boston housing data.
 *
 * Pipeline: read raw text -> parse (13 features, last column = label) ->
 * 80/20 train/test split -> fit LinearRegression -> print training metrics ->
 * predict on the held-out set.
 */
object LrBostonRegression {
  def main(args: Array[String]): Unit = {

    // Local Spark session; shuffle partitions kept small for a local run.
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[4]")
      .config("spark.sql.shuffle.partitions", 4)
      .getOrCreate()

    import spark.implicits._

    // Read the raw file and drop blank/malformed rows: a valid row has
    // exactly 14 whitespace-separated columns (13 features + 1 label).
    val bostonDS: Dataset[String] = spark.read
      .textFile("datas/housing/housing.data")
      .filter(line => null != line && line.trim.split("\\s+").length == 14)

    // Parse each line into (features, label); mapPartitions avoids
    // per-record closure overhead.
    val bostonDF: DataFrame = bostonDS.mapPartitions(iter => {
      iter.map(line => {
        val parts: Array[String] = line.trim.split("\\s+")
        // Last column is the target (median house value).
        val label: Double = parts(parts.length - 1).toDouble
        // Remaining 13 columns form the dense feature vector.
        val features: linalg.Vector = Vectors.dense(parts.dropRight(1).map(_.toDouble))
        (features, label)
      })
    }).toDF("features", "label")

    bostonDF.show(100, false)
    // LinearRegression standardizes features internally by default
    // (setStandardization(true)), so no explicit scaling step is needed here.

    // 80/20 train/test split; fixed seed keeps runs reproducible.
    val Array(trainingDF, testingDF) = bostonDF.randomSplit(Array(0.8, 0.2), seed = 123L)

    // Cache the training set (it is scanned repeatedly during iterative
    // optimization); count() materializes the cache eagerly.
    trainingDF.persist(StorageLevel.MEMORY_AND_DISK).count()

    val lr: LinearRegression = new LinearRegression()
      .setFeaturesCol("features")
      .setLabelCol("label")
      // Solver: "auto" chooses between normal equations (least squares)
      // and L-BFGS depending on data characteristics.
      .setSolver("auto")
      .setStandardization(true) // standardize features before fitting (default: true)
      .setMaxIter(20)
      .setRegParam(1) // regularization strength
      .setElasticNetParam(0.4) // elastic-net mixing: 0 = L2 (ridge), 1 = L1 (lasso)

    val lrModel: LinearRegressionModel = lr.fit(trainingDF)

    // Model inspection: learned coefficients and intercept.
    println(s"Coefficients: ${lrModel.coefficients}")
    println(s"Intercept: ${lrModel.intercept}")

    // Training-set metrics.
    val trainingSummary: LinearRegressionTrainingSummary = lrModel.summary
    println(s"RMSE: ${trainingSummary.rootMeanSquaredError}")
    println(s"r2 ${trainingSummary.r2}")

    // Predict on the held-out test set.
    lrModel.transform(testingDF).show(10, false)

    // Release the cached training set before shutting down.
    trainingDF.unpersist()
    spark.stop()
  }
}
