package com.shujia.mllib

import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Linear-regression demo: reads labelled rows from a CSV file, converts the
  * pipe-separated feature string of each row into a dense vector, fits a
  * Spark ML [[LinearRegression]] model, prints the learned intercept and
  * coefficients, and predicts y for one new feature vector.
  *
  * Expected input format (data/line.txt): `label,x1|x2|...|xn` per line.
  */
object Demo2LineReg {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("line")
      .getOrCreate()

    // Needed for the DataFrame -> Dataset map and .toDF below.
    import spark.implicits._

    try {
      /**
        * 1. Read the raw data.
        */
      val dataDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("label DOUBLE,features STRING")
        .load("data/line.txt")

      /**
        * 2. Feature engineering.
        */
      // Convert the raw rows into the (label, features-vector) shape the
      // algorithm expects.
      val trainData: DataFrame = dataDF.map {
        case Row(label: Double, features: String) =>
          // Split the pipe-separated x values into a feature array.
          // NOTE(review): rows with a null/malformed features column will
          // not match this pattern and would throw — assumes clean input.
          val xs: Array[Double] = features.split("\\|").map(_.toDouble)
          // Wrap as a dense vector (Spark ML's expected feature type).
          val featuresVector: linalg.Vector = Vectors.dense(xs)

          (label, featuresVector)
      }.toDF("label", "features")

      /**
        * 3. Choose the algorithm and train the model.
        */
      // Linear regression estimator (default column names "label"/"features"
      // match the DataFrame built above).
      val linearRegression = new LinearRegression()

      /**
        * Fit the model. Training runs iteratively over the underlying RDDs,
        * minimizing the loss function.
        *
        * The resulting model consists of feature weights plus an intercept.
        */
      val model: LinearRegressionModel = linearRegression.fit(trainData)

      // Intercept of the fitted model.
      val intercept: Double = model.intercept
      println(s"intercept:$intercept")

      // Weights (coefficients) of the features.
      println(model.coefficients)

      /**
        * 4. Predict y for a new x.
        */
      val xs: linalg.Vector = Vectors.dense(Array(0.85, 100))

      // Feed the new feature vector into the model to compute y.
      val y: Double = model.predict(xs)

      println(y)
    } finally {
      // Fix: the original never stopped the session, leaking driver resources.
      spark.stop()
    }
  }

}
