package com.shujia.ml

import org.apache.spark.SparkContext
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo2Point {
  /**
   * Fits a simple linear regression (y = w*x + b) on 2-D points read from a
   * comma-separated text file, then prints the learned intercept/coefficients
   * and two sample predictions.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("point")
      .getOrCreate()

    import spark.implicits._

    // Ensure the SparkSession is always released, even if training fails.
    try {
      val sc: SparkContext = spark.sparkContext

      // Read the raw data: one "x,y" pair per line.
      val linesRDD: RDD[String] = sc.textFile("spark/data/points.txt")

      /**
       * Feature engineering: convert the raw text into LabeledPoint records
       * the algorithm can consume (label = y, single feature vector = [x]).
       */
      val pointsRDD: RDD[LabeledPoint] = linesRDD
        .map(_.split(","))
        // Skip blank or malformed lines instead of failing the whole job
        // on a single bad record.
        .filter(fields => fields.length >= 2)
        .map(fields => {
          val x: Double = fields(0).toDouble
          val y: Double = fields(1).toDouble
          LabeledPoint(y, Vectors.dense(x))
        })

      // Convert to a DataFrame; LabeledPoint yields the default
      // "label"/"features" columns that spark.ml estimators expect.
      val pointDF: DataFrame = pointsRDD.toDF()

      /**
       * Choose the algorithm: linear regression
       * (y = w1*x1 + w2*x2 + ... + wn*xn + b).
       */
      val regression = new LinearRegression()

      /**
       * Train the model: fitting determines the coefficients (w) and the
       * intercept (b). The iterative optimization underneath runs on Spark
       * as the compute engine.
       */
      val model: LinearRegressionModel = regression.fit(pointDF)

      println(model.intercept)    // learned b
      println(model.coefficients) // learned w

      /**
       * Use the trained model to predict new, unseen data points.
       */
      val y: Double = model.predict(Vectors.dense(5))
      println(y)

      println(model.predict(Vectors.dense(100)))
    } finally {
      // Shut down the local Spark runtime cleanly.
      spark.stop()
    }
  }
}
