package com.fwmagic.spark.ml.linearregression

import com.fwmagic.spark.util.SparkUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.collection.mutable.ArrayBuffer

/**
 * Linear regression demo: trains a Spark ML LinearRegression model on a
 * labelled CSV sample and uses it to predict prices for a test data set.
 */
object LinearRegssionDemo {

  /**
   * Entry point: loads `data/linearreg/sample.csv` (area, floor, price),
   * fits a linear-regression model with L2 regularisation, then scores
   * `data/linearreg/test.csv` (id, area, floor) and prints the predictions.
   */
  def main(args: Array[String]): Unit = {
    //Logger.getLogger("org").setLevel(Level.INFO)

    val spark: SparkSession = SparkUtils.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    // Load the labelled training sample; inferSchema gives Double columns.
    val sample: DataFrame = spark.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("data/linearreg/sample.csv")

    // Assemble (area, floor) into a dense feature vector; `price` is the label.
    val featuresVecs: DataFrame = sample.rdd.map {
      case Row(area: Double, floor: Double, price: Double) =>
        (Vectors.dense(Array(area, floor)), price)
    }.toDF("features", "label")

    // regParam adds regularisation to guard against over-fitting
    // (so the model does not chase every individual point).
    val linearRegression: LinearRegression = new LinearRegression()
      .setFeaturesCol("features")
      .setLabelCol("label")
      .setRegParam(0.01)

    // Fit the model on the vectorised sample.
    val model: LinearRegressionModel = linearRegression.fit(featuresVecs)

    featuresVecs.show(100, false)

    // Load the data set to be predicted (columns: id, area, floor).
    val test: DataFrame = spark.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("data/linearreg/test.csv")

    // NOTE: the original code had a discarded `test.rdd.map(row => row(0))`
    // here — a lazy transformation whose result was never used; removed.

    // Vectorise the test rows, keeping the id column for readability.
    val vecTest: DataFrame = test.rdd.map {
      case Row(id: Int, area: Double, floor: Double) =>
        (id, Vectors.dense(Array(area, floor)))
    }.toDF("id", "features")

    // Score the test set with the fitted model and show the predictions.
    val predict: DataFrame = model.transform(vecTest)

    predict.show(100, false)

    spark.close()
  }
}
