package model

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel}
import org.apache.spark.sql.{DataFrame, SQLContext}

object LinearRegression {

  /**
   * Trains a linear-regression model on a CSV text file whose lines are
   * `label,x1,x2,x3,x4,x5` (six numeric fields), prints the fitted model's
   * training summary and the predictions on a held-out split, and saves
   * the fitted pipeline.
   *
   * @param data_path input path of the comma-separated training data
   * @param modelpath output path where the fitted PipelineModel is saved
   *                  (overwrites any existing model at that path)
   */
  def lr(data_path: String, modelpath: String): Unit = {

    // Spark setup. setIfMissing (instead of setMaster) keeps "local" as the
    // default for IDE runs but still lets `spark-submit --master ...` win.
    val conf = new SparkConf()
      .setAppName("LinearRegression")
      .setIfMissing("spark.master", "local")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Load and parse the training data: each line is "label,x1,x2,x3,x4,x5".
    // NOTE(review): malformed lines will fail the job with a
    // NumberFormatException — assumes the input is clean.
    val rawData = sc.textFile(data_path)
    val parsed = rawData.map { line =>
      val f = line.split(",")
      (f(0).toDouble, f(1).toDouble, f(2).toDouble, f(3).toDouble, f(4).toDouble, f(5).toDouble)
    }

    // Feature engineering: name the columns, then assemble the five feature
    // columns into the single vector column the estimator expects.
    val df = sqlContext.createDataFrame(parsed)
      .toDF("label_y", "feature_x1", "feature_x2", "feature_x3", "feature_x4", "feature_x5")
    val featureCols = Array("feature_x1", "feature_x2", "feature_x3", "feature_x4", "feature_x5")
    val assembler = new VectorAssembler().setInputCols(featureCols).setOutputCol("features")
    val vecDF: DataFrame = assembler.transform(df)

    // 90/10 train/test split with a fixed seed for reproducibility.
    val Array(train, test) = vecDF.randomSplit(Array(0.9, 0.1), 1L)

    // Configure the estimator. (Renamed from `lr` so the local value no
    // longer shadows this method.)
    val regression = new LinearRegression()
      .setFeaturesCol("features")
      .setLabelCol("label_y")
      .setFitIntercept(true)
      .setMaxIter(1000)
      .setRegParam(0.3)          // regularization strength
      .setElasticNetParam(0.8)   // elastic-net mix: 0 = L2 (ridge), 1 = L1 (lasso)

    // Fit the single-stage pipeline on the training split.
    val pipeline = new Pipeline().setStages(Array(regression))
    val pipelineModel = pipeline.fit(train)

    // Model evaluation. The previously commented-out code called
    // `.coefficients` / `.summary` on the PipelineModel, which does not
    // compile — they live on the fitted LinearRegressionModel stage.
    val lrModel = pipelineModel.stages(0).asInstanceOf[LinearRegressionModel]
    println(s"Coefficients: ${lrModel.coefficients} Intercept: ${lrModel.intercept}")
    val trainingSummary = lrModel.summary
    println(s"numIterations: ${trainingSummary.totalIterations}")
    println(s"objectiveHistory: ${trainingSummary.objectiveHistory.toList}")
    println(s"RMSE: ${trainingSummary.rootMeanSquaredError}")
    println(s"r2: ${trainingSummary.r2}")

    // Predict on the held-out split; round predictions to one decimal place.
    val predictions: DataFrame = pipelineModel.transform(test)
    val predictResult: DataFrame =
      predictions.selectExpr("features", "label_y", "round(prediction,1) as prediction")

    println("==============输出预测结果==============")
    // show() collects rows to the driver. The old `foreach(println)` ran the
    // println on the executors, so on a real cluster nothing appeared in the
    // driver's console output.
    predictResult.show()

    // Persist the fitted pipeline, overwriting any previous model at the path.
    pipelineModel.write.overwrite().save(modelpath)
    println("==============保存模型成功==============")

    // Shut down the Spark context.
    sc.stop()
  }

  /**
   * Entry point: trains on HDFS-hosted data and writes the model back to HDFS.
   */
  def main(args: Array[String]): Unit = {
    // Training data path (local-filesystem variant kept for reference):
    //val data_path = "data\\train_data.txt"
    val data_path = "hdfs:///test/data/train_data.txt"
    // Model output path (local-filesystem variant kept for reference):
    //val modelpath = "model\\spark-lr-model"
    val modelpath = "hdfs:///test/model/lrModel"
    lr(data_path, modelpath)
  }
}
