package algorithm

import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by liuwei on 2017/5/23.
  */
import org.apache.log4j.{ Level, Logger }
import org.apache.spark.{ SparkConf, SparkContext }
import org.apache.spark.mllib.regression.LinearRegressionWithSGD
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LinearRegressionModel

object LinearRegressionTest {

  /**
    * Trains a linear regression model on the lpsa data set twice — once with the
    * RDD-based `LinearRegressionWithSGD` API and once with the DataFrame-based
    * `ml.regression.LinearRegression` estimator — then reports the training RMSE
    * of the SGD model and persists/reloads it.
    *
    * @param args optional overrides: args(0) = input data path, args(1) = model save path
    */
  def main(args: Array[String]) {
    // Build the Spark context (local mode, 5 threads).
    val conf = new SparkConf().setAppName("LinearRegressionWithSGD").setMaster("local[5]")
    val sc = new SparkContext(conf)

    Logger.getRootLogger.setLevel(Level.WARN)

    try {
      // Input and model paths may be overridden on the command line; the
      // hard-coded defaults preserve the original behavior.
      val dataPath =
        if (args.length > 0) args(0)
        else "G:\\BaiduNetdiskDownload\\Spark Mllib机器学习算法源码及实战详解\\MLlib机器学习\\数据\\lpsa.data"
      val modelPath =
        if (args.length > 1) args(1)
        else "G:\\BaiduNetdiskDownload\\LinearRegressionModel2"

      // Each line is "label,feat1 feat2 ..." — parse into LabeledPoints for the RDD API.
      val data = sc.textFile(dataPath)
      val examples = data.map { line =>
        val parts = line.split(',')
        LabeledPoint(parts(0).toDouble, Vectors.dense(parts(1).trim.split(' ').map(_.toDouble)))
      }.cache()
      val numExamples = examples.count()

      // Train the RDD-based SGD model.
      val numIterations = 100
      val stepSize = 1.0
      val miniBatchFraction = 1.0
      val model: LinearRegressionModel =
        LinearRegressionWithSGD.train(examples, numIterations, stepSize, miniBatchFraction)
      // Report the fit instead of discarding these values as no-op expressions.
      println("SGD weights: " + model.weights + ", intercept: " + model.intercept)

      // DataFrame-based estimator settings:
      // convergence tolerance 0.01, max 100 iterations, L2 regularization 0.1
      // (elasticNetParam 0.0 = pure L2, required by the "normal" solver),
      // fit an intercept, standardize features.
      val estimator: LinearRegression = new LinearRegression()
        .setElasticNetParam(0.0).setRegParam(0.1)
        .setMaxIter(100).setFitIntercept(true).setStandardization(true)
        .setTol(0.01).setSolver("normal")

      val sparkSession: SparkSession = SparkSession.builder().getOrCreate()
      // The ml estimator expects a Double "label" column and an ml.linalg.Vector
      // "features" column. The original code placed the raw label String into a
      // DoubleType column and passed scalar columns to fit(), both of which fail
      // at runtime — build the DataFrame with the correct types instead.
      val trainingDF = sparkSession.createDataFrame(
        data.map { line =>
          val parts = line.split(',')
          (parts(0).toDouble,
            org.apache.spark.ml.linalg.Vectors.dense(parts(1).trim.split(' ').map(_.toDouble)))
        }
      ).toDF("label", "features")
      trainingDF.show()
      val model2 = estimator.fit(trainingDF)
      println("ML coefficients: " + model2.coefficients + ", intercept: " + model2.intercept)

      // Score the SGD model on the training set and print a sample of predictions.
      val prediction = model.predict(examples.map(_.features))
      val predictionAndLabel = prediction.zip(examples.map(_.label))
      val printPredict = predictionAndLabel.take(20)
      println("prediction" + "\t" + "label")
      for ((p, l) <- printPredict) {
        println(p + "\t" + l)
      }

      // Training RMSE of the SGD model.
      val loss = predictionAndLabel.map {
        case (p, l) =>
          val err = p - l
          err * err
      }.reduce(_ + _)
      val rmse = math.sqrt(loss / numExamples)
      println(s"Test RMSE = $rmse.")

      // Persist and reload the SGD model (save throws if the path already exists).
      model.save(sc, modelPath)
      val sameModel = LinearRegressionModel.load(sc, modelPath)
      println("Reloaded model intercept: " + sameModel.intercept)
    } finally {
      // Release the Spark context even if training or saving fails.
      sc.stop()
    }
  }

}
