package com.gy.spark.MLlib.lr

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.feature.MinMaxScaler
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.linalg.DenseVector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.sql.SQLContext

/**
 * Min-max normalization: rescales every feature to [0, 1] before
 * training a logistic regression classifier with L-BFGS.
 */
object LogisticRegression7 {

  /**
   * Loads a libsvm dataset, min-max scales the features, trains a binary
   * logistic regression model (L-BFGS) and prints accuracy, weights and
   * intercept on a held-out 30% test split.
   *
   * @param args optional; args(0) overrides the default input file path
   */
  def main(args: Array[String]): Unit = {
    // Allow the data file to be supplied on the command line; fall back to
    // the original hard-coded path so existing invocations keep working.
    val inputPath =
      if (args.nonEmpty) args(0) else "MLlib/input/scala/环境分类数据.txt"

    val conf = new SparkConf().setAppName("spark").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      val sqlContext = new SQLContext(sc)

      // The libsvm data source yields a DataFrame with exactly two columns:
      // "label" and "features".
      val df = sqlContext.read.format("libsvm").load(inputPath)

      // MinMaxScaler (fit on a DataFrame) learns per-feature min/max and
      // rescales each feature to [0, 1].
      //   setInputCol:  name of the raw feature column
      //   setOutputCol: name of the scaled output column
      val minMaxScalerModel = new MinMaxScaler()
        .setInputCol("features")
        .setOutputCol("scaledFeatures")
        .fit(df)

      // Apply the learned scaling to the whole dataset.
      val features = minMaxScalerModel.transform(df)
      features.show()

      // Convert rows to LabeledPoint for the RDD-based MLlib API.
      // NOTE(review): the cast assumes the scaler emits mllib DenseVector,
      // which holds on Spark 1.x; on Spark 2.x+ the ml/mllib vector types
      // differ and this would need Vectors.fromML — confirm Spark version.
      val normalizeInputData = features.rdd.map { row =>
        val label = row.getAs[Double]("label") // type-safe; avoids toString round-trip
        val dense = row.getAs("scaledFeatures").asInstanceOf[DenseVector]
        LabeledPoint(label, dense)
      }

      // 70/30 train/test split with a fixed seed for reproducibility.
      val splits = normalizeInputData.randomSplit(Array(0.7, 0.3), 11L)
      val (trainingData, testData) = (splits(0), splits(1))

      val lr = new LogisticRegressionWithLBFGS()
      lr.setIntercept(true)
      val model = lr.run(trainingData)

      // With 0/1 labels, |label - prediction| is 1 on a miss and 0 on a hit,
      // so its mean is the error rate and (1 - mean) the accuracy.
      val result =
        testData.map(point => Math.abs(point.label - model.predict(point.features)))
      println("正确率=" + (1.0 - result.mean()))
      println(model.weights.toArray.mkString(" "))
      println(model.intercept)
    } finally {
      // Always release Spark resources, even if the job throws.
      sc.stop()
    }
  }
}