package com.shujia.mlib

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object LogisticRegression2 {

  /**
    * Logistic-regression demo.
    *
    * Loads a linearly non-separable LibSVM data set, lifts the two original
    * features into a third dimension (their product) so that a linear decision
    * boundary can separate the classes, then trains an ML `LogisticRegression`
    * model on an 80/20 split and reports accuracy on the held-out test set.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("LogisticRegression2").setMaster("local")
    val sc = new SparkContext(conf)
    val sQLContext = new SQLContext(sc)
    import sQLContext.implicits._

    // Load the LibSVM-formatted data set (label + sparse feature vector per line).
    val rawData: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, "spark/data/线性不可分数据集.txt")

    // Lift the data into a higher dimension: keep the two original features and
    // add their product as a third feature, making the classes linearly separable.
    // NOTE(review): this builds mllib LabeledPoint/Vectors but trains with
    // ml.LogisticRegression — that combination works on Spark 1.6 but not 2.x;
    // confirm the project's Spark version.
    val data = rawData.map { labeledPoint =>
      val label = labeledPoint.label
      val features = labeledPoint.features
      val lifted = Array(features(0), features(1), features(0) * features(1))
      LabeledPoint(label, Vectors.dense(lifted))
    }

    val df = data.toDF()
    df.show(false)

    // Split into 80% training / 20% test (random, unseeded).
    val Array(trainDF, testDF) = df.randomSplit(Array(0.8, 0.2))

    val logisticRegression = new LogisticRegression()
      .setFeaturesCol("features")
      .setLabelCol("label")
      .setFitIntercept(true)

    // Train the model on the training split.
    val model = logisticRegression.fit(trainDF)

    // Score the test split; transform appends the "prediction" column.
    val resultDF = model.transform(testDF)
    resultDF.show(false)

    // Accuracy for 0/1 labels: rows where |label - prediction| == 0, over total.
    resultDF.registerTempTable("table1")
    sQLContext.sql("select (count(1)-sum(abs(label-prediction)))/count(1) from table1").show()

    // Release the SparkContext so the local JVM can exit cleanly.
    sc.stop()
  }
}
