package com.shujia.spark.mllib

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object Demo5LogisticRegression2 {

  /**
    * Logistic regression demo on a linearly non-separable dataset.
    *
    * Loads LibSVM-formatted data, lifts it into a higher dimension by adding
    * the product of the two original features (which makes the 2-D classes
    * linearly separable), trains a binary logistic regression model with
    * L-BFGS, and prints the accuracy measured on a held-out test split.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("LogisticRegression1").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // Load LibSVM-formatted data: "label index:value index:value ...".
      val raw: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, "spark/data/线性不可分数据集.txt")

      // Lift each point into a higher dimension: keep the two original
      // features and append their product as a third feature, so a linear
      // classifier can separate the classes in the lifted space.
      val data: RDD[LabeledPoint] = raw.map { labeledPoint =>
        val label = labeledPoint.label
        val features = labeledPoint.features
        val lifted = Array(features(0), features(1), features(0) * features(1))
        LabeledPoint(label, Vectors.dense(lifted))
      }

      // Split into training (80%) and test (20%) sets. A fixed seed makes the
      // demo's reported accuracy reproducible across runs.
      val Array(train, test) = data.randomSplit(Array(0.8, 0.2), seed = 42L)

      // Fit the model, i.e. learn the weight vector w (and intercept b).
      val model = new LogisticRegressionWithLBFGS()
        .setNumClasses(2)   // binary classification
        .setIntercept(true) // also fit an intercept term
        .run(train)

      // For each test point, |prediction - true label| is 0.0 when correct
      // and 1.0 when wrong, so summing yields the misclassification count.
      val errors = test.map(point => Math.abs(model.predict(point.features) - point.label))

      val total = test.count()
      val wrong = errors.sum()

      // Guard against an empty test split so we never print NaN.
      val accuracy = if (total == 0) 1.0 else 1 - wrong / total.toDouble
      println("准确率：" + accuracy)
    } finally {
      // Release cluster resources even if loading or training fails.
      sc.stop()
    }
  }
}
