package com.shujia.mlib

import org.apache.spark.mllib.classification.{LogisticRegressionModel, LogisticRegressionWithLBFGS}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object LogisticRegression1 {

  /**
    * Logistic regression example (binary classification with LBFGS):
    * loads LibSVM-formatted data, trains a model, evaluates accuracy on a
    * held-out split, then saves and reloads the model to demonstrate
    * persistence.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("LogisticRegression1").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // Load data in LibSVM format: "label index1:value1 index2:value2 ...".
      val data: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, "spark/data/人体指标.txt")

      // Split into training (80%) and test (20%) sets.
      // NOTE: randomSplit is probabilistic — either split can be empty on
      // tiny inputs, so the evaluation below must tolerate an empty test set.
      val Array(trainData, testData) = data.randomSplit(Array(0.8, 0.2))

      // Build the algorithm: binary classifier (2 classes) with an intercept.
      val log = new LogisticRegressionWithLBFGS()
      log.setNumClasses(2)
      log.setIntercept(true)

      // Train the model on the training split.
      val model = log.run(trainData)

      println("特征权重:" + model.weights)
      println("截距" + model.intercept)

      /**
        * Evaluate model accuracy on the test split.
        * Labels are 0.0/1.0, so |label - prediction| is 1 for each
        * misclassified point and 0 otherwise; the sum counts the errors.
        * sum() (unlike reduce) is safe on an empty RDD and returns 0.0.
        */
      val latNum = testData.map(labeledPoint => {
        val label = labeledPoint.label
        val features = labeledPoint.features
        // Predict the class for this point's feature vector.
        val newlabel = model.predict(features)

        Math.abs(label - newlabel)
      }).sum()

      val sum = testData.count().toDouble

      // Guard against division by zero when the test split is empty.
      if (sum > 0) {
        println("模型准确率：" + (sum - latNum) / sum)
      } else {
        println("Test split is empty; accuracy not computed")
      }

      // Save the model.
      // NOTE: save fails if "spark/data/model" already exists — delete the
      // directory before re-running.
      model.save(sc, "spark/data/model")

      // Reload the model to verify the persistence round-trip.
      val model1 = LogisticRegressionModel.load(sc, "spark/data/model")
      println("Reloaded model intercept: " + model1.intercept)
    } finally {
      // Always release SparkContext resources, even if the job fails.
      sc.stop()
    }
  }
}
