package com.shujia.spark.mllib

import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo3Log {

  /**
   * Trains a binary logistic-regression model (L-BFGS) on LIBSVM-formatted
   * body-metrics data, reports accuracy on a held-out split, and saves the
   * model to disk.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local").setAppName("app")
    val sc = new SparkContext(conf)

    // Load the LIBSVM data; cache it because it is traversed more than once
    // (random split, training, and evaluation).
    val data: RDD[LabeledPoint] =
      MLUtils.loadLibSVMFile(sc, "spark/data/人体指标.txt").cache()

    // Randomly split into a 70% training set and a 30% test set.
    val Array(train, test) = data.randomSplit(Array(0.7, 0.3))

    // Fit the model; the learned weights (and intercept) ARE the model.
    val model = new LogisticRegressionWithLBFGS()
      .setNumClasses(2)   // binary classification
      .setIntercept(true) // also fit an intercept term
      .run(train)

    // Per-point error indicator: predict() returns the class label as a
    // Double (0.0 or 1.0), so |predicted - actual| is 1.0 for a wrong
    // prediction and 0.0 for a correct one.
    val errorCount = test.map { point =>
      math.abs(model.predict(point.features) - point.label)
    }.sum()

    val total = test.count()
    println("准确率：" + (1 - (errorCount / total.toDouble)))

    // Persist the trained model; NOTE(review): save fails if the target
    // path already exists — delete it between runs.
    model.save(sc, "spark/data/model")

    // Release cluster resources held by the SparkContext.
    sc.stop()
  }
}
