package com.shujia.mllib

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.classification.{LogisticRegressionModel, LogisticRegressionWithLBFGS, SVMModel}
import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD

object Demo2PersonIndex {

  /**
    * Trains a binary logistic-regression classifier (L-BFGS) on LibSVM-formatted
    * body-metric data, evaluates its accuracy on a held-out split, and saves the
    * model if the accuracy exceeds 0.95.
    *
    * @param args optional overrides: args(0) = input LibSVM file path,
    *             args(1) = model output path. Defaults preserve the original
    *             hard-coded locations.
    */
  def main(args: Array[String]): Unit = {

    // Allow paths to be supplied on the command line; fall back to the
    // original hard-coded locations for backward compatibility.
    val inputPath: String = if (args.length > 0) args(0) else "spark/data/人体指标.txt"
    val modelPath: String = if (args.length > 1) args(1) else "spark/data/model"

    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("test")
    val sc: SparkContext = new SparkContext(conf)

    try {
      // Load LibSVM-formatted data: each line is "label index:value index:value ..."
      val data: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, inputPath)

      /**
        * Split the data into a training set and a test set (70% / 30%).
        *
        * Training set: used to fit the model.
        * Test set: used to estimate model accuracy.
        */
      val splits: Array[RDD[LabeledPoint]] = data.randomSplit(Array(0.7, 0.3))

      val train: RDD[LabeledPoint] = splits(0)

      val test: RDD[LabeledPoint] = splits(1)

      // Build the algorithm and set its parameters: logistic regression
      // trained with the L-BFGS optimizer.
      val logistic: LogisticRegressionWithLBFGS =
        new LogisticRegressionWithLBFGS()
          .setNumClasses(2)   // binary classification
          .setIntercept(true) // fit an intercept term (do NOT force the fit through the origin)

      // Train the model. Under the hood this is iterative RDD computation
      // that runs until the optimizer converges.
      val model: LogisticRegressionModel = logistic.run(train)

      // Evaluate the model: pair each test point's true label with the
      // label predicted from its feature vector.
      val predict: RDD[(Double, Double)] = test.map { point =>
        (point.label, model.predict(point.features))
      }

      /**
        * Accuracy = number of correct predictions / total number of test points.
        * Guard against an empty test split (possible with small inputs, since
        * randomSplit gives no size guarantee) to avoid division by zero.
        */
      val testCount: Long = test.count()
      val q: Double =
        if (testCount == 0L) 0.0
        else predict.filter { case (label, predicted) => label == predicted }.count() / testCount.toDouble

      println("模型准确率：" + q)

      // Persist the model only when it is accurate enough to be worth keeping.
      if (q > 0.95) {
        model.save(sc, modelPath)
      }
    } finally {
      // Always release the SparkContext, even if loading/training fails.
      sc.stop()
    }
  }
}
