package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{ LogisticRegression, LogisticRegressionModel}
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo6ImageModel {

  /**
   * Trains an image (handwritten-digit style, 784 = 28x28 features) classification model.
   *
   * Pipeline:
   *   1. read preprocessed image data in libsvm format,
   *   2. split into training and test sets,
   *   3/4. fit a logistic regression model on the training set,
   *   5. evaluate accuracy on the held-out test set,
   *   6. persist the fitted model.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      //.master("local[6]")
      // Fixed copy-paste leftover: app name now matches this object ("Demo5ImageData" before).
      .appName("Demo6ImageModel")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._

    /**
     * 1. Read the already-preprocessed image data.
     */
    val imageDataDF: DataFrame = spark
      .read
      .format("libsvm")
      .option("numFeatures", 784) // length of the feature vector (28x28 pixels)
      .load("/data/image_data")

    imageDataDF.show(false)

    /**
     * 2. Split into training (80%) and test (20%) sets.
     *    A fixed seed makes the split — and therefore the reported accuracy — reproducible,
     *    so the benchmark figures in the comments below can be compared across runs.
     */
    val Array(train: DataFrame, test: DataFrame) =
      imageDataDF.randomSplit(Array(0.8, 0.2), seed = 42L)

    /**
     * 3. Choose an algorithm.
     * 4. Fit the model on the training set.
     */
    // Logistic regression — observed accuracy ~0.86
    val logisticRegression = new LogisticRegression()
    val model: LogisticRegressionModel = logisticRegression.fit(train)

    // Naive Bayes — observed accuracy ~0.82
    //val naiveBayes = new NaiveBayes()
    //val model: NaiveBayesModel = naiveBayes.fit(train)

    // Decision tree — observed accuracy ~0.62
    //val decisionTreeClassifier = new DecisionTreeClassifier()
    //val model: DecisionTreeClassificationModel = decisionTreeClassifier.fit(train)

    /**
     * 5. Run the model over the test set and compute accuracy.
     */
    val testDF: DataFrame = model.transform(test)
    testDF.cache() // reused by the two actions below (matching count + total count)

    val total: Long = testDF.count()
    // Guard against an empty test split: 0.0 / 0 would silently produce NaN.
    val p: Double =
      if (total == 0L) 0.0
      else testDF.where($"label" === $"prediction").count().toDouble / total

    println(s"模型的准确率：$p")

    testDF.unpersist() // release the cached DataFrame once evaluation is done

    /**
     * 6. Save (overwriting any previous version of) the model.
     */
    model
      .write
      .overwrite()
      .save("/data/image_model")

  }

}
