package com.shujia.mllib

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.functions.{count, sum, when}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

object Demo04Image {

  /**
   * End-to-end image classification pipeline:
   *  1. loads raw images and binarises each pixel byte into a 0/1 feature,
   *  2. joins labels by file name and persists the features as libsvm,
   *  3. trains a logistic-regression model on a 70/30 split,
   *  4. prints test-set accuracy and saves the fitted model.
   *
   * NOTE(review): input/output paths are hard-coded (including a Windows
   * absolute path for the training images) — confirm they match the
   * deployment environment before running.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo04Image")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()

    try {
      // 1. Load the images and run feature engineering.
      val imageDF: DataFrame = spark
        .read
        .format("image")
        .load("C:\\Users\\zzk10\\Desktop\\Spark\\MLLib\\train")

      imageDF.printSchema()
      import spark.implicits._

      val imageDataDF: DataFrame = imageDF
        .select($"image.origin", $"image.data")
        .as[(String, Array[Byte])]
        .map { case (filePath: String, binArr: Array[Byte]) =>
          // Binarise each pixel: a negative signed byte means the unsigned
          // pixel value is >= 128, which we treat as "ink" (1.0), else 0.0.
          // (The original detoured through Array[Int]; the sign test works
          // directly on the byte.)
          val arr1or0: Array[Double] = binArr.map(b => if (b < 0) 1.0 else 0.0)
          // image.origin is a URI, so "/" is the separator on every platform.
          val fileName: String = filePath.split("/").last
          (fileName, Vectors.dense(arr1or0).toSparse)
        }.toDF("fileName", "features")

      // Join against image_res.txt to obtain the label for each file name.
      val imageLabelDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", " ")
        .schema("fileName String,label Double")
        .load("Spark/data/mllib/data/image_res.txt")

      // imageData is consumed twice (libsvm save, then randomSplit), so cache
      // it to avoid re-decoding every image and re-running the join.
      val imageData: DataFrame = imageDataDF
        .join(imageLabelDF, "fileName")
        .select("fileName", "label", "features")
        .cache()

      // Persist the engineered features for reuse by other jobs.
      imageData
        .select($"label", $"features")
        .write
        .format("libsvm")
        .mode(SaveMode.Overwrite)
        .save("Spark/data/mllib/image/libsvm")

      // 2. Split into train/test. A fixed seed makes the split — and hence
      //    the reported accuracy — reproducible across runs.
      val splits: Array[Dataset[Row]] = imageData.randomSplit(Array(0.7, 0.3), seed = 42L)
      val trainDF: Dataset[Row] = splits(0)
      val testDF: Dataset[Row] = splits(1)

      // 3. Choose the model ===> logistic regression.
      val lr: LogisticRegression = new LogisticRegression()
        .setMaxIter(10)
        .setFitIntercept(true)

      // 4. Fit the model on the training set.
      val lrModel: LogisticRegressionModel = lr.fit(trainDF)

      // 5. Evaluate on the test set: accuracy = correct / total.
      //    The original built this DataFrame but left .show() commented out,
      //    so the evaluation never actually executed.
      val resDF: DataFrame = lrModel.transform(testDF)
      resDF
        .select((sum(when($"label" === $"prediction", 1).otherwise(0)) / count("*")).as("accuracy"))
        .show()

      // 6. Save the model. overwrite() prevents the "path already exists"
      //    failure on re-runs, consistent with SaveMode.Overwrite above.
      lrModel.write.overwrite().save("Spark/data/mllib/image/model")
    } finally {
      // Release SparkSession resources (thread pools, UI, temp dirs) even if
      // any stage above throws.
      spark.stop()
    }
  }

}
