package com.shujia.ml

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, DataFrameReader, Dataset, Row, SparkSession}

/**
 * Trains a multinomial logistic regression model on a directory of images.
 *
 * Pipeline:
 *   1. Read raw images with Spark's built-in "image" data source.
 *   2. Binarize each pixel byte (signed byte >= 0 -> 255.0, else 0.0).
 *   3. Join the pixel vectors with a space-separated label file keyed by file name.
 *   4. Split 80/20, fit a LogisticRegression, persist the model, and report accuracy.
 */
object Code05LogisticRegressionImage {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[4]")
      .appName("spark")
      .getOrCreate()

    import spark.implicits._

    // Ensure the SparkSession is always released, even if the job fails.
    try {
      // Each row carries a struct column "image" with origin (file URI) and raw bytes.
      val dataFrame: DataFrame = spark.read.format("image").load("C:\\Users\\shujia\\Desktop\\spark\\data\\train")

      // Label file format: "<imageName> <label>" per line, space separated.
      val labelDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", " ")
        .schema("imageName String, label Double")
        .load("spark_code/data/ml/image_res.txt")

      // Keyed by image file name so it can be joined with the pixel data below.
      val labelRDD: RDD[(String, Double)] = labelDF
        .rdd
        .map(row => (row.getAs[String]("imageName"), row.getAs[Double]("label")))

      val trainAndTest: Array[Dataset[Row]] = dataFrame
        .select($"image.origin" as "path", $"image.data" as "data")
        .rdd
        .map { row =>
          // Pixel bytes are signed: unsigned values 0..127 stay non-negative,
          // 128..255 wrap to negative. Binarize: non-negative -> 255.0, else 0.0.
          val pixels: Array[Double] = row
            .getAs[Array[Byte]]("data")
            .map(point => if (point >= 0) 255.0 else 0.0)
          // Origin looks like: file:///C:/Users/shujia/Desktop/spark/data/train/28314.jpg
          // The last path segment is the file name used as the join key.
          val fileName: String = row.getAs[String]("path").split("/").last
          (fileName, Vectors.dense(pixels))
        }
        .join(labelRDD)
        .map {
          // LogisticRegression consumes LabeledPoint(label, features);
          // wrapping here lets toDF() produce the expected "label"/"features" columns.
          case (_, (vector, label)) => LabeledPoint(label, vector)
        }.toDF()
        // Fixed seed so the train/test split — and the reported accuracy — is reproducible.
        .randomSplit(Array(0.8, 0.2), seed = 42L)

      val train: Dataset[Row] = trainAndTest(0)
      val test: Dataset[Row] = trainAndTest(1)

      val mlr = new LogisticRegression()
        .setMaxIter(10)
        .setFitIntercept(true) // fit an intercept term
        .setFamily("multinomial") // multi-class classification

      val mlrModel = mlr.fit(train)

      // overwrite() lets the job be re-run without failing on an existing model directory.
      mlrModel.write.overwrite().save("spark_code/model/logistic")

      val testTransformRes: DataFrame = mlrModel.transform(test)

      val cnt: Long = testTransformRes.count()

      val trueCnt: Long = testTransformRes
        .where($"prediction" === $"label")
        .count()

      // Guard against an empty test split, which would otherwise print NaN.
      if (cnt > 0) {
        println(s"该模型应用的准确率为：${Math.round((trueCnt / cnt.toDouble) * 10000) / 100.0}%")
      } else {
        println("测试集为空，无法计算准确率")
      }

      testTransformRes.show()
    } finally {
      spark.stop()
    }
  }
}
