package com.shujia.spark.mllib

import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo7ImageModelUse {

  /**
   * Batch image recognition using a previously trained logistic-regression model.
   *
   * Pipeline: load the saved model -> read images from /data/test2 via the
   * "image" data source -> binarize raw pixel bytes into a sparse feature
   * vector -> attach the bare file name -> run `model.transform` and show
   * the predictions.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      //.master("local[6]")
      // Fixed: appName was "Demo5ImageData", copy-pasted from another demo.
      .appName("Demo7ImageModelUse")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._

    // Load the trained logistic regression model from HDFS/local storage.
    val model: LogisticRegressionModel = LogisticRegressionModel.load("/data/image_model")

    // Read the images that need to be recognized. Each row exposes a struct
    // column `image` with fields such as `origin` (path) and `data` (bytes).
    val imageData: DataFrame = spark
      .read
      .format("image")
      .load("/data/test2")

    // Convert an image's raw pixel bytes into a sparse binary feature vector.
    // JVM bytes are signed: unsigned pixel values 0..127 stay non-negative and
    // map to 0.0 ("black"), while 128..255 wrap to negative ints and map to
    // 1.0 ("white").
    // NOTE(review): this binarization must match what was used at training
    // time for the model to be meaningful — verify against the training job.
    val comData: UserDefinedFunction = udf((data: Array[Byte]) => {
      val features: Array[Double] = data
        .map(_.toInt)                       // signed byte -> int
        .map(p => if (p >= 0) 0.0 else 1.0) // binarize pixel value
      // Sparse form saves space: most pixels are expected to be 0.0.
      Vectors.dense(features).toSparse
    })

    // Extract the bare file name from the image's origin path.
    val comName: UserDefinedFunction = udf((path: String) => path.split("/").last)

    // One row per image: its file name plus the binarized feature vector.
    val testDF: DataFrame = imageData
      .select(comName($"image.origin") as "name", comData($"image.data") as "features")

    // Apply the model; adds prediction/probability columns to testDF.
    val resultDF: DataFrame = model.transform(testDF)

    resultDF.show(1000)
  }
}
