package com.shujia.mllib

import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}

object Demo4ReadImage {

  /**
   * Reads training images with Spark's "image" data source, binarizes each
   * image's raw bytes into a 0/1 dense feature vector, joins the features
   * with labels read from a space-separated text file on the file name, and
   * writes the labeled feature set out in libsvm format.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("Demo4ReadImage")
      .config("spark.sql.shuffle.partitions", 4)
      .getOrCreate()

    try {
      import spark.implicits._

      /**
       * 1. Feature engineering
       */
      val imagesDF: DataFrame = spark
        .read
        .format("image")
        .load("C:\\Users\\zzk10\\Desktop\\train")

      imagesDF.printSchema()
      //    imagesDF.show(false)

      val image_data: DataFrame = imagesDF
        .select($"image.origin", $"image.data")
        .as[(String, Array[Byte])]
        .map {
          case (filePath: String, bytes: Array[Byte]) =>
            // Binarize each pixel byte. JVM bytes are signed, so after
            // widening to Int: 0..127 (>= 0) -> 0.0, 128..255 (negative) -> 1.0.
            val doubles: Array[Double] = bytes.map(b => if (b.toInt >= 0) 0.0 else 1.0)
            // Extract the file name from the origin URI/path so it can be
            // joined against the label file. Split on both '/' and '\' so
            // Windows-style backslash paths are handled as well.
            val filename: String = filePath.split("[/\\\\]").last
            // Keep the features as a dense vector: saving a sparse vector in
            // libsvm format loses the original vector length on re-read.
            val image_vec: linalg.Vector = Vectors.dense(doubles)
            (filename, image_vec)
        }.toDF("filename", "features")

      // Labels: one "<filename> <label>" pair per line.
      val labelDF: DataFrame = spark.read
        .format("csv")
        .option("sep", " ")
        .schema("filename String,label Double")
        .load("spark/data/image_res.txt")

      // Persist the engineered features together with their labels in
      // libsvm format for downstream model training.
      image_data
        .join(labelDF, "filename")
        .select($"label", $"features")
        .write
        .mode(SaveMode.Overwrite)
        .format("libsvm")
        .save("spark/data/images")
    } finally {
      // Always release Spark resources, even if the job fails.
      spark.stop()
    }
  }

}
