package com.shujia.mllib

import org.apache.spark.ml.linalg
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.ml.linalg.{SparseVector, Vectors}

object Demo4ReadImage {

  /**
    * Feature-engineering job for handwritten-digit images.
    *
    * Reads raw images via Spark's "image" data source, binarizes each pixel
    * into a 0/1 feature vector, joins the features with a label file by
    * filename, and writes the result in libsvm format for model training.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .master("local[8]")
      .appName("person")
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    try {
      // Load the training images; the "image" source yields a struct column
      // with origin (file path) and data (raw pixel bytes), among others.
      val images: DataFrame = spark
        .read
        .format("image")
        .load("D:\\课件\\机器学习数据\\手写数字\\train")

      images.printSchema()
      import spark.implicits._

      val data: DataFrame = images
        .select($"image.origin", $"image.data")
        .as[(String, Array[Byte])]
        .map {
          case (name: String, bytes: Array[Byte]) =>
            // Binarize each pixel. Pixels are unsigned 0-255 values stored in
            // signed bytes, so an intensity > 127 appears as a negative byte:
            // negative byte -> 1.0 (bright pixel), otherwise -> 0.0.
            val result: Array[Double] = bytes.map(b => if (b < 0) 1.0 else 0.0)

            // Wrap the binarized pixels in an ML vector for downstream use.
            val fea: linalg.Vector = Vectors.dense(result)

            // Keep only the filename so it can be joined against the label file.
            val filename: String = name.split("/").last

            (filename, fea)
        }.toDF("name", "features")

      /**
        * Read the label data: a space-separated text file mapping each image
        * filename to its digit label.
        */
      val labelDF: DataFrame = spark
        .read.format("csv")
        .option("sep", " ")
        .schema("name STRING, label DOUBLE")
        .load("D:\\课件\\机器学习数据\\手写数字\\train.txt")

      // Attach labels to features by filename; keep only what training needs.
      val trainDF: DataFrame = data
        .join(labelDF, "name")
        .select("label", "features")

      // Persist the labeled feature set in libsvm format for training jobs.
      trainDF
        .write
        .format("libsvm")
        .mode(SaveMode.Overwrite)
        .save("spark/data/images")
    } finally {
      // Release the Spark session and its resources even if the job fails.
      spark.stop()
    }
  }

}
