package MLlib

import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.types.DataTypes
import org.apache.spark.sql.{DataFrame, SparkSession}
import util.SparkUtil

object VectorDemo {

  /**
   * Demonstrates MLlib vector basics: dense/sparse construction, squared
   * Euclidean distance, and assembling DataFrame columns into a feature
   * vector through a registered UDF.
   */
  def main(args: Array[String]): Unit = {

    // Dense vector: all five positions stored explicitly.
    val dense: linalg.Vector = Vectors.dense(Array(1.0, 2, 3, 4, 5))
    println(dense)

    // Sparse vector of size 10: only indices 4 and 6 carry non-zero values.
    val sparse: linalg.Vector = Vectors.sparse(10, Array(4, 6), Array(44, 55))
    println(sparse)
    println(sparse.toDense)

    // Squared Euclidean distance between two 2-D vectors.
    val a: linalg.Vector = Vectors.dense(Array(1.0, 1))
    val b: linalg.Vector = Vectors.dense(Array(4.0, 5))
    val squaredDistance: Double = Vectors.sqdist(a, b)
    println(squaredDistance)

    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)

    // Load the raw sample, then recode each categorical column as a double.
    // NOTE(review): CASE expressions without ELSE yield null for unmatched
    // labels — acceptable for this demo's fixed sample data.
    val raw: DataFrame = spark.read.option("header", value = true).csv("userprofile/data/Bayes/sampleBayes.csv")
    val encoded: DataFrame = raw.selectExpr("name",
      "case label when '出轨' then 0.0 when '没出' then 1.0 end as label",
      "case job when '老师' then 0.0 when '程序员' then 1.0 else 2.0 end as job",
      "case income when '低' then 0.0 when '中' then 1.0 else 2.0 end as income",
      "case age when '青年' then 0.0 when '中年' then 1.0 else 2.0 end as age",
      "case sex when '男' then 0.0 when '女' then 1.0 end as sex"
    )
    encoded.printSchema()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // UDF that packs an array column into an ML dense vector.
    val arr2vec: UserDefinedFunction = udf((arr: Array[Double]) => {
      Vectors.dense(arr)
    })
    // Registration is required so the function is resolvable from SQL text
    // in selectExpr below; the Column-API form would not need it, e.g.:
    //   encoded.select('name, 'label, arr2vec(array('job.cast(DataTypes.DoubleType),
    //     'income.cast(DataTypes.DoubleType), 'age.cast(DataTypes.DoubleType),
    //     'sex.cast(DataTypes.DoubleType))).as("vec"))
    spark.udf.register("arr2vec", arr2vec)

    val vectorized: DataFrame = encoded.selectExpr("name", "label", "arr2vec( array( cast(job as double), cast(income as double), cast(age as double), cast(sex as double) ) )")
    vectorized.printSchema()

    spark.close()

  }
}
