package com.doit.day07

import org.apache.spark.sql.SparkSession

/**
 * @Author:
 * @WX: 17710299606
 * @Tips: 学大数据 ,到多易教育
 * @DOC: https://blog.csdn.net/qq_37933018?spm=1000.2115.3001.5343
 * @Description:
 * Custom functions.
 * Spark ships with many built-in functions that usually cover our needs.
 * UDF  — one row in, one value out
 * UDAF — aggregation, e.g. sum, avg
 * -- UDTF — one row in, many rows out, e.g. explode
 *
 */
object Demo05CustomUDF {

  /**
   * Demonstrates a custom UDF: reads CSV records, self-joins every pair of
   * rows (id < id2), and scores each pair with a cosine-similarity UDF
   * registered under the name "cos_sime".
   */
  def main(args: Array[String]): Unit = {

    val session = SparkSession.builder()
      .appName("test")
      .master("local[*]")
      .getOrCreate()
    import session.implicits._
    import org.apache.spark.sql.functions._

    val df = session.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("data/diff/")

    // Cast each feature to double explicitly so the resulting column is
    // array<double> regardless of what inferSchema deduced per column
    // (an array<int> would not bind to the Seq[Double] UDF below).
    val frame1 = df.selectExpr(
      "id",
      "name",
      "array(cast(age as double), cast(height as double), cast(weight as double), " +
        "cast(yanzhi as double), cast(score as double)) as arr")
    val frame2 = frame1.toDF("id2", "name2", "arr2")
    // id < id2 keeps each unordered pair exactly once and drops self-pairs.
    val data = frame1.join(frame2, frame1("id") < frame2("id2"))
    data.createTempView("tb_data")

    // Custom cosine-similarity function.
    // BUG FIX: Spark hands array columns to a Scala UDF as Seq (WrappedArray),
    // NOT Array — declaring the parameters as Array[Double] fails at runtime
    // with a ClassCastException. Declare them as Seq[Double] instead.
    val f: (Seq[Double], Seq[Double]) => Double = (arr1, arr2) => {
      // cosine(a, b) = (a · b) / (|a| * |b|)
      val denominator = math.sqrt(arr1.map(e => e * e).sum) * math.sqrt(arr2.map(e => e * e).sum)
      val numerator = arr1.zip(arr2).map { case (a, b) => a * b }.sum
      // Guard against zero-magnitude vectors, which would otherwise
      // produce NaN or Infinity from the division.
      if (denominator == 0.0) 0.0 else numerator / denominator
    }
    // Register the function so SQL text can reference it by name.
    session.udf.register("cos_sime", f)

    // Apply the UDF in a SQL query over the self-joined view.
    session.sql(
      """
        |select
        |id ,
        |id2 ,
        |name ,
        |name2 ,
        |cos_sime(arr , arr2)  as  sime_value
        |from
        |tb_data
        |""".stripMargin).show()

    // Release local Spark resources before the JVM exits.
    session.stop()
  }

}
