package com.doit.sparksql.day03

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.collection.mutable

/**
 * @DATE 2022/1/16/9:27
 * @Author MDK
 * @Version 2021.2.2
 *
 *    Cosine similarity — compute the similarity between any two users.
 * */
object Demo01 {
  // Silence Spark's verbose INFO logging so only errors reach the console.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
   * Reads per-user feature data from CSV, packs each user's numeric columns
   * into a feature vector, self-joins the data to enumerate every distinct
   * user pair, and computes the cosine similarity of each pair through a
   * registered SQL UDF.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local")
      .appName("相似度")
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Source CSV is expected to have columns: id, name, age, height, weight, yanzhi, score.
    val df = spark.read.option("header", true).option("inferSchema", true).csv("sql_data/yx/")
    df.printSchema()
    df.show()

    println("-------------------------解析数据,生成RDD------------------------------")
    // RDD route: pack the five numeric feature columns into one Array[Double] per user.
    val rdd1: RDD[(Int, String, Array[Double])] = df.rdd.map(row => {
      val id: Int = row.getAs("id")
      val name: String = row.getAs("name")
      val age: Double = row.getAs("age")
      val height: Double = row.getAs("height")
      val weight: Double = row.getAs("weight")
      val yanzhi: Double = row.getAs("yanzhi")
      val score: Double = row.getAs("score")
      (id, name, Array[Double](age, height, weight, yanzhi, score))
    })

    val df1: DataFrame = rdd1.toDF("id", "name", "features")
    /* Alternative pattern-match formulation of df1, kept for reference.
       (Note: as written it assigns the Unit result of .show() to a DataFrame,
       so it would not compile if uncommented — drop the .show() or the val.)
    val df2: DataFrame = df.rdd.map({
      case Row(id: Int, name: String, age:Double, height: Double, weight: Double, yanzhi: Double, score: Double)
      => (id, name, Array[Double](age, height, weight, yanzhi, score))
    }).toDF("id", "name", "features").show()
    */
    df1.show()


    println("-----------------------------sql语句处理----------------------------------")
    // SQL route: the same transformation expressed declaratively with selectExpr.
    val df3: DataFrame = df.selectExpr("id", "name", "array(age, height, weight, yanzhi, score) as features")
    df3.printSchema()
    df3.show(false)


    println("----------------------------------------------------------")
    // Rename columns so the two sides of the self-join are distinguishable;
    // the 'id < 'bid condition keeps each unordered pair exactly once and
    // excludes self-pairs.
    val df2: DataFrame = df1.toDF("bid", "bname", "bfeatures")
    val resDF: DataFrame = df1.join(df2, 'id < 'bid)
    resDF.show(100, false)

    // Cosine similarity UDF. Spark passes array columns to Scala UDFs as
    // mutable.WrappedArray (not Array), so the parameters must use that type.
    val cosin = (arr1: mutable.WrappedArray[Double], arr2: mutable.WrappedArray[Double]) => {
      // Euclidean norms of the two feature vectors (sqrt is clearer than pow(x, 0.5)).
      val norm1: Double = math.sqrt(arr1.map(x => x * x).sum)
      val norm2: Double = math.sqrt(arr2.map(x => x * x).sum)
      // Dot product of the two vectors.
      val dot: Double = arr1.zip(arr2).map { case (a, b) => a * b }.sum
      // NOTE(review): produces NaN for an all-zero vector — confirm inputs are non-zero.
      dot / (norm1 * norm2)
    }
    // Register the UDF, expose the joined pairs as a temp view, and query it.
    spark.udf.register("cos_sim", cosin)
    resDF.createTempView("user")
    spark.sql(
      """
        |select
        |id,name,
        |bid, bname,
        |cos_sim(features, bfeatures)
        |from
        |user
        |""".stripMargin).show()

    spark.close()
  }
}
