package com.xbai.spark.recall.test

import breeze.numerics.{pow, sqrt}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * User-based collaborative-filtering (UserCF) recall job.
  *
  * Pipeline:
  *   1. Build an implicit rating per (user, item): the share of the user's
  *      total listening time spent on that track.
  *   2. Compute pairwise user cosine similarity. The user-user self-join is
  *      restricted to pairs that share an item AND the same profile bucket
  *      (gender / age / salary) to keep the join from exploding.
  *   3. For each user, keep the top-10 most similar users, recall the items
  *      those neighbours listened to, drop items the user already knows, and
  *      score each recalled item as similarity * neighbour rating.
  *
  * @author xbai
  * @Date 2021/1/9
  */
object RecallUserCF {
  def main(args: Array[String]): Unit = {
    // Spark session; Hive support is required to read the db_recall tables.
    val spark: SparkSession = SparkSession.builder().master("local[*]")
      .appName("RecallUserCF").enableHiveSupport().getOrCreate()
    spark.sql("show databases").show()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Load input data.
    val user_profile: DataFrame = spark.sql("select * from db_recall.user_profile")
    val user_listen: DataFrame = spark.sql("select userId, musicId, cast(remainTime as double),cast(durationHour as double) from db_recall.user_listen")
    //    println(user_listen.count()) // 321039
    //    println(user_profile.count()) // 100000

    // Total time each user spent listening to each track.
    val itemTotalTime: DataFrame = user_listen.selectExpr("userId", "musicId", "remainTime", "durationHour")
      .groupBy("userId", "musicId")
      .sum("remainTime")
      .withColumnRenamed("sum(remainTime)", "itemTotalTime")
    // Total listening time per user.
    val totalTime: DataFrame = user_listen.selectExpr("userId", "musicId", "remainTime", "durationHour")
      .groupBy("userId")
      .sum("remainTime")
      .withColumnRenamed("sum(remainTime)", "totalTime")

    // uid, iid, rating = (time on this track) / (user's total listening time).
    val data: DataFrame = itemTotalTime.join(totalTime, "userId")
      .selectExpr("userId", "musicId as itemId", "itemTotalTime/totalTime as rating")
    // `data` feeds three independent lineages below (denominator, profile join,
    // per-user item lists); cache it so the joins above run only once.
    data.cache()
    data.createOrReplaceTempView("user")
    /*
        +--------------------+----------+-------------------+
        |              userId|    itemId|             rating|
        +--------------------+----------+-------------------+
        |00941e652b84b9967...|6326709127| 0.0855431993156544|
        |01e7b27c256cc06c1...|5652309207|0.12641083521444696|
        +--------------------+----------+-------------------+
    */

    // Cosine similarity: sim(X, Y) = (x1*y1 + x2*y2 + ...) / (|X| * |Y|)
    // Denominator per user: |X| = sqrt(x1^2 + x2^2 + ...).
    // math.sqrt/math.pow behave identically to the breeze UFuncs on Double.
    val userSumPowRating: DataFrame = data.rdd
      .map(x => (x(0).toString, x(2).toString))
      .groupByKey()
      .mapValues(x => math.sqrt(x.toArray.map(rating => math.pow(rating.toDouble, 2)).sum))
      .toDF("userId", "sqrt_rating_sum")
    // Cache before the first action so `show` also populates the cache;
    // this frame is joined twice when building df_sim.
    userSumPowRating.cache()
    userSumPowRating.show(3)

    // Numerator: x1*y1 + x2*y2 + ...  A full pairwise self-join would blow up,
    // so candidate pairs are limited to users in the same profile bucket.
    val data_with_userProfile = data.
      join(user_profile, "userId").
      selectExpr("userId",
        "itemId",
        "gender",
        "age",
        "salary",
        "rating")

    val data_with_userProfile_copy = data_with_userProfile.
      selectExpr("userId as userId1",
        "gender as gender1",
        "salary as salary1",
        "itemId as itemId1",
        "age as age1",
        "rating as rating1")

    // item -> user inverted join; drop a user's pairing with themself.
    val user_item2item: Dataset[Row] = data_with_userProfile.join(data_with_userProfile_copy,
      data_with_userProfile("itemId") === data_with_userProfile_copy("itemId1") and
        data_with_userProfile("gender") === data_with_userProfile_copy("gender1") and
        data_with_userProfile("age") === data_with_userProfile_copy("age1") and
        data_with_userProfile("salary") === data_with_userProfile_copy("salary1"))
      .filter("userId <> userId1")
    // Debug only (full action whose result was discarded — counts users that
    // have at least one neighbour); keep it commented like the counts above.
    // user_item2item.selectExpr("userId").distinct().count()
    user_item2item.show(3)

    // Users who share no items with anyone get no recall from this path and
    // need a fallback similarity (e.g. purely profile-based).
    // Product of the two users' ratings on the shared item — one term of the
    // cosine numerator. The built-in column product replaces the original UDF
    // so Catalyst can optimize it; results are identical for these doubles.
    val selectData: DataFrame = user_item2item.selectExpr("userId", "rating", "itemId", "userId1", "rating1")

    val user_data: DataFrame = selectData.withColumn("rating_product", col("rating") * col("rating1"))
      .selectExpr("userId", "userId1", "itemId", "rating_product")

    // NOTE(review): this aggregation previously OOMed on the full dataset;
    // consider salting the key or raising shuffle partitions.
    val user_rating_sum: DataFrame = user_data.groupBy("userId", "userId1")
      .agg("rating_product" -> "sum")
      .withColumnRenamed("sum(rating_product)", "rating_dot")

    // Numerator / denominator = cosine similarity.
    val userSumPowRatingCopy: DataFrame = userSumPowRating.selectExpr("userId as userId1", "sqrt_rating_sum as sqrt_rating_sum1")
    val df_sim: DataFrame = user_rating_sum.join(userSumPowRating, "userId").join(userSumPowRatingCopy, "userId1")
      .selectExpr("userId", "userId1", "rating_dot/(sqrt_rating_sum*sqrt_rating_sum1) as cosine_sim")

    // Top-n most similar users per user.
    // BUG FIX: similarities must be compared numerically — the original sorted
    // the stringified doubles lexicographically, which orders e.g. "1.0E-4"
    // above "0.5" and corrupts the top-10 selection.
    val df_nsim: DataFrame = df_sim.rdd.map(x => (x(0).toString, (x(1).toString, x(2).toString)))
      .groupByKey()
      .mapValues { x => x.toArray.sortWith((a, b) => a._2.toDouble > b._2.toDouble).slice(0, 10) }
      .flatMapValues(x => x).toDF("userId", "user_v_sim")
      .selectExpr("userId", "user_v_sim._1 as user_v", "user_v_sim._2 as sim")

    // Per-user item list as "itemId_rating" strings, used to filter out items
    // the target user has already listened to.
    val df_user_item: DataFrame = data.rdd.map(x => (x(0).toString, x(1).toString + "_" + x(2).toString))
      .groupByKey().mapValues(x => x.toArray).toDF("userId", "item_rating_arr")
    val df_user_item_v: DataFrame = df_user_item.selectExpr("userId as user_v", "item_rating_arr as item_rating_arr_v")

    val df_gen_item: DataFrame = df_nsim.join(df_user_item, "userId").join(df_user_item_v, "user_v")
    // Keep only neighbour items the target user has not heard yet.
    // BUG FIX: the original used `fMap.getOrElse(i(0), -1) == -1`, a
    // String-vs-Int comparison that only worked because String == Int is
    // always false; a Set membership test is the type-safe equivalent (the
    // rating values in the map were never read).
    val filter_udf: UserDefinedFunction = udf { (items: Seq[String], items_v: Seq[String]) =>
      val seen: Set[String] = items.map(_.split("_")(0)).toSet
      items_v.filter(x => !seen.contains(x.split("_")(0)))
    }

    val df_filter_item: DataFrame = df_gen_item.withColumn("filtered_item", filter_udf(col("item_rating_arr"), col("item_rating_arr_v")))
      .selectExpr("userId", "sim", "filtered_item")

    // Recall score per candidate item: similarity * neighbour rating,
    // re-encoded as "itemId_score".
    val simRatingUDF = udf { (sim: Double, items: Seq[String]) =>
      items.map { x =>
        val l = x.split("_")
        l(0) + "_" + l(1).toDouble * sim
      }
    }
    val itemSimRating = df_filter_item.withColumn("item_prod",
      simRatingUDF(col("sim"), col("filtered_item")))
      .select("userId", "item_prod")

    // Final recall table: one row per (user, candidate item, score).
    // NOTE(review): currently never materialized or persisted — write it out
    // (or at least aggregate duplicate items across neighbours) before use.
    val userItemScore = itemSimRating.select(itemSimRating("userId"),
      explode(itemSimRating("item_prod"))).toDF("userId", "item_prod")
      .selectExpr("userId", "split(item_prod,'_')[0] as item_id",
        "cast(split(item_prod,'_')[1] as double) as score")

    spark.stop()
  }
}
