package com.xbai.spark.recall.test

import org.apache.spark.SparkConf
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.{ALS, ALSModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{LongType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions._

/**
  * @author xbai
  * @Date 2021/1/23
  */
object RecallALS {

  def main(args: Array[String]): Unit = {
    // Set up the Spark session (local mode) with Hive support.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("RecallItemCF")
    val spark: SparkSession = SparkSession.builder().config(conf)
      .enableHiveSupport().getOrCreate()
    spark.sql("use db_recall")
    import spark.implicits._

    // Load the raw data and derive (userId, itemId, rating) rows.
    val data: DataFrame = processData(spark)
    // Map string ids to integer surrogate keys (ALS requires numeric user/item ids).
    val userItemRating: DataFrame = transFromIdToInt(spark, data)
    // Split samples: 80% train / 20% test.
    val Array(training, test): Array[Dataset[Row]] = userItemRating.randomSplit(Array(0.8, 0.2), seed = 1)
    // Configure the ALS model.
    val als: ALS = new ALS()
      .setMaxIter(5) // number of alternating-least-squares iterations
      .setRegParam(0.01) // regularization parameter
      .setRank(10) // dimensionality of the latent factors
      .setUserCol("uid") // userId
      .setItemCol("iid") // itemId
      .setRatingCol("rating") // rating matrix
      // Drop NaN predictions for users/items absent from the training split,
      // otherwise the RMSE below evaluates to NaN.
      .setColdStartStrategy("drop")
    // FIX: fit on the training split only. The original fit on the full
    // userItemRating, leaking the test rows into the model and making the
    // train/test split (and the RMSE) meaningless.
    val model: ALSModel = als.fit(training)

    // Evaluate on the held-out test split.
    val predictTest: DataFrame = model.transform(test)
    val evaluator: RegressionEvaluator = new RegressionEvaluator()
      .setMetricName("rmse")
      .setLabelCol("rating")
      .setPredictionCol("prediction")
    val rmse: Double = evaluator.evaluate(predictTest)
    // FIX: the metric was computed but never surfaced anywhere.
    println(s"Root-mean-square error on the test split = $rmse")

    // Score (user, item) pairs the user has not interacted with yet.
    val predictDF = crateUserItemUnClick(userItemRating, spark)
    val res: DataFrame = model.transform(predictDF)
    // Keep the top-30 highest-scored items per user.
    val recommand: DataFrame = res.selectExpr("userId", "iid", "prediction as score")
      .withColumn("rank", row_number().over(Window.partitionBy("userId").orderBy($"score".desc)))
      .filter(s"rank <= 30")
      .selectExpr("userId", "iid as recallList", "score as recallWeight")
    recommand.show(300)
  }

  def crateUserItemUnClick(userItemRating: DataFrame, spark: SparkSession): DataFrame = {
    // Builds (uid, iid) candidate pairs the user has NOT interacted with, so
    // the trained model can score them for recommendation.
    // In a real system we would not enumerate the full catalogue; items would
    // first be filtered by user tags / business rules (e.g. on an e-commerce
    // site, by the user's purchase history).
    import spark.implicits._
    // Candidate pool: items with more than 10 interactions (popular items).
    val iidHot: DataFrame = userItemRating.groupBy("iid").count()
    val itemSet: Set[Any] = iidHot.filter($"count" > 10)
      .selectExpr("iid").rdd.map(x => x(0)).collect().toSet

    // Row layout of userItemRating: (userId, uid, iid, rating).
    // Key = "userId:uid", value = (iid, rating) for each interaction.
    val userItemNoClick: RDD[String] = userItemRating.rdd.map(x => (x(0) + ":" + x(1), (x(2), x(3))))
      .groupByKey().map { x =>
      val key = x._1
      // FIX: the original computed `itemSet - clickedPairs.toSet`, which tried
      // to remove the whole Set object as a single element from itemSet — a
      // no-op — so already-clicked items were still recommended. Subtract the
      // set of clicked item ids instead.
      val clickedIids: Set[Any] = x._2.map(_._1).toSet
      val predictItem: Set[Any] = (itemSet -- clickedIids).take(20)
      predictItem.map(iid => key + ":" + iid)
    }.flatMap(x => x)
    // Parse "userId:uid:iid" back into a typed row.
    // NOTE(review): this assumes userId itself never contains ':' — confirm.
    val predictDF: DataFrame = userItemNoClick.map(x => (tempClass(x.split(":")(0), x.split(":")(1).toInt, x.split(":")(2).toInt))).toDF
    predictDF
  }

  // Schema for candidate prediction rows: the original string userId plus the
  // integer surrogate keys (uid, iid) the ALS model expects.
  case class tempClass(userId: String, uid: Int, iid: Int)

  private def transFromIdToInt(spark: SparkSession, data: DataFrame): DataFrame = {
    // ALS needs numeric user/item ids: assign a dense surrogate key to each
    // distinct userId and itemId, then join the keys back onto the ratings.
    import spark.implicits._
    val dataWithUserId: DataFrame = data.groupBy("userId").count()
      // FIX: the alias was "UserId1" (capital U) while the join below references
      // $"userId1"; that only resolved because Spark's analyzer is
      // case-insensitive by default (spark.sql.caseSensitive=false). Normalize
      // the alias so the code also works under case-sensitive resolution.
      .selectExpr("userId as userId1", "count")
    val tempUidData: DataFrame = increaseId(dataWithUserId, spark, "uid")
    val dataWithItemId: DataFrame = data.groupBy("itemId").count()
      .selectExpr("itemId as itemId1", "count")
    val tempIidData: DataFrame = increaseId(dataWithItemId, spark, "iid")
    val allData: DataFrame = data.join(tempUidData, $"userId" === $"userId1")
      .join(tempIidData, $"itemId" === $"itemId1")
      .selectExpr("userId", "uid", "iid", "rating")
    allData
  }

  /**
    * Appends a monotonically increasing Long id column named `addId` to `data`
    * by zipping its rows with their global index.
    */
  def increaseId(data: DataFrame, spark: SparkSession, addId: String): DataFrame = {
    val schemaWithId: StructType = data.schema.add(StructField(addId, LongType))
    val rowsWithId: RDD[Row] = data.rdd
      .zipWithIndex()
      .map { case (row, idx) => Row.merge(row, Row(idx)) }
    spark.createDataFrame(rowsWithId, schemaWithId)
  }

  private def processData(spark: SparkSession): DataFrame = {
    /* 1. Load the listening log and derive implicit ratings. */
    // FIX: removed the unused `select * from user_profile` query; its result
    // was never referenced anywhere in this method.
    val userListen: DataFrame = spark.sql("select userId, musicId, cast(remainTime as double), cast(durationHour as double) from user_listen")

    // Total time the user spent listening to each individual track.
    val itemTotalTime: DataFrame = userListen.selectExpr("userId", "musicId", "remainTime")
      .groupBy("userId", "musicId")
      .sum("remainTime")
      .withColumnRenamed("sum(remainTime)", "itemTotalTime")
    // Total listening time per user across all tracks.
    val totalTime: DataFrame = userListen.selectExpr("userId", "musicId", "remainTime")
      .groupBy("userId")
      .sum("remainTime")
      .withColumnRenamed("sum(remainTime)", "totalTime")
    // uid-iid-rating: rating = share of the user's listening time on the track.
    val data: DataFrame = itemTotalTime.join(totalTime, "userId")
      .selectExpr("userId", "musicId as itemId", "itemTotalTime/totalTime as rating")
    data.write.mode("overwrite").saveAsTable("rating")
    data
  }
}
