package com.train

import com.train.UserToUser.spark
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{col, current_timestamp}
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}

import java.util.Properties

object GoodToGood {

  // Shared SparkSession for the whole job: local mode on all cores with Kryo
  // serialization. NOTE(review): the app is named "als-demo" but no ALS model
  // is trained here; the session is never stopped explicitly (relies on JVM exit).
  val spark = SparkSession.builder()
    .appName("als-demo")
    .master("local[*]")
    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    .getOrCreate()

  // Loads user browse actions from MySQL as (userId, goodId, rating) triples.
  // Browsing is the weakest implicit-feedback signal, so every row is weighted 1.0.
  def getBrowseDataframe(): RDD[(Int, Int, Double)] = {
    // NOTE(review): credentials are hard-coded; move them to configuration.
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    val query = "(SELECT user_id as userId, good_id as goodId FROM action_user_good_browse) AS browse_tmp"
    val userId_goodId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val browseRating = 1.0
    // getAs[Number] tolerates the ids arriving as either Integer or Long,
    // whereas the previous Row(userId: Long, goodId: Long) match threw a
    // MatchError on INT columns. Assumes both columns are NOT NULL.
    userId_goodId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue(), row.getAs[Number]("goodId").intValue(), browseRating)
    }
  }

  // Loads user "like" actions from MySQL as (userId, goodId, rating) triples.
  // Liking is a stronger signal than browsing, so every row is weighted 2.0.
  def getLikeDataframe(): RDD[(Int, Int, Double)] = {
    // NOTE(review): credentials are hard-coded; move them to configuration.
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    val query = "(SELECT user_id as userId, good_id as goodId FROM action_user_good_like) AS like_tmp"
    val userId_goodId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val likeRating = 2.0
    // getAs[Number] tolerates the ids arriving as either Integer or Long,
    // whereas the previous Row(userId: Long, goodId: Long) match threw a
    // MatchError on INT columns. Assumes both columns are NOT NULL.
    userId_goodId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue(), row.getAs[Number]("goodId").intValue(), likeRating)
    }
  }

  // Loads user bookmark/favourite actions from MySQL as (userId, goodId, rating)
  // triples. Saving an item is weighted 3.0.
  def getSaveDataframe(): RDD[(Int, Int, Double)] = {
    // NOTE(review): credentials are hard-coded; move them to configuration.
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    val query = "(SELECT user_id as userId, good_id as goodId FROM action_user_good_save) AS save_tmp"
    val userId_goodId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val saveRating = 3.0
    // getAs[Number] tolerates the ids arriving as either Integer or Long,
    // whereas the previous Row(userId: Long, goodId: Long) match threw a
    // MatchError on INT columns. Assumes both columns are NOT NULL.
    userId_goodId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue(), row.getAs[Number]("goodId").intValue(), saveRating)
    }
  }

  // Loads shopping-cart contents from MySQL as (userId, goodId, rating) triples.
  // Adding to cart is a strong purchase-intent signal, weighted 4.0.
  def getCartDataframe(): RDD[(Int, Int, Double)] = {
    // NOTE(review): credentials are hard-coded; move them to configuration.
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    val query = "(SELECT user_id as userId, product_id as goodId FROM shopping_cart) AS cart_tmp"
    val userId_goodId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val cartRating = 4.0
    // getAs[Number] tolerates the ids arriving as either Integer or Long,
    // whereas the previous Row(userId: Long, goodId: Long) match threw a
    // MatchError on INT columns. Assumes both columns are NOT NULL.
    userId_goodId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue(), row.getAs[Number]("goodId").intValue(), cartRating)
    }
  }

  // Loads completed orders from MySQL as (userId, goodId, rating) triples.
  // A purchase is the strongest signal, weighted 5.0 (the maximum).
  def getOrderDataframe(): RDD[(Int, Int, Double)] = {
    // NOTE(review): credentials are hard-coded; move them to configuration.
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    val query = "(SELECT user_id as userId, product_id as goodId FROM orders) AS order_tmp"
    val userId_goodId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val orderRating = 5.0
    // getAs[Number] tolerates the ids arriving as either Integer or Long,
    // whereas the previous Row(userId: Long, goodId: Long) match threw a
    // MatchError on INT columns. Assumes both columns are NOT NULL.
    userId_goodId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue(), row.getAs[Number]("goodId").intValue(), orderRating)
    }
  }

  // Unions every behaviour source into one RDD and keeps only the strongest
  // (maximum) rating per (userId, goodId) pair, reshaped to
  // (userId, (goodId, rating)) ready for the later self-join on userId.
  def mergeBehaviorDataframes(): RDD[(Int, (Int, Double))] = {
    // Collect all five behaviour RDDs, weakest to strongest signal.
    val behaviorSources = Seq(
      getBrowseDataframe(),
      getLikeDataframe(),
      getSaveDataframe(),
      getCartDataframe(),
      getOrderDataframe()
    )

    // Fold them into a single RDD of (userId, goodId, rating).
    val combined = behaviorSources.reduce(_ union _)

    // Key by (userId, goodId), keep the maximum rating per key so a user who
    // both browsed and bought an item counts only with the purchase weight,
    // then reshape to (userId, (goodId, rating)).
    combined
      .map { case (userId, goodId, rating) => ((userId, goodId), rating) }
      .reduceByKey((a, b) => math.max(a, b))
      .map { case ((userId, goodId), rating) => (userId, (goodId, rating)) }
  }
  // Keeps exactly one ordering of each goods pair produced by the self-join
  // (and drops self-pairs): join(x, x) yields both (a, b) and (b, a), plus
  // (a, a); requiring a strictly increasing id keeps only one of them.
  def filterDuplicateGoodsPairData(userIdAndPairOfMovies: (Int, ((Int, Double), (Int, Double)))): Boolean = {
    val (_, ((firstGoodId, _), (secondGoodId, _))) = userIdAndPairOfMovies
    firstGoodId < secondGoodId
  }

  // Reshapes (userId, ((goodId1, rating1), (goodId2, rating2))) into
  // ((goodId1, goodId2), (rating1, rating2)), dropping the user id so pairs
  // can be grouped across users.
  def mapGoodsPairsWithRatings(userIdAndGoodsData: (Int, ((Int, Double), (Int, Double)))):
  ((Int, Int), (Double, Double)) = {
    val (_, ((goodId1, rating1), (goodId2, rating2))) = userIdAndGoodsData
    ((goodId1, goodId2), (rating1, rating2))
  }

  /**
   * Computes the cosine similarity between two goods from the pairs of ratings
   * that co-rating users gave them.
   *
   * @param ratingPairs one (ratingForGood1, ratingForGood2) entry per user who
   *                    rated both goods — the two columns of the rating matrix
   * @return (similarity in [0, 1] for non-negative ratings, number of co-rating
   *         users); returns (0.0, 0) instead of NaN when the input is empty or
   *         either rating vector is all zeros
   */
  def computeCosineSimilarity(ratingPairs: Iterable[(Double, Double)]): (Double, Int) = {
    var numOfPairs: Int = 0
    var sumXX: Double = 0.0
    var sumYY: Double = 0.0
    var sumXY: Double = 0.0

    for ((ratingX, ratingY) <- ratingPairs) {
      sumXX += ratingX * ratingX
      sumYY += ratingY * ratingY
      sumXY += ratingX * ratingY
      numOfPairs += 1
    }

    val denominator: Double = Math.sqrt(sumXX) * Math.sqrt(sumYY)
    // Guard the 0/0 case: the original returned NaN here, which then silently
    // propagated into the similarity output.
    val similarity: Double = if (denominator == 0.0) 0.0 else sumXY / denominator
    (similarity, numOfPairs)
  }


  /**
   * Filters goods-pair similarities by the given thresholds, mirrors each pair
   * in both directions and persists the result to MySQL.
   *
   * @param sorceValue minimum cosine similarity (exclusive)
   * @param pairNumValue minimum number of co-rating users (exclusive)
   * @param firstNumValue size of the similarity list — NOTE(review): currently
   *                      only printed, never applied (the take() is disabled)
   * @param GoodsAndSimilarityScore ((goodId1, goodId2), (similarity, pairCount))
   */
  def suggestTopGoods(sorceValue:Double,pairNumValue:Double,firstNumValue:Int,GoodsAndSimilarityScore:RDD[((Int,Int),(Double,Int))]): Unit = {

    // Keep only pairs whose similarity and co-rating count both exceed their thresholds.
    val filteredPairs: RDD[((Int, Int), (Double, Int))] = GoodsAndSimilarityScore.filter {
      case (_, (similarity, pairCount)) =>
        similarity > sorceValue && pairCount > pairNumValue
    }

    // Count survivors and report the run parameters.
    val filteredCount: Long = filteredPairs.count()
    println(s"相似度阈值：$sorceValue ,评分组合数量阈值：$pairNumValue ,相似列表数量： $firstNumValue")
    println(s"经过阈值过滤后的数据总条数: $filteredCount")

    // Store both orientations of every pair so the backend can look up either
    // good id directly without having to swap columns itself.
    val forwardPairs: RDD[(Int, Int, Double)] = filteredPairs.map {
      case ((goodId1, goodId2), (similarity, _)) => (goodId1, goodId2, similarity)
    }
    val reversedPairs: RDD[(Int, Int, Double)] = filteredPairs.map {
      case ((goodId1, goodId2), (similarity, _)) => (goodId2, goodId1, similarity)
    }

    import spark.implicits._
    val resultDF: DataFrame = forwardPairs.union(reversedPairs).toDF("good_id1", "good_id2", "similarity")

    // Persist the final similarity table.
    saveDFToMysql(resultDF)
  }

  /**
   * Persists the similarity DataFrame to the MySQL table `good_to_good`,
   * stamping every row with the job's run time in a `create_time` column.
   *
   * NOTE(review): credentials are hard-coded (move to configuration), and
   * SaveMode.Overwrite replaces the whole table on every run — intended for a
   * full rebuild, but confirm before pointing this at shared data.
   *
   * @param flatMapDF columns (good_id1, good_id2, similarity)
   */
  def saveDFToMysql(flatMapDF: DataFrame): Unit = {
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    val table = "good_to_good"
    val username = "root"
    val userpwd = "123456"

    // Tag every row with the time this batch ran.
    val dataFrameWithTime = flatMapDF.withColumn("create_time", current_timestamp())

    // Write the DataFrame straight to the target table via JDBC.
    dataFrameWithTime.write
      .format("jdbc")
      .option("url", jdbcUrl)
      .option("dbtable", table)
      .option("user", username)
      .option("password", userpwd)
      .mode(SaveMode.Overwrite) // Overwrite: drop old similarity data each run
      .save()
  }

  // Entry point: builds the item-item similarity table end to end —
  // load ratings, self-join per user, dedupe pairs, group into rating
  // matrices, score with cosine similarity, filter and persist.
  def main(args: Array[String]): Unit = {

    // (userId, (goodId, rating)) with the strongest rating per user/good pair.
    val userGoodRatings: RDD[(Int, (Int, Double))] = mergeBehaviorDataframes()

    // Self-join on userId: every ordered pair of goods the same user touched.
    val goodsPairsPerUser: RDD[(Int, ((Int, Double), (Int, Double)))] =
      userGoodRatings.join(userGoodRatings)

    // The self-join emits both (a, b) and (b, a) plus self-pairs; keep one ordering.
    val dedupedGoodsPairs: RDD[(Int, ((Int, Double), (Int, Double)))] =
      goodsPairsPerUser.filter(filterDuplicateGoodsPairData)

    // Drop the user id: ((goodId1, goodId2), (rating1, rating2)).
    val pairRatings: RDD[((Int, Int), (Double, Double))] =
      dedupedGoodsPairs.map(mapGoodsPairsWithRatings)

    // Group every goods pair's co-ratings into its 2 x n rating matrix.
    val ratingsByGoodsPair: RDD[((Int, Int), Iterable[(Double, Double)])] =
      pairRatings.groupByKey()

    // mapValues keeps the key and yields (cosineSimilarity, pairCount).
    val similarityByGoodsPair: RDD[((Int, Int), (Double, Int))] =
      ratingsByGoodsPair.mapValues(computeCosineSimilarity)

    // Similarity threshold.
    val scoreValue = 0.8
    // Minimum number of co-rating users.
    val pairNumValue = 10.0
    // Similarity list size.
    val firstNumValue = 20

    // Filter by the thresholds and persist the similarity list.
    suggestTopGoods(scoreValue, pairNumValue, firstNumValue, similarityByGoodsPair)
  }

}
