package com.test

import org.apache.hadoop.hive.common.AcidMetaDataFile.DataFormat
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.spark.ml.recommendation.{ALS, ALSModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
import org.apache.spark.sql.functions._
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.mllib.evaluation.RankingMetrics
import org.apache.spark.sql.functions.{col, expr}

import java.util.Properties
import org.apache.spark.sql.functions.current_timestamp



object AlsFinish {
  // Shared local SparkSession for every method in this object; main() stops it.
  // NOTE(review): created eagerly at object initialization — any touch of this
  // object starts a Spark context.
  val spark = SparkSession.builder().appName("als-demo").master("local[*]").getOrCreate()

  /**
   * Reads user behaviour data from MySQL and shapes it into ALS training input.
   *
   * Duplicate (user, item) pairs are collapsed by keeping the HIGHEST rating
   * observed for that pair (not the sum — the original `ratingSum` name was
   * misleading).
   *
   * @return DataFrame with columns userId (Int), itemId (Int), rating (Double)
   */
  def mapUserIdAndGoodsRatingsPlus(): DataFrame = {
    // SECURITY(review): credentials and URL are hard-coded; they belong in
    // configuration / environment variables, not in source.
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    // Sub-query alias pushes the projection and the DOUBLE cast down to MySQL.
    val query = "(SELECT user_id as userId, good_id as goodId, CAST(action AS DOUBLE) as rating FROM sys_user_good_action) AS tmp"

    val rating = spark.read.jdbc(jdbcUrl, query, props)

    // Collapse duplicate (userId, goodId) pairs, keeping the maximum rating.
    val dedupedRatings: RDD[((Int, Int), Double)] = rating.rdd.map {
      case Row(userId: Long, goodId: Long, rating: Double) =>
        ((userId.toInt, goodId.toInt), rating)
    }.reduceByKey((a, b) => math.max(a, b))

    val ratingRows = dedupedRatings.map {
      case ((userId, itemId), maxRating) =>
        Row(userId, itemId, maxRating)
    }

    // Schema matching the column names ALS is configured with in train().
    val schema = new StructType()
      .add("userId", IntegerType)
      .add("itemId", IntegerType)
      .add("rating", DoubleType)

    spark.createDataFrame(ratingRows, schema)
  }

  /**
   * Evaluates rating predictions against the ground-truth "rating" column.
   *
   * @param predictions DataFrame containing "rating" and "prediction" columns
   * @return (RMSE, MAE)
   */
  def evaluateModel(predictions: DataFrame): (Double, Double) = {
    val evaluator = new RegressionEvaluator()
      .setLabelCol("rating")       // ground-truth column
      .setPredictionCol("prediction") // model output column

    val rmse = evaluator.setMetricName("rmse").evaluate(predictions)
    val mae = evaluator.setMetricName("mae").evaluate(predictions)

    (rmse, mae)
  }

  /**
   * Persists the recommendation DataFrame to MySQL, stamping each row with a
   * create_time column.
   *
   * NOTE(review): SaveMode.Overwrite drops and recreates the target table on
   * every run, so readers can observe an empty/partial table mid-write. If
   * that matters, write to a staging table and swap, or set the JDBC writer's
   * "truncate" option.
   */
  def dfSaveToMysqlNew(dataFrame: DataFrame): Unit = {
    // SECURITY(review): hard-coded credentials; move to configuration.
    val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"
    val table = "individuation_good"
    val username = "root"
    val userpwd = "123456"

    // Record when this recommendation batch was produced.
    val dataFrameWithTime = dataFrame.withColumn("create_time", current_timestamp())

    println("*******************")
    println("*****数据开始保存****")
    println("*******************")

    dataFrameWithTime.write
      .format("jdbc")
      .option("url", jdbcUrl)
      .option("dbtable", table)
      .option("user", username)
      .option("password", userpwd)
      .mode(SaveMode.Overwrite) // replaces any previous recommendations
      .save()

    println("*******************")
    println("*****数据保存完成****")
    println("*******************")
  }

  /**
   * Trains an ALS model on `trainingData`, prints evaluation metrics computed
   * on `testData`, and returns the fitted model.
   *
   * @param epochs       number of alternating-least-squares iterations
   * @param rank         latent-factor dimensionality
   * @param regParam     regularization coefficient
   * @param trainingData DataFrame(userId, itemId, rating) used for fitting
   * @param testData     held-out DataFrame used for evaluation
   * @return the fitted ALSModel
   */
  def train(epochs: Int, rank: Int, regParam: Double, trainingData: DataFrame, testData: DataFrame): ALSModel = {
    val als = new ALS()
      .setMaxIter(epochs)     // ALS iteration count
      .setRank(rank)          // latent-vector dimensionality
      .setRegParam(regParam)  // regularization strength
      .setUserCol("userId")
      .setItemCol("itemId")
      .setRatingCol("rating")

    val model = als.fit(trainingData)

    // Score the test split; drop rows ALS could not score (cold-start
    // users/items yield NaN predictions under the default strategy).
    val predictions = model.transform(testData)
      .withColumn("prediction", col("prediction").cast("double"))
      .na.drop(Seq("prediction"))

    val (rmse, mae) = evaluateModel(predictions)

    // Ground truth per user: the set of items actually rated >= 1.5.
    val perUserActual = predictions.where("rating>=1.5")
      .groupBy("userId").agg(expr("collect_set(itemId) as goods"))
    perUserActual.cache()
    perUserActual.show()
    println("------perUserActual------")

    // Predicted ranking per user, best-scored items first.
    // (Column case fixed: the real columns are "prediction"/"itemId"; the
    // original spelling only worked via Spark's case-insensitive resolution.)
    val perUserPredictions = predictions.orderBy(col("userId"), col("prediction").desc)
      .groupBy("userId").agg(expr("collect_list(itemId) as goods2"))
    perUserPredictions.cache()

    perUserPredictions.show()
    println("------perUserPredictions------")

    import spark.implicits._
    // Pair each user's actual item set with their predicted ranking.
    val perUserActualvPred = perUserActual.join(perUserPredictions, Seq("userId"))
    perUserActualvPred.cache()
    perUserActualvPred.show()
    println("------perUserActualvPred------")

    // (actual items, top-50 predicted items) per user. Array.take(50) already
    // returns the whole array when shorter, so the original length guard was
    // redundant (and its variable was misnamed "goods2Take15").
    val perUserActualvPred2 = perUserActualvPred
      .map(row => {
        val actualItems = row.getAs[Seq[Integer]]("goods").toArray
        val predictedItems = row.getAs[Seq[Integer]]("goods2").toArray
        (actualItems, predictedItems.take(50))
      })

    perUserActualvPred2.cache()
    perUserActualvPred2.show()

    // Per-user score: |top-10 predicted ∩ actual| / |actual| (0.0 for users
    // with no qualifying actual items).
    // NOTE(review): dividing by |actual| makes this recall@10, not precision;
    // computation kept as-is so the reported metric is unchanged.
    val userPrecision = perUserActualvPred2.map { case (actual, predicted) =>
      val hits = predicted.take(10).intersect(actual).length
      if (actual.nonEmpty) hits.toDouble / actual.length else 0.0
    }

    // A Dataset[Double] exposes its single column as "value".
    val AverageAccuracyByAction = userPrecision.agg(avg("value")).first().getDouble(0)
    userPrecision.show()
    println("----userPrecision-----")

    // MLlib ranking metrics over (actual, predicted) array pairs.
    val ranks = new RankingMetrics(perUserActualvPred2.rdd)
    val AverageAccuracy = ranks.meanAveragePrecision
    val TopAccuracy = ranks.precisionAt(10)

    println("平均准确率: " + AverageAccuracy)
    println("Top准确率: " + TopAccuracy)
    println("自定义准确率: " + AverageAccuracyByAction)
    println(s"轮次 = $epochs,维度 = $rank,正则系数 = $regParam")
    println(s"测试数据上的均方根误差（RMSE）= $rmse")
    println(s"测试数据上的平均绝对误差（MAE）= $mae")

    model
  }

  /**
   * End-to-end pipeline: load ratings from MySQL, train ALS, build top-100
   * recommendations per user, and persist them back to MySQL.
   */
  def main(args: Array[String]): Unit = {
    // Quiet Spark's own logging.
    Logger.getLogger("org").setLevel(Level.ERROR)

    val rating = mapUserIdAndGoodsRatingsPlus()

    // 80/20 train/test split (the original comment wrongly claimed 90/10).
    val Array(trainingData, testData) = rating.randomSplit(Array(0.8, 0.2))

    val model = train(10, 9, 0.1, trainingData, testData)

    // Inspect the learned factor matrices.
    model.userFactors.show(truncate = false)
    println("---------用户特征-----------")
    model.itemFactors.show(truncate = false)
    println("---------商品特征-----------")

    println("-------开始推荐商品---------")
    // Top-100 recommendations per user (the original comments said 2 and 20,
    // contradicting the code).
    val recommendations = model.recommendForAllUsers(100)

    import org.apache.spark.sql.Row

    // Flatten rows shaped (userId, [(goodId, rating), ...]) into one
    // (userId, goodId, rating) triple per recommendation.
    val flatMapRDD2 = recommendations.rdd.flatMap { row =>
      val userId = row.getInt(0)
      val recs = row.getAs[Seq[Row]]("recommendations").toList
      recs.map { rec =>
        val goodId = rec.getInt(0)
        val score = rec.getFloat(1)
        (userId, goodId, score)
      }
    }
    println()

    // Name the columns to match the MySQL target table.
    import spark.implicits._
    val flatMapDF: DataFrame = flatMapRDD2.toDF("user_id", "good_id", "rating")

    dfSaveToMysqlNew(flatMapDF)

    // Release the Spark context.
    spark.stop()
  }
}
