package com.train

import com.train.AlsForGoods.{saveDataToMysql, spark}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{col, current_timestamp}
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}

import java.util.{Timer, TimerTask}
import java.util.Properties

object AlsForTrend {

  // Shared SparkSession for the whole job: runs locally on all cores and uses
  // Kryo serialization (faster and more compact than Java serialization).
  val spark = SparkSession.builder()
    .appName("als-demo")
    .master("local[*]")
    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    .getOrCreate()

  // JDBC connection settings shared by every MySQL read below.
  // NOTE(review): credentials and URL are hard-coded; move them to
  // configuration/environment variables before deploying.
  val props = new Properties()
  props.setProperty("user", "root")
  props.setProperty("password", "123456")
  val jdbcUrl = "jdbc:mysql://localhost:3306/hf-test"

  // Browse actions: each (user, trend) browse event contributes an implicit
  // rating of 1.0 — the weakest of the three tracked behaviours.
  def getBrowseDataframe(): RDD[(Int, Int, Double)] = {
    val query = "(SELECT user_id as userId, trend_id as trendId FROM action_user_trend_browse) AS trend_browse_tmp"
    val userId_trendId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val like_rating = 1.0
    // Read the ids through Number so the mapping works whether the JDBC driver
    // surfaces the columns as Integer (INT) or Long (BIGINT); the previous
    // `case Row(userId: Long, ...)` pattern threw MatchError on INT columns.
    val rating: RDD[(Int, Int, Double)] = userId_trendId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue, row.getAs[Number]("trendId").intValue, like_rating)
    }

    rating
  }
  // Like actions: each (user, trend) like contributes an implicit rating of 2.0.
  def getLikeDataframe():RDD[(Int, Int, Double)] = {

    val query = "(SELECT user_id as userId, trend_id as trendId FROM action_user_trend_like) AS trend_like_tmp"
    val userId_trendId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val like_rating = 2.0
    // Number-based access tolerates both Integer and Long column representations
    // (the old exact `Long` pattern match failed with MatchError on INT columns).
    val rating: RDD[(Int, Int, Double)] = userId_trendId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue, row.getAs[Number]("trendId").intValue, like_rating)
    }
    rating
  }
  // Save (bookmark) actions: the strongest signal, implicit rating 3.0.
  def getSaveDataframe(): RDD[(Int, Int, Double)] = {

    val query = "(SELECT user_id as userId, trend_id as trendId FROM action_user_trend_save) AS trend_save_tmp"
    val userId_trendId: DataFrame = spark.read.jdbc(jdbcUrl, query, props)
    val like_rating = 3.0
    // Number-based access tolerates both Integer and Long column representations
    // (the old exact `Long` pattern match failed with MatchError on INT columns).
    val rating: RDD[(Int, Int, Double)] = userId_trendId.rdd.map { row =>
      (row.getAs[Number]("userId").intValue, row.getAs[Number]("trendId").intValue, like_rating)
    }
    rating
  }

  // Union the three behaviour RDDs and keep only the strongest (highest)
  // rating per (userId, itemId) pair.
  def mergeBehaviorDataframes(): RDD[(Int, Int, Double)] = {
    // Pull every behaviour source and chain them into a single RDD.
    val allActions = getBrowseDataframe()
      .union(getLikeDataframe())
      .union(getSaveDataframe())

    // Key by (user, item) so duplicate actions on the same trend collapse.
    val keyed = allActions.map { case (userId, goodId, rating) => ((userId, goodId), rating) }

    // A user may have browsed, liked AND saved the same trend; retain the
    // maximum rating, i.e. the strongest recorded signal.
    val strongest = keyed.reduceByKey(Math.max)

    // Flatten back to the (userId, goodId, rating) triple shape and return.
    strongest.map { case ((userId, goodId), rating) => (userId, goodId, rating) }
  }

  // Convert the merged (userId, itemId, rating) RDD into a DataFrame with an
  // explicit schema, the input shape the ALS estimator expects.
  def changeRDDToDataFrame(actionDataRDD:RDD[(Int, Int, Double)]):DataFrame ={
    // Column layout consumed by ALS: two int id columns plus a double rating.
    val schema = new StructType()
      .add("userId", IntegerType)
      .add("itemId", IntegerType)
      .add("rating", DoubleType)

    val rows = actionDataRDD.map { case (user, item, score) => Row(user, item, score) }
    spark.createDataFrame(rows, schema)
  }

  // Score a prediction DataFrame against the true ratings.
  // Returns the pair (RMSE, MAE).
  def evaluateModel(predictions: DataFrame): (Double, Double) = {
    // One evaluator instance serves both metrics; only the metric name varies.
    val evaluator = new RegressionEvaluator()
      .setLabelCol("rating")        // ground-truth column
      .setPredictionCol("prediction") // model output column

    val Seq(rmse, mae) =
      Seq("rmse", "mae").map(metric => evaluator.setMetricName(metric).evaluate(predictions))

    (rmse, mae)
  }

  // Train an explicit-feedback ALS model on `trainingData` and report RMSE/MAE
  // on `testData`. Offline hyper-parameter exploration only; the production
  // path is `apply`.
  def train(epochs: Int, rank: Int, regParam: Double, trainingData: DataFrame, testData: DataFrame) ={

    val als = new ALS()
      .setMaxIter(epochs)    // number of alternating-least-squares iterations
      .setRank(rank)         // latent-factor dimension
      .setRegParam(regParam) // regularisation strength
      .setUserCol("userId")
      .setItemCol("itemId")
      .setRatingCol("rating")
      // Drop predictions for users/items unseen during training instead of
      // emitting NaN rows; replaces the previous manual `na.drop` step.
      .setColdStartStrategy("drop")

    // 训练模型
    val model = als.fit(trainingData)

    // RegressionEvaluator expects a double prediction column; ALS emits float.
    val predictions = model.transform(testData)
      .withColumn("prediction", col("prediction").cast("double"))

    // 使用 evaluateModel 函数评估模型性能
    val (rmse, mae) = evaluateModel(predictions)

    // 展示预测结果
    predictions.show()
    println("--predictions---")

    println(s"轮次 = $epochs,维度 = $rank,正则系数 = $regParam")
    println(s"测试数据上的均方根误差（RMSE）= $rmse")
    println(s"测试数据上的平均绝对误差（MAE）= $mae")

  }

  // Train an implicit-feedback ALS model on the full behaviour DataFrame and
  // persist the top-`numItems` recommendations per user to MySQL.
  //
  // `numItems` generalises the previously hard-coded 200 (the old comment
  // incorrectly said 100); the default keeps existing call sites unchanged.
  def apply(epochs: Int, rank: Int, regParam: Double, trainingData: DataFrame, numItems: Int = 200) = {

    val als = new ALS()
      .setImplicitPrefs(true) // behaviour counts are implicit feedback, not true ratings
      .setMaxIter(epochs)     // number of alternating-least-squares iterations
      .setRank(rank)          // latent-factor dimension
      .setRegParam(regParam)  // regularisation strength
      .setUserCol("userId")
      .setItemCol("itemId")
      .setRatingCol("rating")

    // 训练模型
    val model = als.fit(trainingData)

    // Recommend the top numItems trends for every user and persist them.
    val recommendations = model.recommendForAllUsers(numItems)
    saveDataToMysql(recommendations)

  }

  // Flatten the ALS recommendation DataFrame — one row per user with a
  // Seq[(itemId, score)] column, e.g. [1000, ArraySeq([10040,3.92], ...)] —
  // into one (user_id, trend_id, rating) row per recommendation, stamp it with
  // the generation time, and overwrite the `individuation_trend` MySQL table.
  def saveDataToMysql(recommendations: DataFrame) = {

    val flatMapRDD2 = recommendations.rdd.flatMap { row =>
      val userId = row.getInt(0)
      // Named `recs` (not `recommendations`) to avoid shadowing the parameter.
      val recs = row.getAs[Seq[Row]]("recommendations")
      recs.map { rec =>
        (userId, rec.getInt(0), rec.getFloat(1))
      }
    }
    // Result shape: (1000,10664,3.6802955), (1005,10943,6.1811967), ...

    import spark.implicits._
    val flatMapDF: DataFrame = flatMapRDD2.toDF("user_id", "trend_id", "rating")

    // Stamp each row with the time this batch was generated.
    val dataFrameWithTime = flatMapDF.withColumn("create_time", current_timestamp())

    // Reuse the object-level connection settings instead of re-declaring the
    // URL and credentials locally. Overwrite replaces the previous batch.
    dataFrameWithTime.write
      .mode(SaveMode.Overwrite)
      .jdbc(jdbcUrl, "individuation_trend", props)

  }

  // End-to-end pipeline: load behaviour data, shape it for ALS, and fit the
  // production implicit-feedback model.
  def begin() = {
    // Behaviour rows from every action table, deduplicated to the strongest rating.
    val ratings = mergeBehaviorDataframes()

    // Shape the RDD into the (userId, itemId, rating) DataFrame ALS expects.
    val trainingFrame = changeRDDToDataFrame(ratings)

    // Offline evaluation path, kept for reference:
    // val Array(trainingData, testData) = trainingFrame.randomSplit(Array(0.8, 0.2))
    // train(10, 10, 0.1, trainingData, testData)

    apply(10, 10, 0.1, trainingFrame)
  }

  // Entry point: schedules a full retrain immediately and then once a day.
  def main(args: Array[String]): Unit = {
    // Silence Spark's verbose INFO logging.
    Logger.getLogger("org").setLevel(Level.ERROR)

    println("开始运行")

    val oneDayMillis = 24 * 60 * 60 * 1000

    // java.util.Timer runs its tasks on a non-daemon thread, which also keeps
    // the JVM alive between scheduled runs.
    val timer = new Timer()

    val retrainTask = new TimerTask {
      def run(): Unit = {
        println("Executing task...")
        begin()
        println("Task completed.")
      }
    }

    // First run immediately (delay 0), then repeat every 24 hours.
    timer.schedule(retrainTask, 0, oneDayMillis)
  }




}
