import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.{ALS, ALSModel}
import org.apache.spark.sql.catalyst.dsl.expressions.StringToAttributeConversionHelper
import org.apache.spark.sql.functions.explode
import org.apache.spark.sql.{DataFrame, Row, SparkSession}


/** A single meal rating given by one user.
  *
  * NOTE(review): Scala convention is UpperCamelCase for type names (`Rating`);
  * the lowercase name is kept so existing callers keep compiling.
  *
  * @param userid identifier of the user who rated
  * @param mealid identifier of the rated meal
  * @param rating integer score the user gave the meal
  */
final case class rating(userid: String, mealid: String, rating: Int)

/** Entry point: trains an ALS recommender on meal ratings read from
  * `MealRatings.json`, reports RMSE, and writes flattened per-meal
  * recommendations to `E:\test\dish` as CSV.
  */
object Index {

  /** Adds a `showWithIndex` helper that prints every row of a DataFrame
    * prefixed with its 1-based position. Collects to the driver, so only
    * use on small frames.
    */
  implicit class DataFrameHelpers(df: DataFrame) {
    def showWithIndex(): Unit = {
      df.rdd.zipWithIndex()
        .map { case (row, index) => s"${index + 1}: $row" }
        .collect()
        .foreach(println)
    }
  }

  def main(args: Array[String]): Unit = {
    // Create the SparkSession; a single local core keeps runs deterministic.
    val spark = SparkSession.builder()
      .appName("MenuRecommendation")
      .master("local[1]")
      .getOrCreate()

    // Read the ratings file. NOTE: "header"/"inferSchema" are CSV reader
    // options and are silently ignored by the JSON reader, so they were removed.
    val raw = spark.read.json("MealRatings.json")

    raw.createTempView("data")

    // Data cleaning: drop rows with a missing UserId. The previous version
    // only displayed the cleaned rows and then kept training on the raw,
    // uncleaned data — here the cleaned frame is actually used downstream.
    val cleaned = spark.sql("select * from data where UserId is not null")
    cleaned.show(100)
    cleaned.createOrReplaceTempView("cleaned")

    // Top-10 previews by rating.
    spark.sql("select MealId, Rating from cleaned order by Rating desc limit 10").show()
    spark.sql("select UserId, Rating from cleaned order by Rating desc limit 10").show()

    // ALS requires integer user/item ids; hash the string keys.
    val data = spark.sql("select hash(UserId) userId, hash(MealId) mealId, Rating from cleaned")
    data.show()

    // 80/20 train/test split. (The old `filter(x => x != null)` was a no-op:
    // DataFrame rows are never null.)
    val Array(training, test) = data.randomSplit(Array(0.8, 0.2))

    // Build the ALS model. coldStartStrategy = "drop" makes transform() drop
    // NaN predictions for users/items unseen at fit time — the supported
    // replacement for the previous manual `na.drop()`.
    val als = new ALS()
      .setMaxIter(5)
      .setRegParam(0.3)
      .setUserCol("userId")
      .setItemCol("mealId")
      .setRatingCol("Rating")
      .setColdStartStrategy("drop")

    // Fit on the training split, then predict on the held-out split.
    val model = als.fit(training)
    val predictions = model.transform(test)

    // Evaluate with root-mean-square error.
    val evaluator = new RegressionEvaluator()
      .setMetricName("rmse")
      .setLabelCol("Rating")
      .setPredictionCol("prediction")
    val rmse = evaluator.evaluate(predictions)
    println(s"Root-mean-square error = $rmse")

    // Top-3 meal recommendations for every user.
    val userRecs = model.recommendForAllUsers(3)
    userRecs.show()

    // Top-3 user recommendations for every meal.
    val dishRecs = model.recommendForAllItems(3)
    dishRecs.show()

    dishRecs.createOrReplaceTempView("dish_recommendations")

    // Flatten the recommendations array into one row per (mealId, userId, rating).
    val flattenedDF = spark.sql(
      """
        |SELECT mealId, recommendation.userId, recommendation.rating
        |FROM dish_recommendations
        |LATERAL VIEW explode(recommendations) exploded_table AS recommendation
      """.stripMargin)

    flattenedDF.repartition(1).write.mode("overwrite").csv("E:\\test\\dish")
    flattenedDF.show()

    // Stop the SparkSession.
    spark.stop()
  }
}
