package org.niit.service

import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.{ALS, ALSModel}
import org.apache.spark.sql.{DataFrame, SparkSession, functions, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions.Window
import org.niit.dao.RecommendDao
import org.niit.util.ConfigUtil

/**
 * 推荐系统服务，使用Spark MLlib的ALS算法构建推荐模型
 */
object RecommendService {

  /**
   * Loads order data from CSV and projects it into rating triples for ALS.
   *
   * @param spark    active SparkSession
   * @param dataPath path to the CSV order file (first row is the header)
   * @return DataFrame with columns user_id, dish_name, rating (double)
   */
  def loadRatingData(spark: SparkSession, dataPath: String): DataFrame = {
    // Read the raw order CSV; the schema is inferred from the data.
    val rawOrders = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv(dataPath)

    import spark.implicits._

    // Keep only the (user, dish, rating) columns under ASCII aliases.
    val ratings = rawOrders.select(
      col("用户ID").alias("user_id"),
      col("菜品名称").alias("dish_name"),
      col("评分").cast("double").alias("rating")
    )

    // Echo the schema and a small sample for debugging.
    ratings.printSchema()
    ratings.show(5)

    ratings
  }

  /**
   * Trains an ALS collaborative-filtering model on the rating data.
   *
   * String user/dish identifiers are mapped to integer surrogate ids by
   * hashing, because ALS requires integer user/item columns.
   *
   * NOTE(review): `hash(...) % 1000000` can be negative (Scala's `%` keeps
   * the dividend's sign) and may collide for distinct strings; any consumer
   * of this model must derive ids with this exact same expression.
   *
   * @param ratingDF DataFrame with columns user_id, dish_name, rating
   * @return the fitted ALSModel
   */
  def buildALSModel(ratingDF: DataFrame): ALSModel = {
    // Derive integer surrogate ids from the string keys.
    val withIds = ratingDF
      .withColumn("user_idx", functions.hash(col("user_id")) % 1000000)
      .withColumn("dish_idx", functions.hash(col("dish_name")) % 1000000)

    // Configure the ALS estimator; "drop" discards NaN predictions for
    // users/items unseen at training time.
    val estimator = new ALS()
      .setMaxIter(10)
      .setRegParam(0.1)
      .setUserCol("user_idx")
      .setItemCol("dish_idx")
      .setRatingCol("rating")
      .setColdStartStrategy("drop")

    estimator.fit(withIds)
  }

  /**
   * Computes and persists "hot category" recommendations.
   *
   * Groups orders by (category, dish), keeps the overall top 50 dishes by
   * order count — NOTE: this is a GLOBAL top 50, not top 50 per category,
   * despite what the original comment claimed — scores them on a 0-5
   * scale, writes them to `recommend_hot_category`, and prints the 10
   * highest-scoring rows as a table.
   *
   * @param spark   active SparkSession
   * @param orderDF raw order DataFrame (Chinese column names)
   */
  def hotCategoryRecommendation(spark: SparkSession, orderDF: DataFrame): Unit = {
    import spark.implicits._

    // Remove stale rows before re-populating the table.
    RecommendDao.clearRecommendTable("recommend_hot_category")

    // Order count and mean rating per (category, dish); global top 50.
    val hotCategoryDF = orderDF.groupBy(col("菜品类别"), col("菜品名称"))
      .agg(
        count("*").alias("order_count"),
        avg(col("评分")).alias("avg_rating")
      )
      .orderBy(desc("order_count"), desc("avg_rating"))
      .limit(50)

    // Blend popularity and rating into a 0-5 score: up to 2.5 points for
    // order volume (relative to the max of the 50 rows) plus up to 2.5
    // points for the average rating.
    val resultDF = hotCategoryDF.withColumn("score",
      (col("order_count") / max("order_count").over() * 2.5) + (col("avg_rating") / 5 * 2.5)
    )

    // Materialize ONCE: the original collected the DataFrame twice,
    // re-running the whole aggregation job just to print the summary.
    val rows = resultDF.collect()

    // Persist every recommendation row.
    rows.foreach { row =>
      val category = row.getString(0)
      val dishName = row.getString(1)
      val orderCount = row.getLong(2).toInt
      val score = row.getDouble(4)
      RecommendDao.saveHotCategory(category, dishName, orderCount, score)
    }

    // Print the 10 best-scoring rows from the already-collected data.
    val topResults = rows.sortBy(-_.getDouble(4)).take(10)

    println("\n热门品类推荐结果:")
    println("+--------+-----------------+-------+------+-------+")
    println("|category|       dish_name |orders |rating| score |")
    println("+--------+-----------------+-------+------+-------+")

    topResults.foreach { row =>
      val category = row.getString(0)
      val dishName = row.getString(1)
      val orderCount = row.getLong(2)
      val avgRating = row.getDouble(3)
      val score = row.getDouble(4)

      printf("| %-6s | %-15s | %5d | %4.1f | %5.2f |\n",
             category, dishName, orderCount, avgRating, score)
    }

    println("+--------+-----------------+-------+------+-------+")
  }

  /**
   * Computes and persists "hot merchant" recommendations.
   *
   * Groups orders by merchant, keeps the top 30 by order count, scores
   * them on a 0-5 scale, writes them to `recommend_hot_merchant`, and
   * prints the 10 highest-scoring merchants as a table.
   *
   * @param spark   active SparkSession
   * @param orderDF raw order DataFrame (Chinese column names)
   */
  def hotMerchantRecommendation(spark: SparkSession, orderDF: DataFrame): Unit = {
    import spark.implicits._

    // Remove stale rows before re-populating the table.
    RecommendDao.clearRecommendTable("recommend_hot_merchant")

    // Order count and mean rating per merchant; top 30.
    val hotMerchantDF = orderDF.groupBy(col("商铺名称"))
      .agg(
        count("*").alias("order_count"),
        avg(col("评分")).alias("avg_rating")
      )
      .orderBy(desc("order_count"), desc("avg_rating"))
      .limit(30)

    // Blend popularity and rating into a 0-5 score.
    val resultDF = hotMerchantDF.withColumn("score",
      (col("order_count") / max("order_count").over() * 2.5) + (col("avg_rating") / 5 * 2.5)
    )

    // Materialize ONCE: the original collected the DataFrame twice,
    // re-running the aggregation job just to print the summary.
    val rows = resultDF.collect()

    // Persist every recommendation row.
    rows.foreach { row =>
      val merchantName = row.getString(0)
      val orderCount = row.getLong(1).toInt
      val score = row.getDouble(3)
      RecommendDao.saveHotMerchant(merchantName, orderCount, score)
    }

    // Print the 10 best-scoring merchants from the collected data.
    val topResults = rows.sortBy(-_.getDouble(3)).take(10)

    println("\n热门商铺推荐结果:")
    println("+------------------+-------+------+-------+")
    println("|    merchant_name |orders |rating| score |")
    println("+------------------+-------+------+-------+")

    topResults.foreach { row =>
      val merchantName = row.getString(0)
      val orderCount = row.getLong(1)
      val avgRating = row.getDouble(2)
      val score = row.getDouble(3)

      printf("| %-16s | %5d | %4.1f | %5.2f |\n",
             merchantName, orderCount, avgRating, score)
    }

    println("+------------------+-------+------+-------+")
  }

  /**
   * Computes and persists "hot platform" recommendations.
   *
   * Groups orders by platform (no limit — there are few platforms), scores
   * them on a 0-5 scale, writes them to `recommend_hot_platform`, and
   * prints all platforms sorted by score.
   *
   * @param spark   active SparkSession
   * @param orderDF raw order DataFrame (Chinese column names)
   */
  def hotPlatformRecommendation(spark: SparkSession, orderDF: DataFrame): Unit = {
    import spark.implicits._

    // Remove stale rows before re-populating the table.
    RecommendDao.clearRecommendTable("recommend_hot_platform")

    // Order count and mean rating per platform.
    val hotPlatformDF = orderDF.groupBy(col("平台"))
      .agg(
        count("*").alias("order_count"),
        avg(col("评分")).alias("avg_rating")
      )
      .orderBy(desc("order_count"), desc("avg_rating"))

    // Blend popularity and rating into a 0-5 score.
    val resultDF = hotPlatformDF.withColumn("score",
      (col("order_count") / max("order_count").over() * 2.5) + (col("avg_rating") / 5 * 2.5)
    )

    // Materialize ONCE: the original collected the DataFrame twice,
    // re-running the aggregation job just to print the summary.
    val rows = resultDF.collect()

    // Persist every recommendation row.
    rows.foreach { row =>
      val platform = row.getString(0)
      val orderCount = row.getLong(1).toInt
      val score = row.getDouble(3)
      RecommendDao.saveHotPlatform(platform, orderCount, score)
    }

    // Print all platforms, highest score first, from the collected data.
    val topResults = rows.sortBy(-_.getDouble(3))

    println("\n热门平台推荐结果:")
    println("+------------+-------+------+-------+")
    println("| platform   |orders |rating| score |")
    println("+------------+-------+------+-------+")

    topResults.foreach { row =>
      val platform = row.getString(0)
      val orderCount = row.getLong(1)
      val avgRating = row.getDouble(2)
      val score = row.getDouble(3)

      printf("| %-10s | %5d | %4.1f | %5.2f |\n",
             platform, orderCount, avgRating, score)
    }

    println("+------------+-------+------+-------+")
  }

  /**
   * Computes and persists per-time-period dish recommendations.
   *
   * For each time period, ranks dishes by order count then rating, keeps
   * the top 10 (rank() is used, so ties at rank 10 can admit extra rows),
   * scores them on a 0-5 scale, writes them to `recommend_time_based`,
   * and prints the 5 best per period.
   *
   * @param spark   active SparkSession
   * @param orderDF raw order DataFrame (Chinese column names)
   */
  def timeBasedRecommendation(spark: SparkSession, orderDF: DataFrame): Unit = {
    import spark.implicits._

    // Remove stale rows before re-populating the table.
    RecommendDao.clearRecommendTable("recommend_time_based")

    // Order count and mean rating per (period, dish, category).
    val timeBasedDF = orderDF.groupBy(col("时间段"), col("菜品名称"), col("菜品类别"))
      .agg(
        count("*").alias("order_count"),
        avg(col("评分")).alias("avg_rating")
      )

    // Rank dishes inside each period; keep the top 10 per period.
    val rankWindow = Window.partitionBy(col("时间段")).orderBy(desc("order_count"), desc("avg_rating"))
    val topTimeBasedDF = timeBasedDF.withColumn("rank", rank().over(rankWindow))
      .filter(col("rank") <= 10)
      .drop("rank")

    // FIX: use an un-ordered window for the per-period max. With an ORDER
    // BY clause the default frame makes max() a *running* max, which only
    // equaled the true partition max because rows happened to be sorted by
    // order_count desc. A partition-only window is correct by construction.
    val maxWindow = Window.partitionBy(col("时间段"))
    val resultDF = topTimeBasedDF.withColumn("score",
      (col("order_count") / max("order_count").over(maxWindow) * 2.5) + (col("avg_rating") / 5 * 2.5)
    )

    // Materialize ONCE: the original collected the DataFrame for the DB
    // writes, again for the distinct period list, and once more PER PERIOD
    // for printing — one Spark job per time period.
    val rows = resultDF.collect()

    // Persist every recommendation row.
    rows.foreach { row =>
      val timePeriod = row.getString(0)
      val dishName = row.getString(1)
      val category = row.getString(2)
      val score = row.getDouble(5)
      RecommendDao.saveTimeBasedRecommend(timePeriod, dishName, category, score)
    }

    // Group locally by period and print the 5 best-scoring dishes of each.
    val byPeriod = rows.groupBy(_.getString(0))

    byPeriod.keys.toSeq.sorted.foreach { timePeriod =>
      println(s"\n时段 '$timePeriod' 的推荐菜品:")
      println("+------------+-----------------+--------+-------+------+-------+")
      println("| time_period|      dish_name  |category|orders |rating| score |")
      println("+------------+-----------------+--------+-------+------+-------+")

      val periodResults = byPeriod(timePeriod).sortBy(-_.getDouble(5)).take(5)

      periodResults.foreach { row =>
        val tp = row.getString(0)
        val dishName = row.getString(1)
        val category = row.getString(2)
        val orderCount = row.getLong(3)
        val avgRating = row.getDouble(4)
        val score = row.getDouble(5)

        printf("| %-10s | %-15s | %-6s | %5d | %4.1f | %5.2f |\n",
               tp, dishName, category, orderCount, avgRating, score)
      }

      println("+------------+-----------------+--------+-------+------+-------+")
    }
  }

  /**
   * Generates per-user dish recommendations from a trained ALS model and
   * persists them to `recommend_personalized`.
   *
   * String user/dish keys are hashed to integer ids with the SAME
   * expression used in buildALSModel, so model indices can be mapped back
   * to names. NOTE(review): `hash(...) % 1000000` is not collision-free
   * and can be negative; distinct users or dishes may share an index.
   *
   * @param spark   active SparkSession
   * @param orderDF raw order DataFrame (Chinese column names)
   * @param model   fitted ALS model (userCol "user_idx", itemCol "dish_idx")
   */
  def personalizedRecommendation(spark: SparkSession, orderDF: DataFrame, model: ALSModel): Unit = {
    import spark.implicits._

    // Remove stale rows before re-populating the table.
    RecommendDao.clearRecommendTable("recommend_personalized")

    // All distinct users plus the integer surrogate id used by the model.
    val users = orderDF.select(col("用户ID")).distinct()
    val userIdxDF = users.withColumn("user_idx", functions.hash(col("用户ID")) % 1000000)

    // BUG FIX: recommendForUserSubset returns only (user_idx,
    // recommendations); the original read the non-existent "用户ID" column
    // from its output, which fails at runtime. Keep an index -> id map so
    // recommendations can be tied back to the original user ids.
    val userIdxNameMap = userIdxDF.collect()
      .map(row => (row.getAs[Int]("user_idx"), row.getAs[String]("用户ID")))
      .toMap

    // Generate the top-10 recommendations for every user in the subset.
    val recommendationsDF = model.recommendForUserSubset(userIdxDF, 10)

    // Reverse lookup from the hashed dish index back to the dish name.
    val dishIdxNameMap = orderDF.select(col("菜品名称"))
      .distinct()
      .withColumn("dish_idx", functions.hash(col("菜品名称")) % 1000000)
      .collect()
      .map(row => (row.getAs[Int]("dish_idx"), row.getAs[String]("菜品名称")))
      .toMap

    // Dish name -> category and dish name -> merchant lookup tables.
    val categoryMap = orderDF.select(col("菜品名称"), col("菜品类别")).distinct().collect()
      .map(row => (row.getAs[String]("菜品名称"), row.getAs[String]("菜品类别")))
      .toMap

    val merchantMap = orderDF.select(col("菜品名称"), col("商铺名称")).distinct().collect()
      .map(row => (row.getAs[String]("菜品名称"), row.getAs[String]("商铺名称")))
      .toMap

    // Resolve each recommendation to names and persist it.
    val userRecommendations = scala.collection.mutable.Map[String, Seq[(String, String, String, Float)]]()

    recommendationsDF.collect().foreach { row =>
      val userIdx = row.getAs[Int]("user_idx")
      val recs = row.getAs[Seq[Row]]("recommendations")

      // Skip user indices we cannot map back (should not happen, since the
      // subset was built from the same hash expression).
      userIdxNameMap.get(userIdx).foreach { userId =>
        val userRecs = scala.collection.mutable.ArrayBuffer[(String, String, String, Float)]()

        recs.foreach { rec =>
          val dishIdx = rec.getAs[Int](0)
          val score = rec.getAs[Float](1)

          // Skip dish indices we cannot map back to a dish name.
          dishIdxNameMap.get(dishIdx).foreach { dishName =>
            val category = categoryMap.getOrElse(dishName, "未知")
            val merchantName = merchantMap.getOrElse(dishName, "未知")

            RecommendDao.savePersonalizedRecommend(userId, dishName, category, merchantName, score)
            userRecs += ((dishName, category, merchantName, score))
          }
        }

        if (userRecs.nonEmpty) {
          userRecommendations(userId) = userRecs.toSeq
        }
      }
    }

    // Print the recommendations of the first 5 users (sorted by id).
    val topUsers = userRecommendations.keys.toSeq.sorted.take(5)

    topUsers.foreach { userId =>
      println(s"\n用户 $userId 的个性化推荐:")
      println("+-------+-----------------+--------+------------------+-------+")
      println("|user_id|      dish_name  |category|    merchant      | score |")
      println("+-------+-----------------+--------+------------------+-------+")

      userRecommendations(userId).foreach { case (dishName, category, merchantName, score) =>
        printf("| %-5s | %-15s | %-6s | %-16s | %5.2f |\n",
               userId, dishName, category, merchantName, score)
      }

      println("+-------+-----------------+--------+------------------+-------+")
    }
  }

  /**
   * Entry point: loads the order CSV once and runs every recommendation
   * job in sequence (hot category, hot merchant, hot platform,
   * time-based). The ALS personalized recommendation is deliberately
   * disabled — see the commented-out block below, kept for re-enabling.
   *
   * @param spark    active SparkSession
   * @param dataPath path to the CSV order file (first row is the header)
   */
  def runAllRecommendations(spark: SparkSession, dataPath: String): Unit = {
    // Load the raw order data once; all jobs share this DataFrame.
    val orderDF = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv(dataPath)
    
    println("开始执行推荐算法...")
    
    // Statistics-based recommendations.
    println("\n1. 热门品类推荐")
    hotCategoryRecommendation(spark, orderDF)
    
    println("\n2. 热门商铺推荐")
    hotMerchantRecommendation(spark, orderDF)
    
    println("\n3. 热门平台推荐")
    hotPlatformRecommendation(spark, orderDF)
    
    println("\n4. 基于时段的推荐")
    timeBasedRecommendation(spark, orderDF)
    
    // Collaborative-filtering recommendation — intentionally skipped to
    // avoid resource exhaustion; the message below tells the operator why.
    println("\n5. 个性化推荐")
    println("暂时跳过ALS个性化推荐，避免资源不足问题")
    /* 
    val ratingDF = loadRatingData(spark, dataPath)
    try {
      val model = buildALSModel(ratingDF)
      personalizedRecommendation(spark, orderDF, model)
    } catch {
      case e: Exception =>
        println("构建ALS模型时发生异常:")
        e.printStackTrace()
    }
    */
    
    println("\n推荐算法执行完成！")
  }
} 