package com.chinasoft.shop

import com.huaban.analysis.jieba.JiebaSegmenter
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions.Window
import scala.collection.JavaConverters._

object WordCount {

  /**
   * Spark batch job: reads Dianping review data from MySQL, buckets each
   * restaurant's reviews into 0.5-wide average-rating ranges, segments the
   * review text with Jieba, and writes the top-50 most frequent words per
   * rating range back to MySQL (table `dazhong_word`).
   */
  def main(args: Array[String]): Unit = {
    import scala.util.control.NonFatal

    // 1. Initialise the SparkSession (local mode, all cores).
    val spark = SparkSession.builder()
      .appName("WordCountByScoreRange")
      .master("local[*]")
      .config("spark.executor.heartbeatInterval", "30s")
      .config("spark.network.timeout", "60s")
      .getOrCreate()
    import spark.implicits._

    // 2. JDBC connection settings.
    // SECURITY NOTE(review): credentials are hard-coded in source; move them
    // to configuration / environment variables before sharing or deploying.
    val url = "jdbc:mysql://localhost:3306/dazhong?useSSL=false&characterEncoding=utf8&serverTimezone=UTC"
    val props = new java.util.Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "Etestnmm4l!")
    props.setProperty("driver", "com.mysql.cj.jdbc.Driver")

    try {
      // 3. Load the review table and compute a weighted per-review rating
      //    (overall 40%; environment / flavor / service 20% each).
      println("开始读取点评数据...")
      val reviews = spark.read.jdbc(url, "dazhong_dianping", props)
        .withColumn(
          "weighted_rating",
          col("rating") * 0.4 +
            col("rating_env") * 0.2 +
            col("rating_flavor") * 0.2 +
            col("rating_service") * 0.2
        )
      println(s"成功读取 ${reviews.count()} 条点评数据")

      // 4. Load the per-restaurant average-rating table.
      println("读取餐厅平均分数据...")
      val averages = spark.read.jdbc(url, "dazhong_average", props)
        .withColumnRenamed("average_rating", "avg_rating")

      // 5. Join each review with its restaurant's average rating on restId.
      println("合并点评与餐厅平均分数据...")
      val joined = reviews.join(averages, "restId")

      // 6. Bucket avg_rating into half-point ranges "1.0-1.5" .. "4.5-5.0".
      //    FIX: a score of exactly 5.0 previously produced bucket 5.0, failed
      //    the `bucket < 5` guard, and was mislabelled "unknown" — it is now
      //    clamped into the top bucket "4.5-5.0". Scores outside [1.0, 5.0]
      //    are rejected up front (the old truncation-toward-zero also let
      //    scores in (0.5, 1.0) leak into "1.0-1.5").
      val getRatingRange = udf((score: Double) => {
        if (score < 1.0 || score > 5.0) "unknown"
        else {
          val bucket = math.min(((score - 1) / 0.5).toInt * 0.5 + 1, 4.5)
          f"$bucket%.1f-${bucket + 0.5}%.1f"
        }
      })
      // Cached because it feeds two separate actions: the per-range count
      // below and the word-frequency job in step 10.
      val withRange = joined
        .withColumn("rating_range", getRatingRange(col("avg_rating")))
        .cache()

      // Report how many reviews fall into each rating range.
      println("各评分区间评论数统计:")
      withRange.groupBy("rating_range")
        .count()
        .orderBy("rating_range")
        .show(numRows = 20, truncate = false)

      // 7. Stop words filtered out of the segmentation result
      //    (duplicate "有" removed; Set deduplicates anyway).
      val stopwords = Set("非常", "这个", "那个", "真的", "感觉", "就是", "还是", "有点", "。", "了", "在", "有", "无", "是", "会", "东西")

      // 8. One JiebaSegmenter per executor thread. The anonymous subclass
      //    mixes in Serializable so the ThreadLocal can ship inside the
      //    UDF closure to the executors.
      val jiebaSegmenter = new ThreadLocal[JiebaSegmenter] with Serializable {
        override def initialValue(): JiebaSegmenter = new JiebaSegmenter()
      }

      // 9. Segmentation UDF: null/blank comments yield an empty token list;
      //    tokens are kept when 2..5 chars long and not a stop word.
      //    FIX: the old version called jiebaSegmenter.remove() after every
      //    row, throwing away the cached segmenter and rebuilding it per
      //    record — defeating the ThreadLocal entirely. The per-thread
      //    instance is now kept alive for the executor thread's lifetime.
      val jiebaFilter = udf { text: String =>
        if (text == null || text.trim.isEmpty) {
          Seq.empty[String]
        } else {
          jiebaSegmenter.get()
            .sentenceProcess(text).asScala
            .filter(w => w.length > 1 && w.length <= 5 && !stopwords.contains(w))
            .toSeq
        }
      }

      // 10. Explode each comment into one (rating_range, word) row per token,
      //     skipping rows whose rating fell outside the valid buckets.
      println("开始分词并统计高频词汇...")
      val words = withRange.filter(col("rating_range") =!= "unknown")
        .select(
          col("rating_range"),
          explode(jiebaFilter(col("comment"))).alias("word")
        )

      // 11. Rank words by frequency within each rating range, keep the top 50.
      val windowSpec = Window.partitionBy("rating_range").orderBy(desc("count"))
      val topWords = words.groupBy("rating_range", "word")
        .count()
        .withColumn("rn", row_number().over(windowSpec))
        .filter(col("rn") <= 50)
        .select("rating_range", "word", "count")

      // 12. Overwrite the result table.
      println(s"准备写入结果表，共 ${topWords.count()} 条记录")
      topWords.write.mode("overwrite").jdbc(url, "dazhong_word", props)
      println("结果表写入完成")

    } catch {
      // NonFatal only: let OutOfMemoryError / InterruptedException propagate.
      case NonFatal(e) =>
        println(s"执行过程中发生错误: ${e.getMessage}")
        e.printStackTrace()
    } finally {
      spark.stop()
      println("SparkSession已关闭")
    }
  }
}