package com.chinasoft.shop

import com.huaban.analysis.jieba.JiebaSegmenter
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types._

/**
 * Spark batch job: computes a weighted per-review score, buckets restaurants
 * into half-point average-rating ranges, segments review comments with Jieba,
 * and writes the top-20 words per rating range back to MySQL (`dazhong_word`).
 */
object WordCount {
  def main(args: Array[String]): Unit = {
    // 1. Initialise the SparkSession (local mode; adjust `master` for cluster runs).
    val spark = SparkSession.builder()
      .appName("WordCountByScoreRange")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    // 2. JDBC connection settings.
    // SECURITY NOTE(review): DB credentials are hard-coded in source; move them
    // to configuration / environment variables before deploying.
    val url = "jdbc:mysql://localhost:3306/dazhong?useSSL=false&characterEncoding=utf8&serverTimezone=UTC"
    val props = new java.util.Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "Etestnmm4l!")
    props.setProperty("driver", "com.mysql.cj.jdbc.Driver")

    // 3. Load reviews and derive a weighted score:
    //    40% overall rating + 20% each for environment, flavor and service.
    val reviews = spark.read
      .jdbc(url, "dazhong_dianping", props)
      .withColumn(
        "weighted_rating",
        col("rating") * 0.4 +
          col("rating_env") * 0.2 +
          col("rating_flavor") * 0.2 +
          col("rating_service") * 0.2
      )

    // 4. Load the per-restaurant average-rating table.
    val averages = spark.read
      .jdbc(url, "dazhong_average", props)
      .withColumnRenamed("average_rating", "avg_rating")

    // 5. Join each review with its restaurant's average rating on `restId`.
    val joined = reviews.join(averages, "restId")

    // 6. Bucket the average rating into half-point ranges over [1.0, 5.0].
    //    Intervals are left-closed/right-open, EXCEPT the last one, which is
    //    closed on both ends so a perfect 5.0 lands in "4.5-5.0".
    //    (Bug fix: `score < hi` alone mapped exactly-5.0 to "unknown",
    //    silently dropping those rows in the filter below.)
    val buckets = Array(1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0)
    val lastEdge = buckets.last
    val bucketizer = udf { score: Double =>
      buckets.sliding(2).find {
        case Array(lo, hi) =>
          score >= lo && (score < hi || (hi == lastEdge && score <= hi))
      } match {
        case Some(Array(lo, hi)) => f"$lo%.1f-$hi%.1f"
        case _                   => "unknown" // below 1.0 or above 5.0
      }
    }
    val withRange = joined.withColumn("rating_range", bucketizer(col("avg_rating")))

    // 7. Jieba tokenizer UDF. The segmenter is built inside the UDF body so
    //    no non-serializable state is captured in the closure.
    //    NOTE(review): this actually constructs a JiebaSegmenter per ROW, not
    //    per task — if profiling shows it is costly, switch to mapPartitions
    //    or a lazy transient holder so each executor reuses one instance.
    val jiebaFilter = udf { text: String =>
      if (text == null) {
        Seq.empty[String] // null comments tokenize to nothing
      } else {
        val segmenter = new JiebaSegmenter()
        import scala.collection.JavaConverters._
        segmenter.sentenceProcess(text).asScala
          .filter(w => w.length > 1 && w.length <= 5) // drop single chars and over-long tokens
          .toSeq
      }
    }

    // 8. Explode comments into (rating_range, word) rows, excluding rows whose
    //    average rating fell outside every bucket.
    val words = withRange
      .filter(col("rating_range") =!= "unknown")
      .select(
        col("rating_range"),
        explode(jiebaFilter(col("comment"))).alias("word")
      )

    // Rank words by frequency within each rating range and keep the top 20.
    val windowSpec = Window.partitionBy("rating_range").orderBy(desc("count"))
    val topWords = words.groupBy("rating_range", "word")
      .count()
      .withColumn("rn", row_number().over(windowSpec))
      .filter(col("rn") <= 20)
      .select("rating_range", "word", "count")

    // 9. Persist results; `overwrite` drops and recreates `dazhong_word`.
    topWords.write
      .mode("overwrite")
      .jdbc(url, "dazhong_word", props)

    spark.stop()
  }
}
