import org.apache.spark.ml.feature.{StopWordsRemover, Tokenizer}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}

import scala.util.Try

/**
 * Batch job: loads social-media spreadsheets from HDFS, normalizes them onto a
 * single schema, cleans/tokenizes/filters the text, and persists every
 * intermediate and final result as a Hive table under `social_media_analysis`.
 *
 * Pipeline: raw Excel -> cleaned_data -> tokenized_data -> filtered_data
 *           -> top_keywords (keyword frequency) + user_activity (per-day stats).
 */
object DataHandle {

  import scala.util.Try

  /** Null-safe coercion to String: null stays null, anything else is stringified. */
  private def asString(v: Any): String = Option(v).map(_.toString).orNull

  /**
   * Lenient coercion to Double: null or unparseable values become 0.0.
   * `Try` only swallows NonFatal throwables, unlike a broad `catch Exception`.
   */
  private def asDouble(v: Any): Double =
    Option(v).flatMap(x => Try(x.toString.toDouble).toOption).getOrElse(0.0)

  /** Overwrite Hive table `table` with `df`, then log `message`. */
  private def saveToHive(df: DataFrame, table: String, message: String): Unit = {
    df.write.mode(SaveMode.Overwrite).saveAsTable(table)
    println(message)
  }

  def main(args: Array[String]): Unit = {
    System.setProperty("HADOOP_USER_NAME", "root")

    // SparkSession with Hive support; warehouse and metastore point at the cluster.
    val spark = SparkSession.builder()
      .appName("DataAnalysisToHive")
      .config("spark.master", "local[*]")
      .config("spark.local.dir", "C:/tmp/spark-temp")
      .config("spark.sql.warehouse.dir", "hdfs://192.168.190.128:8020/user/hive/warehouse")
      .config("hive.metastore.uris", "thrift://192.168.190.128:9083")
      .enableHiveSupport()
      .getOrCreate()

    // Ensure the target database exists and make it the default for unqualified names
    // (the SQL below references `filtered_data` / `cleaned_data` without a prefix).
    spark.sql("CREATE DATABASE IF NOT EXISTS social_media_analysis")
    spark.sql("USE social_media_analysis")

    // Unified schema that every source sheet is coerced into.
    val schema = StructType(Array(
      StructField("date", StringType, nullable = true),
      StructField("title_translate", StringType, nullable = true),
      StructField("title", StringType, nullable = true),
      StructField("searchCount", DoubleType, nullable = true),
      StructField("rank", DoubleType, nullable = true),
      StructField("words_list", StringType, nullable = true),
      StructField("Coron_Related", StringType, nullable = true)
    ))

    // Source spreadsheets on HDFS, one per platform.
    val dataPath = Seq(
      "hdfs://192.168.190.128:8020/project/input/Douban_2020Coron.xlsx",
      "hdfs://192.168.190.128:8020/project/input/Douyin_2020Coron.xlsx",
      "hdfs://192.168.190.128:8020/project/input/Toutiao_2020Coron.xlsx",
      "hdfs://192.168.190.128:8020/project/input/Weibo_2020Coron.xlsx"
    )

    // Load every sheet, rebuild each row against the shared schema, and union the results.
    val allData = dataPath.map { path =>
      val rawData = spark.read
        .format("com.crealytics.spark.excel")
        .option("header", "true")
        .option("inferSchema", "true")
        .option("treatEmptyValuesAsNulls", "true")
        .option("encoding", "UTF-8")
        .load(path)

      spark.createDataFrame(rawData.rdd.map { row =>
        // padTo fills missing trailing columns with null and is a no-op when the
        // sheet already has enough (or extra) columns.
        val cells = row.toSeq.toArray.padTo(schema.length, null: Any)
        Row(
          asString(cells(0)), // date
          asString(cells(1)), // title_translate
          asString(cells(2)), // title
          asDouble(cells(3)), // searchCount
          asDouble(cells(4)), // rank
          asString(cells(5)), // words_list
          asString(cells(6))  // Coron_Related
        )
      }, schema)
    }.reduce(_ union _)

    allData.createOrReplaceTempView("raw_data")

    // (2) Preprocessing: drop duplicates and rows with no title. searchCount is
    // never null after coercion, but the predicate is kept in case that changes.
    val cleanedDataSQL =
      """
        |SELECT DISTINCT *
        |FROM raw_data
        |WHERE title IS NOT NULL AND searchCount IS NOT NULL
        |""".stripMargin

    val cleanedData = spark.sql(cleanedDataSQL)

    saveToHive(cleanedData, "social_media_analysis.cleaned_data",
      "清洗后的数据已保存到 Hive 表: social_media_analysis.cleaned_data")

    // (3) Tokenization: whitespace/lowercase split of the title text.
    val tokenizer = new Tokenizer()
      .setInputCol("title")
      .setOutputCol("words")
    val tokenizedData = tokenizer.transform(cleanedData)

    saveToHive(tokenizedData, "social_media_analysis.tokenized_data",
      "分词后的数据已保存到 Hive 表: social_media_analysis.tokenized_data")

    // (4) Stop-word removal with a small custom Chinese stop-word list.
    val remover = new StopWordsRemover()
      .setInputCol("words")
      .setOutputCol("filtered_words")
      .setStopWords(Array("的", "是", "在", "和", "了", "有", "我", "他", "她", "它"))
    val filteredData = remover.transform(tokenizedData)

    saveToHive(filteredData, "social_media_analysis.filtered_data",
      "停用词过滤后的数据已保存到 Hive 表: social_media_analysis.filtered_data")

    // Hot-topic analysis: keyword frequency over the filtered tokens.
    // `filtered_data` resolves to the Hive table written just above.
    val wordCountsSQL =
      """
        |SELECT word, COUNT(*) AS count
        |FROM (
        |  SELECT EXPLODE(filtered_words) AS word
        |  FROM filtered_data
        |) exploded
        |GROUP BY word
        |ORDER BY count DESC
        |""".stripMargin

    val topKeywords = spark.sql(wordCountsSQL)

    saveToHive(topKeywords, "social_media_analysis.top_keywords",
      "热点关键词统计结果已保存到 Hive 表: social_media_analysis.top_keywords")

    println("Top 10 Keywords:")
    topKeywords.show(10)

    // User-behaviour analysis: per-day post counts and search-count aggregates.
    val userActivitySQL =
      """
        |SELECT date,
        |       COUNT(*) AS post_count,
        |       AVG(searchCount) AS avg_search_count,
        |       SUM(searchCount) AS total_search_count
        |FROM cleaned_data
        |GROUP BY date
        |ORDER BY post_count DESC
        |""".stripMargin

    val userActivity = spark.sql(userActivitySQL)

    saveToHive(userActivity, "social_media_analysis.user_activity",
      "用户行为分析结果已保存到 Hive 表: social_media_analysis.user_activity")

    println("Top 10 User Activity:")
    userActivity.show(10)

    spark.stop()

    println("程序执行成功，所有结果已保存到 Hive")
  }
}