package org.example.analyse

import org.example.utils.{hbaseConn, mysqlConn}
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
import org.example.pojo.{HotItem, HotTotal}
import org.example.utils.hbaseConn.session
import java.time.LocalDate
import java.time.format.DateTimeFormatter
import scala.concurrent.duration.DurationInt

object keyword {
  // Full scan of the HBase table holding scraped hot-search keywords;
  // materialized once when the object is first referenced.
  val keyword_rdd: RDD[(ImmutableBytesWritable, Result)] = hbaseConn.createRdd("hot_search_keywords")

  /**
   * Reads the hot-search keyword rows out of HBase, converts them to a
   * DataFrame and registers it as the temp view "keywords".
   *
   * Each HBase row is read from column family "info" with qualifiers
   * keyword / num / rank / url / platform / time.
   * NOTE(review): `Bytes.toString` returns null for a missing cell, so rows
   * with absent columns surface as null fields in the DataFrame — confirm
   * upstream always writes all six qualifiers.
   *
   * @return DataFrame with columns
   *         (id, hot_keyword, search_num, ranking, url, platform, time)
   */
  def dealHbaseData(): DataFrame = {
    import session.implicits._
    val df = keyword_rdd.map { case (key, result) =>
      // Small helper to read one string cell from the "info" column family.
      def cell(qualifier: String): String =
        Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes(qualifier)))
      val rowkey: String = Bytes.toString(key.get())
      // Fixed: dropped the unused `remarkDate` (LocalDate.parse of the time
      // column). It produced nothing and threw DateTimeParseException inside
      // the executor for any malformed timestamp, failing the whole job.
      (rowkey, cell("keyword"), cell("num"), cell("rank"), cell("url"), cell("platform"), cell("time"))
    }.toDF("id", "hot_keyword", "search_num", "ranking", "url", "platform", "time")
    df.show(20)
    df.createOrReplaceTempView("keywords")
    df
  }

  /**
   * Total search heat per platform and capture time.
   *
   * Groups the "keywords" view by (platform, time), sums search_num
   * (Spark implicitly casts the string column to double for SUM), and
   * upserts the result into MySQL via [[mysqlConn.batchUpsertTotal]].
   *
   * @param df DataFrame produced by [[dealHbaseData]]
   * @return the aggregated DataFrame (total, platform, time)
   */
  def totalByPlatform(df: DataFrame): DataFrame = {
    df.createOrReplaceTempView("keywords")

    val totalDF = session.sql(
      """
      SELECT
        SUM(search_num) AS total,
        platform,
        time
      FROM keywords
      GROUP BY platform, time
      ORDER BY platform, time DESC
      """
    )

    totalDF.show(20)

    // Materialize the aggregate on the driver and map rows to POJOs.
    // NOTE(review): getDouble(0) NPEs if SUM yields null (all-null group) —
    // confirm search_num is always populated.
    val dataList = totalDF.collect().map(row =>
      HotTotal(
        total = row.getDouble(0),
        platform = row.getString(1),
        time = row.getString(2)
      )
    ).toList

    // Fixed: close the connection even when the upsert throws, so a failed
    // batch no longer leaks a MySQL connection.
    mysqlConn.getConnection()
    try mysqlConn.batchUpsertTotal(dataList)
    finally mysqlConn.closeConnection()

    totalDF
  }

  /**
   * Per-platform on-chart statistics for each keyword: number of appearances,
   * first/last time seen, peak heat and best ranking, upserted into MySQL.
   *
   * NOTE(review): MAX(search_num) / MAX(ranking) compare the original string
   * columns, so ordering is lexicographic, not numeric — e.g. "9" > "100".
   * Cast to a numeric type if numeric ordering is intended.
   *
   * @param df DataFrame produced by [[dealHbaseData]]
   * @return the aggregated DataFrame
   */
  def listCountByPlatform(df: DataFrame): DataFrame = {
    df.createOrReplaceTempView("keywords")

    val countDF = session.sql(
      """
      SELECT
        platform,
        hot_keyword,
        COUNT(*) AS count,
        MIN(time) AS start_time,
        MAX(time) AS end_time,
        MAX(search_num) AS hot_top,
        MAX(ranking) AS top_ranking
      FROM keywords
      GROUP BY platform, hot_keyword
      ORDER BY platform, count DESC
      """
    )
    countDF.show(20)

    // Materialize the aggregate on the driver and map rows to POJOs.
    val dataList = countDF.collect().map(row =>
      HotItem(
        platform = row.getString(0),
        count = row.getLong(2),
        hot_keyword = row.getString(1),
        start_time = row.getString(3),
        end_time = row.getString(4),
        hot_top = row.getString(5),
        top_ranking = row.getString(6)
      )
    ).toList

    // Fixed: close the connection even when the upsert throws, so a failed
    // batch no longer leaks a MySQL connection.
    mysqlConn.getConnection()
    try mysqlConn.batchUpsert(dataList)
    finally mysqlConn.closeConnection()

    countDF
  }

  /**
   * Entry point: re-runs the full extract/aggregate/upsert cycle every
   * 30 minutes, forever. A failed cycle is logged and the loop continues.
   */
  def main(args: Array[String]): Unit = {
    import scala.util.control.NonFatal
    while (true) {
      try {
        val df = dealHbaseData()
        totalByPlatform(df)
        listCountByPlatform(df)
      } catch {
        // Fixed: NonFatal instead of Exception so truly fatal conditions
        // (OutOfMemoryError, InterruptedException) are not swallowed here.
        case NonFatal(e) =>
          println(s"An error occurred: ${e.getMessage}")
          e.printStackTrace()
      }
      Thread.sleep(30.minutes.toMillis)
    }
  }
}
