package org.huangrui.spark.scala.sql

import org.apache.parquet.schema.Types.ListBuilder
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Encoder, Encoders, SparkSession, functions}
import org.apache.spark.sql.expressions.Aggregator

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

/**
 * @Author hr
 * @Create 2024-10-21 6:49
 */
object SparkSQL09_Source_Req_2 {
  def main(args: Array[String]): Unit = {
    // Impersonate this Hadoop user so the Hive metastore / HDFS access is authorized.
    System.setProperty("HADOOP_USER_NAME", "huangrui")
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL")
    // Hive support is required: the queries below read Hive tables in database db_spark.
    val spark: SparkSession = SparkSession.builder().enableHiveSupport().config(conf).getOrCreate()
    spark.sql("use db_spark")

//    spark.sql("drop table user_visit_action")
//    spark.sql("drop table product_info")
//    spark.sql("drop table city_info")
    // Register the typed Aggregator below as a SQL-callable UDAF named "cityRemark".
    spark.udf.register("cityRemark", functions.udaf(new CityRemarkUDAF()))
    // Per (area, product): click count plus a per-city breakdown string from the UDAF.
    // click_product_id != -1 filters out non-click action rows.
    spark.sql(
      """
        |SELECT	c.area,	p.product_name,	count(*) clickCnt,cityRemark(city_name) cityremark
        |FROM
        |	( SELECT click_product_id, city_id FROM user_visit_action WHERE click_product_id != - 1 ) AS a
        |	JOIN product_info p ON a.click_product_id = p.product_id
        |	JOIN ( SELECT city_id, city_name, area FROM city_info ) c ON a.city_id = c.city_id
        |GROUP BY	area,	product_id,	product_name
        |LIMIT 10
        |""".stripMargin).show(false)

    // Same aggregation, then rank() per area by click count and keep the top 3
    // products of each area (the classic "hot products by region" requirement).
    spark.sql(
      """
        |select
        |    *
        |from (
        |    select
        |        *,
        |        rank() over( partition by area order by clickCnt desc ) as rank
        |    from (
        |        select
        |           area,
        |           product_name,
        |           count(*) as clickCnt,
        |           cityRemark(city_name) cityremark
        |        from (
        |            select
        |               a.*,
        |               p.product_name,
        |               c.area,
        |               c.city_name
        |            from user_visit_action a
        |            join product_info p on a.click_product_id = p.product_id
        |            join city_info c on a.city_id = c.city_id
        |            where a.click_product_id > -1
        |        ) t1 group by area, product_name
        |    ) t2
        |) t3 where rank <= 3
            """.stripMargin).show(false)

    spark.stop()
  }
  // Aggregation buffer: total click count and per-city click counts.
  // Fields are vars because the Aggregator mutates the buffer in place.
  case class Buffer(var total: Long, var cityMap: mutable.Map[String, Long])
  /**
   * UDAF that, per group, counts clicks per city and renders the top-2 cities
   * as "<city> <pct>%" (integer percent), followed by a "其他 <rest>%" bucket
   * when more than two cities contributed.
   *
   * Input: one city name per row. Output: the formatted remark string.
   */
  class CityRemarkUDAF extends Aggregator[String, Buffer, String] {
    // Initial buffer: nothing seen yet.
    override def zero: Buffer = Buffer(0L, mutable.Map[String, Long]())

    /** Fold one input city name into the buffer. */
    override def reduce(buff: Buffer, city: String): Buffer = {
      buff.total += 1
      buff.cityMap.update(city, buff.cityMap.getOrElse(city, 0L) + 1)
      buff
    }

    /** Merge two partial buffers (combines per-partition results). */
    override def merge(b1: Buffer, b2: Buffer): Buffer = {
      b1.total += b2.total
      // Add b2's per-city counts into b1's map. A plain `++=` would be wrong:
      // it overwrites, rather than sums, counts for cities present in both maps.
      b2.cityMap.foreach { case (city, cnt) =>
        b1.cityMap.update(city, b1.cityMap.getOrElse(city, 0L) + cnt)
      }
      b1
    }

    /** Render the final remark string from the aggregated buffer. */
    override def finish(buffer: Buffer): String = {
      val total = buffer.total
      if (total == 0L) {
        // Defensive: an unreduced buffer would otherwise divide by zero.
        ""
      } else {
        val parts = new ListBuffer[String]()
        // Top-2 cities by click count, descending (sort/reverse kept to
        // preserve the original tie ordering).
        val top2 = buffer.cityMap.toList.sortBy(_._2).reverse.take(2)
        var shownPercent = 0L
        top2.foreach { case (city, cnt) =>
          // Integer percent: the "其他" remainder below relies on this truncation.
          val percent = cnt * 100 / total
          parts += s"${city} $percent%"
          shownPercent += percent
        }
        if (buffer.cityMap.size > 2) {
          parts += s"其他 ${100 - shownPercent}%"
        }
        parts.mkString(",")
      }
    }

    // Encoder for the buffer type (serialization between execution stages).
    override def bufferEncoder: Encoder[Buffer] = Encoders.product[Buffer]
    // Encoder for the final string result.
    override def outputEncoder: Encoder[String] = Encoders.STRING
  }
}
