package com.at.bigdata.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.{Encoder, Encoders, SparkSession, functions}
import org.apache.spark.sql.expressions.Aggregator

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

/**
 *
 * @author cdhuangchao3
 * @date 2023/5/27 8:05 PM
 */
object Spark06_SparkSql_Test2 {
  /**
   * Entry point. Computes, per area, the top-3 most-clicked products,
   * each annotated with a remark describing which cities the clicks
   * came from (produced by CityRemarkUDAF).
   */
  def main(args: Array[String]): Unit = {
    //      System.setProperty("HADOOP_USER_NAME", "hdfs")
    //    System.setProperty("SPARK_LOCAL_IP", "192.168.0.109")
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("hive")
    val spark = SparkSession.builder()
      .enableHiveSupport()
      .config(sparkConf)
      .config("spark.sql.warehouse.dir", "hdfs://node01:9000/user/hive/warehouse")
      .getOrCreate()

    // t1: click actions joined with the product and city dimension tables.
    spark.sql("use hdm")
    spark.sql(
      """
        |SELECT
        |	 a.*,
        |  p.product_name,
        |  c.area,
        |  c.city_name
        |from user_visit_action a
        |join product_info p on a.click_product_id = p.product_id
        |join city_info c on a.city_id = c.city_id
        |where a.click_product_id > -1
        |""".stripMargin
    ).createOrReplaceTempView("t1")
    println("-------------11111111111")

    // t2: aggregate clicks by (area, product).
    // FIX: the UDAF's input type is the city name (Aggregator[String, ...]),
    // so it must be invoked with the city_name column — cityRemark() with no
    // argument fails at analysis time. The result is also aliased so the
    // downstream `select *` exposes a stable column name.
    spark.udf.register("cityRemark", functions.udaf(new CityRemarkUDAF))
    spark.sql(
      """
        |SELECT
        |  area,
        |	 product_name,
        |	 count(*) as clickCnt,
        |  cityRemark(city_name) as city_remark
        |from t1 group by area, product_name
        |""".stripMargin).createOrReplaceTempView("t2")
    println("-------------222222222222")

    // t3: rank products by click count within each area.
    spark.sql(
      """
        |SELECT
        |	 *,
        |	 rank() over(partition by area order by clickCnt desc) as rank
        |from t2
        |""".stripMargin).createOrReplaceTempView("t3")
    println("-------------333333333333")

    // Keep only the top 3 products per area.
    spark.sql(
      """
        |select
        | *
        |from t3 where rank <= 3
        |""".stripMargin).show(false)
    println("-------------44444444444")

    spark.close()
  }

  // Custom aggregate function: builds a "city remark" for each group.
  // IN:   city name (one per click record)
  // Buff: Buffer(total click count, map of city -> click count)
  // Out:  remark string summarizing per-city click shares
  // Aggregation buffer; fields are vars because the UDAF mutates it in place.
  case class Buffer(var total: Long, var cityMap: mutable.Map[String, Long])

  /**
   * Typed aggregator that turns the city names of a group's click records
   * into a remark string listing the two most-clicked cities with their
   * integer percentages, plus an "others" bucket when more cities exist,
   * e.g. "beijing 63%,tianjin 20%,其他 17%".
   */
  class CityRemarkUDAF extends Aggregator[String, Buffer, String] {
    // Empty buffer: no clicks seen yet.
    override def zero: Buffer = Buffer(0L, mutable.HashMap[String, Long]())

    // Fold one city name into the buffer: bump the total and that city's count.
    override def reduce(b: Buffer, a: String): Buffer = {
      b.total += 1
      b.cityMap.update(a, b.cityMap.getOrElse(a, 0L) + 1L)
      b
    }

    // Combine two partial buffers.
    // FIX: the original folded b1's entries into b2.cityMap and assigned the
    // result back, mutating the *other* buffer and aliasing the two maps.
    // Here b2 is merged into b1 and left untouched.
    override def merge(b1: Buffer, b2: Buffer): Buffer = {
      b1.total += b2.total
      b2.cityMap.foreach { case (city, cnt) =>
        b1.cityMap.update(city, b1.cityMap.getOrElse(city, 0L) + cnt)
      }
      b1
    }

    // Render the final remark string from the aggregated counts.
    override def finish(reduction: Buffer): String = {
      val total = reduction.total
      val cityMap = reduction.cityMap
      // Two most-clicked cities, by descending click count.
      val top2 = cityMap.toList.sortBy(-_._2).take(2)
      val remarkList = new ListBuffer[String]
      var rsum = 0L
      top2.foreach { case (city, cnt) =>
        val r = cnt * 100 / total // integer (truncating) percentage
        remarkList.append(s"${city} ${r}%")
        rsum += r
      }
      // Remaining cities are lumped together; 100 - rsum also absorbs the
      // truncation error of the top-2 percentages.
      if (cityMap.size > 2) {
        remarkList.append(s"其他 ${100 - rsum}%")
      }
      remarkList.mkString(",")
    }

    // Encoders Spark uses to serialize the buffer and the result.
    override def bufferEncoder: Encoder[Buffer] = Encoders.product

    override def outputEncoder: Encoder[String] = Encoders.STRING
  }
}
