package day09

import day07.caseClass.UserVisitAction
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, Encoder, Row, SparkSession, TypedColumn}
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, LongType, MapType, StringType, StructField, StructType}

import java.text.DecimalFormat

/**
 * Chapter 4: SparkSQL hands-on project.
 *
 * 4.1 Preparing the data
 * All data for these Spark SQL exercises comes from Hive. Three tables are
 * created and loaded in Hive up front: one user-behavior table
 * (user_visit_action), one city table (city_info) and one product table
 * (product_info).
 *
 * 4.2 Requirement: top-3 hot products per region
 *
 * 4.2.1 Summary
 * "Hot" is measured by click count. For every region, compute the three most
 * clicked products, annotated with the per-city click distribution of each
 * product; cities beyond the top two are collapsed into an "other" bucket.
 *
 * 4.2.2 Approach
 *  - Solve it with SQL; use a UDF/UDAF where plain SQL gets awkward.
 *  - Join all click records with city_info to obtain each city's region, and
 *    with product_info to obtain the product name.
 *  - Group by (region, product name) and count total clicks per product per region.
 *  - Within each region, order products by click count descending.
 *  - Keep only the top three per region and persist the result.
 *  - The city-remark column requires a custom UDAF (see CityRemarkAggregator below).
 */
object Spark_SQL_6 {
  // Intentionally empty: serves only as the chapter anchor for the
  // implementations in Spark_SQL_6_1 / 6_2 / 6_3 below.
}

/**
 * Top-3 hot products per region, step-by-step variant: each stage of the SQL
 * is materialized as its own temporary view (t1, t2) before the final select.
 */
object Spark_SQL_6_1 {
  def main(args: Array[String]): Unit = {
    // Local Spark with Hive support so the Hive tables are visible.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL_exercise")

    val session: SparkSession = SparkSession.builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    val startedAt: Long = System.currentTimeMillis()

    // Step 1: work against the default Hive database.
    session.sql("use default")

    // Step 2: join clicks with city and product dimensions, then count clicks
    // per (region, product). click_product_id == -1 marks non-click actions
    // and is filtered out.
    session.sql(
      """
        |select ci.area,
        |pi.product_name,
        |count(*) count_click_product
        |from user_visit_action uv
        |left join city_info ci on uv.city_id = ci.city_id
        |left join product_info pi on pi.product_id = uv.click_product_id
        |where uv.click_product_id != -1
        |group by ci.area, pi.product_name
        |""".stripMargin).createOrReplaceTempView("t1")

    // Step 3: rank products inside each region by click count, descending.
    session.sql(
      """
        |select t1.area,
        |t1.product_name,
        |t1.count_click_product,
        |row_number() over (partition by t1.area order by t1.count_click_product desc) cn
        |from  t1
        |""".stripMargin).createOrReplaceTempView("t2")

    // Step 4: keep only the top three ranks per region and print them.
    session.sql(
      """
        |select t2.area,
        |t2.product_name,
        |t2.count_click_product,
        |t2.cn
        |from t2
        |where t2.cn <= 3
        |""".stripMargin).show(numRows = 1000, truncate = false)

    val finishedAt: Long = System.currentTimeMillis()

    // Elapsed wall-clock time in seconds.
    println("耗时：" + (finishedAt - startedAt).toDouble / 1000)

    session.stop()
  }
}

/**
 * Top-3 hot products per region, single-statement variant: the same logic as
 * Spark_SQL_6_1 expressed as one nested query instead of temporary views.
 */
object Spark_SQL_6_2 {
  def main(args: Array[String]): Unit = {

    // Local Spark with Hive support so the Hive tables are visible.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL_exercise")

    val session: SparkSession = SparkSession.builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    // The default database is used implicitly, so no explicit `use` is needed.
    // session.sql("use default")

    val startedAt: Long = System.currentTimeMillis()
    // Innermost query (t1): click count per (region, product).
    // Middle query (t2): rank products within each region by clicks, descending.
    // Outer query: keep ranks 1-3 per region.
    session.sql(
      """
        |select t2.area,
        |t2.product_name,
        |t2.count_click_product,
        |t2.cn
        |from (
        |      select t1.area,
        |      t1.product_name,
        |      t1.count_click_product,
        |      row_number() over (partition by t1.area order by t1.count_click_product desc) cn
        |      from (
        |             select ci.area,
        |             pi.product_name,
        |             count(*) count_click_product
        |             from user_visit_action uv
        |             left join city_info ci on uv.city_id = ci.city_id
        |             left join product_info pi on pi.product_id = uv.click_product_id
        |             where uv.click_product_id != -1
        |             group by ci.area, pi.product_name) t1
        |             ) t2
        |where t2.cn <= 3
        |""".stripMargin).show(numRows = 1000, truncate = false)

    val finishedAt: Long = System.currentTimeMillis()

    // Elapsed wall-clock time in seconds.
    println("耗时：" + (finishedAt - startedAt).toDouble / 1000)

    session.stop()
  }
}

/**
 * Top-3 hot products per region, annotated with each product's per-city click
 * distribution; cities beyond the top two are collapsed into an "other" bucket.
 * The distribution string is produced by the CityRemarkAggregator UDAF.
 */
object Spark_SQL_6_3 {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL")
    val session: SparkSession = SparkSession.builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    val startedAt: Long = System.currentTimeMillis()

    // NOTE(review): UserDefinedAggregateFunction is deprecated as of Spark 3.0
    // in favor of Aggregator + functions.udaf — consider migrating; left as-is
    // here to keep the aggregator's interface unchanged.
    session.udf.register("city_remark", new CityRemarkAggregator)

    // Same three-level query as Spark_SQL_6_2, plus the city_remark UDAF
    // aggregated alongside the click count in the innermost query.
    session.sql(
      """
        |select
        | t2.area,
        | t2.product_name,
        | t2.count_click_product,
        | t2.city_remark,
        | t2.cn
        |from (
        |      select
        |       t1.area,
        |       t1.product_name,
        |       t1.count_click_product,
        |       t1.city_remark,
        |       row_number() over (partition by t1.area order by t1.count_click_product desc) cn
        |      from (
        |             select
        |               ci.area,
        |               pi.product_name,
        |               count(*) count_click_product,
        |               city_remark(ci.city_name) city_remark
        |             from user_visit_action uv
        |             left join city_info ci on uv.city_id = ci.city_id
        |             left join product_info pi on pi.product_id = uv.click_product_id
        |             where uv.click_product_id != -1
        |             group by ci.area, pi.product_name) t1
        |             ) t2
        |where t2.cn <= 3
        |""".stripMargin).show(numRows = 1000, truncate = false)


    val finishedAt: Long = System.currentTimeMillis()

    // Elapsed wall-clock time in seconds.
    println("耗时：" + (finishedAt - startedAt).toDouble / 1000)

    session.stop()
  }
}

/**
 * UDAF that summarizes a group's click distribution over cities as a string,
 * e.g. "北京:21.20%,天津:13.20%,其他:65.60%". It buffers a per-city click map
 * plus the group's total click count, then formats the top two cities (and an
 * "other" remainder when more than two cities are present) on evaluation.
 */
class CityRemarkAggregator extends UserDefinedAggregateFunction {

  // Input row: a single city-name string per clicked record.
  override def inputSchema: StructType = {
    StructType(Array(StructField("cityNe", StringType)))
  }

  // Aggregation buffer: city -> click count, plus the group's total clicks.
  override def bufferSchema: StructType = {
    StructType(Array(StructField("map", MapType(StringType, LongType)), StructField("totalCount", LongType)))
  }

  // Result is the formatted remark string.
  override def dataType: DataType = StringType

  // Same input always yields the same output.
  override def deterministic: Boolean = true

  // Start each group with an empty city map and a zero total.
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer.update(0, Map.empty[String, Long])
    buffer.update(1, 0L)
  }

  // Partition-local step: bump the incoming city's count and the total by one.
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    val city: String = input.getString(0)
    val clicksByCity: Map[String, Long] = buffer.getAs[Map[String, Long]](0)
    buffer.update(0, clicksByCity.updated(city, clicksByCity.getOrElse(city, 0L) + 1L))

    // One more click observed for this group overall.
    buffer.update(1, buffer.getLong(1) + 1L)
  }

  // Cross-partition step: sum the two city maps entry-wise and add the totals.
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val left: Map[String, Long] = buffer1.getAs[Map[String, Long]](0)
    val right: Map[String, Long] = buffer2.getAs[Map[String, Long]](0)

    buffer1.update(0, right.foldLeft(left) {
      case (acc, (city, clicks)) => acc + (city -> (acc.getOrElse(city, 0L) + clicks))
    })

    buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1))
  }

  // Final formatting: top-two cities by clicks as ratios of the total, with an
  // "other" bucket covering the rest when more than two cities were seen.
  override def evaluate(buffer: Row): Any = {
    val clicksByCity: Map[String, Long] = buffer.getAs[Map[String, Long]](0)
    val totalClicks: Long = buffer.getLong(1)

    val topTwo: List[CityRemark] = clicksByCity.toList
      .sortBy(_._2)(Ordering[Long].reverse)
      .take(2)
      .map { case (city, clicks) => CityRemark(city, clicks.toDouble / totalClicks) }

    // Only emit the "other" bucket when a third city exists; its ratio is
    // whatever remains after subtracting the top two.
    val remarks: List[CityRemark] =
      if (clicksByCity.size > 2) topTwo :+ CityRemark("其他", topTwo.foldLeft(1D)(_ - _.cityRatio))
      else topTwo

    remarks.mkString(",")
  }
}

/**
 * One city's share of a product's clicks, rendered as "name:percent".
 *
 * @param cityName  city name
 * @param cityRatio this city's clicks divided by the group's total clicks
 */
case class CityRemark(cityName: String, cityRatio: Double) {
  // Renders the ratio as a percentage with two decimals, e.g. 0.212 -> "21.20%".
  val formatter: DecimalFormat = new DecimalFormat("0.00%")

  override def toString: String = cityName + ":" + formatter.format(cityRatio)
}

/**
 * One row of the per-region product click ranking.
 *
 * @param area              region name
 * @param productName       product name
 * @param countClickProduct click count for (area, productName)
 * @param cityRemark        per-city click distribution remark for this product
 */
case class CityClickCondition(area: String, productName: String, countClickProduct: Long, cityRemark: CityRemark)

/**
 * Product dimension record (mirrors the Hive table product_info).
 *
 * @param productId   product id
 * @param productName product name
 * @param extendInfo  extended info (e.g. self-operated vs. third-party)
 */
case class ProductInfo(productId: Long, productName: String, extendInfo: String)

/**
 * City dimension record (mirrors the Hive table city_info).
 *
 * @param cityId   city id
 * @param cityName city name
 * @param area     region the city belongs to (e.g. East China, North China, ...;
 *                 the original note's "东门" looks like a typo for a region name —
 *                 TODO confirm against the Hive data)
 */
case class CityInfo(cityId: BigInt, cityName: String, area: String)