package com.yanggu.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession}

import scala.collection.JavaConverters._

//Top-3 hot products per area, where "hot" is measured by click count.
//Each product is annotated with its click distribution over the main cities;
//anything beyond the top two cities is collapsed into "其他" (other).
//No Hive environment is available here, so the tables are loaded via textFile.
object SparkSQL06_TopN {

  def main(args: Array[String]): Unit = {

    //1. Build the Spark configuration (local mode, all cores).
    val sparkConf = new SparkConf().setAppName("SparkSQL06_TopN").setMaster("local[*]")

    //2. Create the SparkSession.
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    //Fix: the session was never stopped; ensure resources are released even if
    //loading or the query throws.
    try {
      //Implicit conversions: provides the Encoders needed by .as[T] below.
      import spark.implicits._

      //3. Load the user-visit log ('_'-separated fields) and expose it as a temp view.
      val userVisitActionDataSet = spark.read.textFile("input/user_visit_action.txt").map(line => {
        val datas = line.split("_")
        UserVisitAction(datas(0), datas(1), datas(2), datas(3), datas(4), datas(5), datas(6), datas(7), datas(8), datas(9), datas(10), datas(11), datas(12).toLong)
      }).as[UserVisitAction]

      //4. Register temp view user_visit_action.
      userVisitActionDataSet.createOrReplaceTempView("user_visit_action")

      //5. Load product info (tab-separated).
      val productInfoDataSet = spark.read.textFile("input/product_info.txt").map(line => {
        val datas = line.split("\t")
        ProductInfo(datas(0).toLong, datas(1), datas(2))
      }).as[ProductInfo]

      //6. Register temp view product_info.
      productInfoDataSet.createOrReplaceTempView("product_info")

      //7. Load city info (tab-separated).
      val cityInfoDataSet = spark.read.textFile("input/city_info.txt").map(line => {
        val datas = line.split("\t")
        CityInfo(datas(0).toLong, datas(1), datas(2))
      }).as[CityInfo]

      //8. Register temp view city_info.
      cityInfoDataSet.createOrReplaceTempView("city_info")

      //Register the UDAF that builds the per-city "remark" column.
      spark.udf.register("cityCount", new CityCountUDAF)

      //9. Run the query: join the three views, count clicks per (area, product),
      //rank within each area by click count, and keep the top 3.
      val rows = spark.sql(
        """
          |SELECT
          |  t2.area,
          |  t2.product_name,
          |  t2.count,
          |  t2.remark
          |FROM
          |(
          |  SELECT
          |  t1.*,
          |  rank() over(partition by area order by count desc) rank
          |  FROM
          |  (
          |    SELECT
          |      city_info.area,
          |      product_info.product_name,
          |      count(*) count,
          | -- 这里需要注意的是自定义的UDAF聚合函数需要使用在group by后, 或者是全量数据
          |      cityCount(city_info.city_name) remark
          |    FROM
          |      user_visit_action
          |    JOIN
          |      product_info ON product_info.product_id = user_visit_action.click_product_id
          |    JOIN
          |      city_info ON city_info.city_id = user_visit_action.city_id
          |    WHERE
          |      user_visit_action.click_category_id > -1
          |    AND
          |      user_visit_action.click_product_id > -1
          |    GROUP BY
          |      city_info.area, product_info.product_name
          |  ) t1
          |) t2
          |WHERE
          |  t2.rank <= 3
          |""".stripMargin)
        .collect()

      //Print the result table: area / product name / count / remark.
      println("地区\t商品名称\t\t数量\t备注")
      rows.foreach(row => {
        println(s"${row.getString(0)}\t${row.getString(1)}\t\t${row.getLong(2)}\t\t${row.getString(3)}")
      })
    } finally {
      //Release the local cluster's resources.
      spark.stop()
    }
  }

}

//One parsed line of input/user_visit_action.txt (fields split on '_').
//NOTE(review): click_category_id / click_product_id are Strings here but are
//compared numerically (`> -1`) and joined on a Long in the SQL above — this
//relies on Spark SQL's implicit casting; confirm against the input data.
case class UserVisitAction(
                            date: String,               // date of the action
                            user_id: String,            // id of the acting user
                            session_id: String,         // session the action belongs to
                            page_id: String,            // page on which the action happened
                            action_time: String,        // timestamp of the action
                            search_keyword: String,     // keyword, for search actions
                            click_category_id: String,  // category id, for click actions
                            click_product_id: String,   // product id, for click actions
                            order_category_ids: String, // category ids, for order actions
                            order_product_ids: String,  // product ids, for order actions
                            pay_category_ids: String,   // category ids, for payment actions
                            pay_product_ids: String,    // product ids, for payment actions
                            city_id: Long               // city where the action happened
                          )

//One parsed line of input/product_info.txt (tab-separated).
case class ProductInfo(
                        product_id: Long,     // unique product id (join key)
                        product_name: String, // display name of the product
                        extend_info: String   // extra product metadata (unused here)
                      )

//One parsed line of input/city_info.txt (tab-separated).
case class CityInfo(
                     city_id: Long,     // unique city id (join key)
                     city_name: String, // display name of the city
                     area: String       // region/area the city belongs to
                   )

//Custom UDAF that builds the "remark" column: the percentage distribution of
//clicks over the main cities, with everything beyond the top two collapsed
//into "其他" (other).
//in:     cityName (String)
//buffer: Map[cityName -> click count]
//out:    remark string, e.g. "北京: 21.2%, 天津: 13.2%, 其他: 65.6%"
//NOTE(review): UserDefinedAggregateFunction is deprecated since Spark 3.0 in
//favor of org.apache.spark.sql.expressions.Aggregator; kept for compatibility.
class CityCountUDAF extends UserDefinedAggregateFunction {

  //Input schema: a single city name per row.
  override def inputSchema = StructType(Array(StructField("cityName", StringType)))

  //Buffer schema: running click count per city.
  override def bufferSchema = StructType(Array(StructField("buffer", MapType(StringType, LongType))))

  //Output type: the formatted remark string.
  override def dataType = StringType

  //Same inputs always produce the same output.
  override def deterministic = true

  //Start every group with an empty city -> count map.
  override def initialize(buffer: MutableAggregationBuffer): Unit = buffer(0) = Map[String, Long]()

  //Fold one input row into the buffer: increment the click count of its city.
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    val cityMap = buffer.getMap[String, Long](0)
    val cityName = input.getString(0)
    val newClickCount = cityMap.getOrElse(cityName, 0L) + 1L
    //The buffer slot must be reassigned for Spark to pick up the change.
    buffer(0) = cityMap.updated(cityName, newClickCount)
  }

  //Merge two partial aggregation buffers (e.g. from different executors),
  //storing the result in buffer1.
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val cityMap1 = buffer1.getMap[String, Long](0)
    val cityMap2 = buffer2.getMap[String, Long](0)

    //Fold one map into the other, summing the counts of shared cities.
    //As in update(), the slot must be reassigned explicitly.
    buffer1(0) = cityMap1.foldLeft(cityMap2) {
      case (map, (k, v)) =>
        map.updated(k, map.getOrElse(k, 0L) + v)
    }
  }

  //Produce the remark string, e.g. "北京: 21.2%, 天津: 13.2%, 其他: 65.6%".
  //Cities are sorted by click count descending; beyond two cities the
  //remainder is reported as "其他".
  override def evaluate(buffer: Row) = {

    //Snapshot of the buffer, ordered by click count descending.
    val list = buffer.getMap[String, Long](0).toList.sortBy(-_._2)

    //Robustness fix: the original called list.head unconditionally and would
    //throw on an untouched (empty) buffer.
    if (list.isEmpty) {
      ""
    } else {
      //Total clicks in this group; non-zero because every entry in a non-empty
      //map was created by at least one increment in update().
      val sumClickCount = list.map(_._2).sum

      val builder = new StringBuilder
      list.size match {
        case size if size > 2 =>
          //Top two cities get explicit percentages; the rest is "其他".
          val list2 = list.take(2)
          var used = 0.0d
          list2.foreach {
            case (cityName, count) =>
              //Fix: was `count * 100 / sumClickCount` — Long integer division,
              //which truncated percentages to whole numbers while the "其他"
              //remainder stayed a Double (e.g. "21%, 13%, 66.0%"). Use
              //floating-point division and a uniform one-decimal format.
              val percentage = count * 100.0 / sumClickCount
              used += percentage
              builder.append(f"$cityName: $percentage%.1f%%, ")
          }
          builder.append(f"其他: ${100 - used}%.1f%%")
        case 2 =>
          val (firstCityName, count) = list.head
          val percentage = count * 100.0 / sumClickCount
          builder.append(f"$firstCityName: $percentage%.1f%%, ")
          val (lastCityName, _) = list.last
          builder.append(f"$lastCityName: ${100 - percentage}%.1f%%")
        case _ =>
          //Exactly one city: it accounts for all clicks.
          val (cityName, _) = list.head
          builder.append(s"$cityName: 100%")
      }
      builder.toString
    }
  }

}