package com.wzk.dwetl

import com.wzk.dwetl.utils.RrUtils.toBitmap
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo job: aggregates distinct user ids per (province, city, region),
 * converts each id set into a bitmap via the `toBitmap` UDF, and writes
 * the result into the Hive table `test.bitmap_demo`.
 */
object CubeDistinctAggrgationWrite {
  def main(args: Array[String]): Unit = {
    // Run HDFS/Hive operations as "root" regardless of the local OS user.
    System.setProperty("HADOOP_USER_NAME", "root")
    val spark = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .enableHiveSupport()
      .master("local")
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Sample input: one "id,province,city,region" CSV line per record.
    // Ids intentionally repeat across regions to exercise deduplication.
    val data = spark.createDataset(Seq(
      "1,江苏省,南通市,下关区",
      "1,江苏省,南通市,下关区",
      "2,江苏省,南通市,下关区",
      "2,江苏省,南通市,白领区",
      "3,江苏省,南通市,白领区",
      "3,江苏省,南通市,富豪区",
      "1,江苏省,苏州市,园林区",
      "1,江苏省,苏州市,园林区",
      "4,江苏省,苏州市,虎跳区"
    ))

    // Parse each CSV line into a typed (id, province, city, region) row.
    val table = data.map { line =>
      val fields = line.split(",")
      (fields(0).toInt, fields(1), fields(2), fields(3))
    }.toDF("id", "province", "city", "region")

    // Group by area and collect the *distinct* ids of each group into an array.
    val ids: DataFrame = table.groupBy("province", "city", "region")
      .agg(collect_set("id") as "ids")

    // UDF wrapping the project helper that turns an id array into a bitmap.
    val toRrBitmap = udf(toBitmap)

    // Use the $"col" interpolator (from spark.implicits._) instead of symbol
    // literals ('col), which are deprecated since Scala 2.13 and removed in Scala 3.
    val bitmapResult = ids.select($"province", $"city", $"region", toRrBitmap($"ids") as "ids_bitmap")
    // createOrReplaceTempView avoids an AnalysisException when the view "res"
    // already exists (e.g. the job is re-run within the same session).
    bitmapResult.createOrReplaceTempView("res")
    spark.sql(
      """
        |
        |insert into table test.bitmap_demo
        |select * from res
        |""".stripMargin)

    // stop() is the canonical shutdown API (close() merely delegates to it).
    spark.stop()
  }
}
