package cn.doitedu.dwetl

import java.io.{ByteArrayOutputStream, DataOutputStream, ObjectOutputStream}

import org.apache.spark.sql.{SaveMode, SparkSession}
import org.roaringbitmap.RoaringBitmap
import cn.doitedu.dwetl.utils.RrUtils.toBitmap
import scala.collection.mutable

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-01-17
 * @desc cube逐级聚合（去重指标），用bitmap思想

create table test.bitmap_demo(
province  string,
city      string,
region    string,
bitmap    binary
)
stored as parquet
;


 *
 *
 */
object CubeDistinctAggregationWrite {
  def main(args: Array[String]): Unit = {

    // Local Spark session with Hive support enabled, so the INSERT below can
    // write into the Hive table test.bitmap_demo (DDL in the file header).
    val spark = SparkSession.builder()
      // Fix: was appName("") — an empty app name makes the job impossible to
      // identify in the Spark UI / history server.
      .appName("CubeDistinctAggregationWrite")
      .enableHiveSupport()
      .master("local")
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Sample input events, one CSV line per record: "guid,province,city,region".
    val data = spark.createDataset(Seq(
      "1,江苏省,南通市,下关区",
      "1,江苏省,南通市,下关区",
      "2,江苏省,南通市,下关区",
      "2,江苏省,南通市,白领区",
      "3,江苏省,南通市,白领区",
      "3,江苏省,南通市,富豪区",
      "1,江苏省,苏州市,园林区",
      "1,江苏省,苏州市,园林区",
      "4,江苏省,苏州市,虎跳区"
    ))

    // Parse each CSV line into a typed row (id as Int, three dimension columns).
    val table = data.map(s => {
      val arr = s.split(",")
      (arr(0).toInt, arr(1), arr(2), arr(3))
    }).toDF("id", "province", "city", "region")

    // Group by the finest dimension combination and collect the distinct guids
    // of each group into an array (collect_set already de-duplicates).
    val ids = table.groupBy("province", "city", "region")
      .agg(collect_set('id) as "ids")

    // UDF turning the guid array into a bitmap (serialized to a binary column),
    // so higher-level cube aggregations can OR bitmaps instead of re-counting.
    // NOTE(review): toBitmap comes from cn.doitedu.dwetl.utils.RrUtils — assumed
    // to return the serialized RoaringBitmap bytes; verify against that helper.
    val toRrBitmap = udf(toBitmap)

    val bitmapResult = ids.select('province, 'city, 'region, toRrBitmap('ids) as "ids_bitmap")

    // Fix: createTempView throws AnalysisException when the view name already
    // exists in this session; createOrReplaceTempView is idempotent.
    bitmapResult.createOrReplaceTempView("res")

    // Append the per-(province, city, region) bitmaps into the Hive table.
    spark.sql(
      """
        |insert into table test.bitmap_demo
        |select province,city,region,ids_bitmap
        |from res
        |""".stripMargin)

    spark.close()
  }

}
