package cn.doitedu.dw_etl.utils

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileSystem, Path}
import org.apache.spark.sql.{DataFrame, SparkSession}

object DictLoadUtils {

  /**
   * Loads the geohash → area dictionary from parquet and collects it to the driver.
   *
   * Dictionary parquet schema: "geo", "province", "city", "region".
   *
   * @param spark active SparkSession
   * @return map from geohash string to (province, city, region)
   */
  def loadGeoHashDict(spark: SparkSession): collection.Map[String, (String, String, String)] = {
    val dictDF: DataFrame = spark.read.parquet("/dict_data/geohash_area_dict/")

    // Re-key each row as geo -> (province, city, region), then pull the
    // whole dictionary to the driver as a map.
    dictDF.rdd
      .map { row =>
        val geo = row.getAs[String]("geo")
        val area = (
          row.getAs[String]("province"),
          row.getAs[String]("city"),
          row.getAs[String]("region")
        )
        (geo, area)
      }
      .collectAsMap()
  }


  /**
   * Reads the entire ip2region.db dictionary file from HDFS into memory.
   *
   * @return the file's full contents as a byte array
   * @throws java.io.EOFException if the file shrinks between stat and read
   * @throws IllegalArgumentException if the file cannot fit in one Array[Byte]
   */
  def loadIp2RegionDbFile(): Array[Byte] = {

    val conf = new Configuration()
    val fs = FileSystem.get(conf)
    val dbPath = new Path("/dict_data/ip_area_dict/ip2region.db")

    val len: Long = fs.getFileStatus(dbPath).getLen
    // A silent len.toInt would wrap for files > 2 GB and corrupt the read;
    // fail loudly instead.
    require(len <= Int.MaxValue, s"ip2region.db too large to load into memory: $len bytes")

    println("file len: " + len)

    val bytes = new Array[Byte](len.toInt)
    val fsin: FSDataInputStream = fs.open(dbPath)
    try {
      // read() may return after filling only part of the array; readFully
      // blocks until exactly bytes.length bytes are read (or throws EOF).
      // Note: readFully returns Unit — the original logged its result, which
      // printed "()" rather than a byte count.
      fsin.readFully(bytes)
    } finally {
      // Bug fix: the stream was previously never closed (descriptor leak).
      // The FileSystem handle itself is a shared cached instance, so it is
      // deliberately NOT closed here.
      fsin.close()
    }

    println("read len: " + bytes.length)

    bytes
  }

  /**
   * Loads the deviceid → guid id-mapping dictionary from json files and
   * collects it to the driver.
   *
   * @param spark    active SparkSession
   * @param dictPath path of the id-mapping json dictionary
   * @return map from deviceid to guid
   */
  def loadIdmappingDict(spark: SparkSession, dictPath: String): collection.Map[String, String] = {
    // NOTE(review): `isblank` is not a Spark SQL built-in; presumably a UDF
    // registered elsewhere in this job — confirm before reusing standalone.
    spark.read
      .json(dictPath)
      .select("deviceid", "guid")
      .where("!isblank(deviceid)")
      .rdd
      .map(r => (r.getAs[String]("deviceid"), r.getAs[String]("guid")))
      .collectAsMap()
  }


  /**
   * Loads the distinct device ids and distinct accounts seen in the
   * device/account log for one partition day.
   *
   * @param spark active SparkSession
   * @param dt    partition date (dt=... value); interpolated into the WHERE
   *              clause, so it must come from trusted job configuration
   * @return (set of device ids, set of accounts) — may contain null entries
   *         if the source columns hold nulls
   */
  def loadDeviceIdsAndAccounts(spark: SparkSession, dt: String): (Set[String], Set[String]) = {

    val logTable = spark.read.table("dwd.device_account_log")
      .where(s"dt='${dt}'")
      .select("deviceid", "account")

    // Improvement over the original: collect both columns in ONE Spark job.
    // The previous version cached the dataframe, ran two separate collects,
    // and never called unpersist() — leaking executor memory across calls.
    val pairs: Array[(String, String)] = logTable.rdd
      .map(row => (row.getAs[String]("deviceid"), row.getAs[String]("account")))
      .collect()

    val deviceIds = pairs.iterator.map(_._1).toSet
    val accounts = pairs.iterator.map(_._2).toSet

    (deviceIds, accounts)
  }

}
