package cn.doitedu.datayi.etl

import java.lang
import java.text.SimpleDateFormat
import java.util.UUID
import java.util.regex.Pattern

import ch.hsr.geohash.GeoHash
import cn.doitedu.datayi.beans.LogBean
import org.apache.commons.io.{FileUtils, IOUtils}
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.lionsoul.ip2region.{DbConfig, DbSearcher}
import redis.clients.jedis.Jedis

object OdsApp2DwdApp {

  /**
   * ODS -> DWD ETL for app event logs.
   *
   * Pipeline:
   *   1. Filter one day's raw events (required fields present, timestamp within the day).
   *   2. Normalize columns and add placeholder columns (splitedSessionId, filledAccount,
   *      province/city/region, guid, isnew).
   *   3. Split sessions on gaps > 30 minutes.
   *   4. Geo-integrate: GPS geohash dictionary first, fall back to ip2region lookup.
   *   5. ID-mapping: fill anonymous accounts from the device-account bind-score table,
   *      resolve a global user id (guid) via Redis, and mark new visitors.
   *   6. Insert the result into the DWD partition for the day.
   *
   * args(0): redis hostname
   * args(1): log current day, e.g. '2021-08-04'
   * args(2): log next day,    e.g. '2021-08-05'
   */
  def main(args: Array[String]): Unit = {

    if (args.length < 3) {
      println(
        """
          |usage:
          |  args(0): redis hostname
          |  args(1): log current day like '2021-08-04'
          |  args(2): log next day like '2021-08-05'
          |
          |""".stripMargin)

      sys.exit(1)
    }

    val redis_host = args(0)
    val log_curday = args(1)
    val log_nextday = args(2)

    // Fetch the previous day's max guid counter from Redis; any guid issued beyond
    // this value identifies a new visitor (isnew = 1).
    val jedis1 = new Jedis(redis_host, 6379)
    val str = jedis1.get("guid_cnt")
    var lastMaxGuid = 0L
    if (str != null) lastMaxGuid = str.toLong
    jedis1.close()

    val spark = SparkSession.builder()
      .appName("app日志ods层etl到dwd层") // traditional warehouse equivalent: kettle
      //.master("local")
      .config("spark.sql.shuffle.partitions", 1)
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._

    // Load one day's raw events from the ODS layer.
    val curLog: DataFrame = spark.read.table("ods23.app_event_log").where(s"dt='${log_curday}'")

    // deviceid / properties / eventid / sessionid are all mandatory — drop rows missing any of them.
    import org.apache.spark.sql.functions._
    val isnotblank = udf((s: String) => {
      StringUtils.isNotBlank(s)
    })

    // Millisecond bounds of the log day: [curday 00:00:00, nextday 00:00:00)
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val startTime = sdf.parse(s"${log_curday} 00:00:00").getTime
    val endTime = sdf.parse(s"${log_nextday} 00:00:00").getTime

    val filtered = curLog
      .where(isnotblank($"deviceid") and 'properties.isNotNull and isnotblank(col("eventid")) and isnotblank(curLog("sessionid")))
      .where(s"timestamp >= $startTime and timestamp <$endTime")

    // Normalize columns and append the placeholder columns that later stages fill in.
    filtered.createTempView("filtered")

    val regulared = spark.sql(
      """
        |select
        |
        |if(account='',null,account) as account ,
        |appId                     ,
        |appVersion                ,
        |carrier                   ,
        |deviceId                  ,
        |deviceType                ,
        |eventId                   ,
        |ip                        ,
        |latitude                  ,
        |longitude                 ,
        |netType                   ,
        |osName                    ,
        |osVersion                 ,
        |properties                ,
        |releaseChannel            ,
        |resolution                ,
        |sessionId                 ,
        |timeStamp                 ,
        | null as splitedSessionId   ,
        | null as filledAccount      ,
        | null as province           ,
        | null as city               ,
        | null as region             ,
        | -1 as guid                 ,
        | 0  as isnew
        |from filtered
        |
        |""".stripMargin)

    // Switch to the typed API for the row-level stages below.
    val dataSet: Dataset[LogBean] = regulared.as[LogBean]

    // Session split: assign splitedSessionId, starting a fresh UUID whenever the
    // gap to the next event in the same raw session exceeds 30 minutes.
    val sessionSplitted: Dataset[LogBean] = dataSet.rdd
      .groupBy(bean => bean.sessionId)
      .flatMap(tp => {
        // Array (not List) so indexed access in the loop is O(1) instead of O(n).
        val events = tp._2.toArray.sortBy(b => b.timeStamp)

        var uuid = UUID.randomUUID().toString
        for (i <- events.indices) {
          events(i).splitedSessionId = uuid
          // Gap > 30 min to the next event => the NEXT event begins a new split session.
          if (i < events.length - 1 && (events(i + 1).timeStamp - events(i).timeStamp > 30 * 60 * 1000)) uuid = UUID.randomUUID().toString
        }
        events
      }).toDS()

    // Geo integration (province/city/region from GPS, falling back to IP).
    // Load the GPS reference-point dictionary from Hive: geohash -> (province, city, region).
    val areaDict = spark.read.table("dim23.area_dict").where("geohash is not null and geohash!=''")
    val gpsDictMap: collection.Map[String, (String, String, String)] = areaDict.rdd.map({ case Row(geohash: String, province: String, city: String, region: String) => {
      (geohash, (province, city, region))
    }
    }).collectAsMap()
    // Broadcast the dictionary to the executors.
    val bc = spark.sparkContext.broadcast(gpsDictMap)

    // Read the ip2region database file from HDFS into memory.
    val hadoopConf = new Configuration()
    val fs = FileSystem.get(hadoopConf)
    val path = new Path("/dict/ip2region.db")
    val l: Long = fs.getFileStatus(path).getLen
    val inputStream = fs.open(path)
    val bytes = new Array[Byte](l.toInt)
    IOUtils.readFully(inputStream, bytes)
    IOUtils.closeQuietly(inputStream)
    // Broadcast the raw db bytes so each executor can build an in-memory searcher.
    val bc2 = spark.sparkContext.broadcast(bytes)

    // Per-partition geo lookup: build the searcher once per partition, then map rows.
    val areaed = sessionSplitted.mapPartitions(iter => {
      val gpsDict = bc.value
      val ip2regionBytes = bc2.value
      val config = new DbConfig()
      val searcher = new DbSearcher(config, ip2regionBytes)
      // Matches strings consisting only of CJK/full-width characters — used to reject
      // ip2region placeholder values like "0" or "内网IP" mixed with ASCII.
      val pattern = Pattern.compile("^[\\u0391-\\uFFE5]+$")

      iter.map(bean => {
        var flag = false
        try {
          val lat = bean.latitude
          val lng = bean.longitude

          // Encode lat/lng to a 6-character geohash and probe the GPS dictionary.
          val geoStr = GeoHash.geoHashStringWithCharacterPrecision(lat, lng, 6)

          if (gpsDict.contains(geoStr)) {
            flag = true
            val areaInfo: (String, String, String) = gpsDict.getOrElse(geoStr, ("", "", ""))
            bean.province = areaInfo._1
            bean.city = areaInfo._2
            bean.region = areaInfo._3
          }
        } catch {
          case exception: Exception => exception.printStackTrace()
        }

        // GPS lookup failed (bad coordinates or no dictionary hit) — fall back to IP.
        if (!flag) {

          val block = searcher.memorySearch(bean.ip)
          // split takes a regex: '|' must be escaped, otherwise the string is split
          // into single characters and province/city are never extracted.
          val strings = block.getRegion.split("\\|")

          if (strings.length >= 6 && pattern.matcher(strings(3)).matches()) {
            bean.province = strings(3)
            bean.city = strings(4)
          }
        }
        bean
      })
    })

    /**
     * ID-mapping and new/returning visitor marking.
     * Left-join the day's log against the device-account bind-score table to fill
     * the account for anonymous (visitor) records.
     */
    areaed.createTempView("areaed")
    val anonymousFilled = spark.sql(
      s"""
        |-- 将当天日志 去  left join  设备账号绑定表，从而将当天日志中的游客访问记录填充绑定的账号
        |select
        |   areaed.account            ,
        |   areaed.appId              ,
        |   areaed.appVersion         ,
        |   areaed.carrier            ,
        |   areaed.deviceId           ,
        |   areaed.deviceType         ,
        |   areaed.eventId            ,
        |   areaed.ip                 ,
        |   areaed.latitude           ,
        |   areaed.longitude          ,
        |   areaed.netType            ,
        |   areaed.osName             ,
        |   areaed.osVersion          ,
        |   areaed.properties         ,
        |   areaed.releaseChannel     ,
        |   areaed.resolution         ,
        |   areaed.sessionId          ,
        |   areaed.timeStamp          ,
        |   areaed.splitedSessionId   ,
        |   nvl(areaed.account,o2.account) as filledAccount,
        |   areaed.province           ,
        |   areaed.city               ,
        |   areaed.region             ,
        |   areaed.guid               ,
        |   areaed.isnew
        |from
        |  areaed
        |left join
        |   -- 这一段子查询，是加载设备账号绑定评分表，获取设备上优先级最高的绑定账号
        |   (
        |      select
        |        deviceid,account
        |      from
        |      (
        |        select
        |            deviceid,
        |            account,
        |            row_number() over(partition by deviceid order by score desc,last_login desc) as rn
        |        from dws23.device_account_bind_score
        |        where dt='${log_curday}' and account is not null
        |      ) o1
        |      where rn=1
        |   ) o2
        |on areaed.deviceid=o2.deviceid
        |
        |
        |""".stripMargin)

    // Resolve a stable global user id (guid) per record via Redis:
    // account key first, then deviceId, otherwise allocate a new id from the counter.
    val ds2 = anonymousFilled.as[LogBean]
    val res: Dataset[LogBean] = ds2.mapPartitions(iter => {
      // NOTE(review): this connection is never closed — the partition iterator is lazy,
      // so a plain close() here would run before the rows are consumed. Consider wrapping
      // the iterator to close on exhaustion, or relying on Jedis idle-timeout for now.
      val jedis = new Jedis(redis_host, 6379)
      iter.map(bean => {
        // Try the (possibly filled) account first.
        var guidStr: String = null
        try {
          guidStr = jedis.get(bean.filledAccount)
        } catch {
          // filledAccount may be null (Jedis throws) — treat as "not found".
          case exception: Exception =>
        }
        if (bean.filledAccount != null && guidStr != null) {
          bean.guid = guidStr.toLong
        }
        else {
          // No account hit — fall back to the device id.
          guidStr = jedis.get(bean.deviceId)
          if (guidStr != null) { // found by deviceId
            bean.guid = guidStr.toLong
            // Migrate the Redis key from deviceId to account now that we know the account.
            if (bean.filledAccount != null) {
              jedis.del(bean.deviceId)
              jedis.set(bean.filledAccount, guidStr)
            }
          } else { // not found by deviceId either
            // Allocate a brand-new guid from the atomic counter.
            val newGuid = jedis.incr("guid_cnt")

            // Register it under the best key we have (account preferred over deviceId).
            val key = if (bean.filledAccount == null) bean.deviceId else bean.filledAccount;
            jedis.set(key, newGuid + "")

            bean.guid = newGuid
          }
        }

        // Any guid beyond yesterday's counter value was issued today => new visitor.
        if (bean.guid > lastMaxGuid) bean.isnew = 1
        bean
      })
    })

    // Write the result into the DWD partition for the day.
    res.createTempView("res")
    spark.sql(
      s"""
        |
        |insert into table dwd23.app_event_detail partition(dt='${log_curday}')
        |select
        | *
        |from res
        |
        |""".stripMargin)
    spark.close()

  }
}
