package cn.doitedu.dwh

import java.util.UUID

import ch.hsr.geohash.GeoHash
import org.apache.commons.io.IOUtils
import org.apache.commons.lang.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}
import org.lionsoul.ip2region.{DataBlock, DbConfig, DbSearcher}
import redis.clients.jedis.Jedis

object ApplogOds2Dwd {
  /**
   * ODS -> DWD ETL for app event logs.
   *
   * Steps:
   *   1. Read one day's partition of ods.app_event_log and filter malformed rows.
   *   2. Re-split sessions on a 30-minute inactivity gap.
   *   3. Integrate geo location (GPS geohash dictionary first, ip2region as fallback).
   *   4. Back-fill anonymous records with the best-scored bound account.
   *   5. Resolve a global user id (guid) via redis, allocating new ids as needed.
   *   6. Mark new visitors (guid greater than yesterday's max) and write to dwd.
   *
   * args(0): the date (yyyy-MM-dd) of the log partition to process.
   */
  def main(args: Array[String]): Unit = {

    if(args.length < 1) {
      System.err.println(
        """
          |  dismatched arguments!
          |  Usage:
          |    args(0):要计算的日志的日期
          |
          |""".stripMargin)
      sys.exit(1)
    }

    val logDt = args(0)

    // Build the SparkSession; Hive support is required to read/write the warehouse tables.
    val spark = SparkSession
      .builder()
      .appName("ApplogOds2Dwd")
      //.master("local")
      .enableHiveSupport()
      //.config("spark.default.parallelism", 5) // affects RDD operations
      //.config("spark.sql.shuffle.partitions", "5") // affects SQL shuffles
      .getOrCreate()
    import spark.implicits._

    // Snapshot the max guid allocated BEFORE this run; any guid larger than this
    // snapshot identifies a new visitor. Missing key (first ever run) defaults to 0.
    // The original code NPE'd on a missing key and leaked the connection.
    val jds = new Jedis("hdp02", 6379)
    val pre_max = Option(jds.get("max_guid")).map(_.toLong).getOrElse(0L)
    jds.close()

    // Read the requested partition of the ODS app event log table.
    val odsTable = spark.read.table("ods.app_event_log").where(s" dt='${logDt}' ")

    // Basic cleansing: require deviceid/properties/eventid/sessionid, and keep only
    // events whose (UTC+8) timestamp actually falls on the requested date.
    val filtered = odsTable.where(
      s"""
        |deviceid is not null and trim(deviceid)!=''
        |and  properties is not null
        |and eventid is not null and trim(eventid)!=''
        |and sessionid is not null and trim(sessionid)!=''
        |and to_date(from_unixtime(cast((timestamp + 8*60*60*1000)/1000 as bigint))) = '${logDt}'
        |""".stripMargin)

    // Convert rows to beans for the session-splitting step.
    val beanRdd: RDD[ApplogBean] = filtered.rdd.map(LogBeanUtils.appLogRow2Bean1(_))

    // Session splitting: group by the original sessionid, sort by timestamp, and start
    // a fresh session id whenever two consecutive events are more than 30 minutes apart.
    val sessionSplited: RDD[ApplogBean] = beanRdd.groupBy(bean => bean.sessionId).flatMap(tp => {
      val sorted: List[ApplogBean] = tp._2.toList.sortBy(bean => bean.timeStamp)

      var tmpSession = UUID.randomUUID().toString

      for (i <- 0 until sorted.size) {
        sorted(i).newsession = tmpSession
        // If the NEXT event is more than 30 minutes away, it starts a new session.
        if (i < sorted.size - 1 && sorted(i + 1).timeStamp - sorted(i).timeStamp > 30 * 60 * 1000) {
          tmpSession = UUID.randomUUID().toString
        }
      }
      sorted
    }
    )

    // ---------- geo location integration ----------

    // Load the geohash -> (province, city, region) reference dictionary and
    // collect it to the driver so it can be broadcast.
    val geoReference: Dataset[Row] = spark.read.table("dim.area_dict_geo")
    val geoReferenceRdd = geoReference.rdd.map(row => {
      val province = row.getAs[String]("province")
      val city = row.getAs[String]("city")
      val region = row.getAs[String]("region")
      val geo = row.getAs[String]("geo")
      (geo, (province, city, region))
    })
    val geoReferenceMap = geoReferenceRdd.collectAsMap()

    val bc_1 = spark.sparkContext.broadcast(geoReferenceMap)

    // Load the ip2region db file from HDFS into a byte array for in-memory search.
    val fs: FileSystem = FileSystem.get(new Configuration())
    val path = new Path("/doit21_dict/ip2region.db")
    val dbFileLen = fs.listStatus(path)(0).getLen

    val buff = new Array[Byte](dbFileLen.toInt)

    val in = fs.open(path)
    IOUtils.readFully(in, buff)
    // Close the stream once the whole file is buffered (was leaked before).
    in.close()

    val bc_2 = spark.sparkContext.broadcast(buff)

    // Enrich each bean with country/province/city/region.
    val aread = sessionSplited.mapPartitions(iter => {
      // Per-partition initialization: unpack broadcasts and build one searcher.
      val geoMap = bc_1.value
      val ipDbBytes = bc_2.value
      val searcher = new DbSearcher(new DbConfig(), ipDbBytes)

      iter.map(bean => {
        var country: String = "中国"
        var province: String = ""
        var city: String = ""
        var region: String = ""

        // First try the GPS coordinates against the geohash dictionary.
        var matched = false
        try {
          val geoCode = GeoHash.geoHashStringWithCharacterPrecision(bean.latitude, bean.longitude, 5)
          val areaTuple = geoMap.getOrElse(geoCode, ("", "", ""))
          province = areaTuple._1
          city = areaTuple._2
          region = areaTuple._3

          if (!"".equals(province)) {
            matched = true
          }
        } catch {
          case e: Exception => e.printStackTrace()
        }

        // GPS lookup failed -> fall back to the ip2region database.
        // Guarded: a malformed IP must not kill the task (was unguarded before).
        if (!matched) {
          try {
            val block: DataBlock = searcher.memorySearch(bean.ip)
            // Format: 中国|0|北京|北京|联通
            val arr = block.getRegion.split("\\|")
            if (arr.length == 5) {
              country = arr(0)
              province = arr(2)
              city = arr(3)
            }
          } catch {
            case e: Exception => e.printStackTrace()
          }
        }

        bean.country = country
        bean.province = province
        bean.city = city
        bean.region = region

        // Normalize blank accounts to null so nvl() works in the join below.
        if (StringUtils.isBlank(bean.account)) bean.account = null

        // Fill the partition column.
        bean.dt = s"${logDt}"

        bean
      })
    })

    // ---------- account back-fill from the device/account bind-score table ----------
    // Keep, per device, the account with the highest score (ties broken by recency).
    val bindAccount = spark.sql(
      s"""
        |select
        | deviceid,account
        |from
        |(
        |   select
        |      deviceid,
        |      account,
        |      row_number() over(partition by deviceid order by score desc,last_timestamp desc)  as rn
        |   from dws.device_account_bind_score
        |   where dt='${logDt}' and account is not null
        |) o
        |where rn=1
        |
        |""".stripMargin)

    aread.toDF().drop("bindAccount").createTempView("aread")
    bindAccount.createTempView("bind")

    // Prefer the log's own account; fall back to the highest-scored bound account.
    val binded = spark.sql(
      """
        |
        |select
        |
        |aread.*,
        |
        |nvl(aread.account,bind.account)  as bindAccount
        |
        |from
        |  aread
        |left join
        |  bind
        |on aread.deviceid=bind.deviceid
        |
        |""".stripMargin)

    // ---------- guid resolution against redis ----------
    val bindedRdd = binded.rdd.map(row => LogBeanUtils.appLogRow2Bean2(row))

    // One redis connection per PARTITION (the original opened one per record and
    // never closed it). The partition is materialized so the connection can be
    // closed deterministically before the iterator is handed back.
    val guidedRdd = bindedRdd.mapPartitions(iter => {
      val jedis = new Jedis("hdp02", 6379)
      val result = iter.map(bean => {
        var guid: String = null

        // Look up the guid by bound account first.
        if (StringUtils.isNotBlank(bean.bindAccount)) {
          guid = jedis.get(bean.bindAccount)
        }

        // Not found -> try the device id.
        if (StringUtils.isBlank(guid)) {
          guid = jedis.get(bean.deviceId)

          // Found by device id but we now know the account: migrate the key
          // so future lookups hit the account directly.
          if (StringUtils.isNotBlank(guid) && StringUtils.isNotBlank(bean.bindAccount)) {
            jedis.del(bean.deviceId)
            jedis.set(bean.bindAccount, guid)
          }

          // Still not found -> allocate a brand-new guid from the max_guid counter.
          if (StringUtils.isBlank(guid)) {
            guid = jedis.incr("max_guid") + ""
            jedis.set(if (StringUtils.isNotBlank(bean.bindAccount)) bean.bindAccount else bean.deviceId, guid)
          }
        }

        bean.guid = guid.toLong

        bean
      }).toList
      jedis.close()
      result.iterator
    })

    // ---------- new-visitor flag ----------
    // A guid larger than yesterday's max means it was allocated today -> new visitor.
    val isnewed = guidedRdd.map(bean => {
      if (bean.guid > pre_max) bean.isnew = 1
      bean
    })

    // Append the result into the DWD detail table, partitioned by dt.
    isnewed.toDF().write.mode(SaveMode.Append).partitionBy("dt").saveAsTable("dwd.app_event_detail")

    spark.close()
  }

}
