package cn.doitedu.dwetl

import java.text.SimpleDateFormat
import java.util.UUID

import ch.hsr.geohash.GeoHash
import cn.doitedu.dwetl.beans.AppLogBean
import cn.doitedu.dwetl.utils.Row2AppLogBean
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileStatus, FileSystem, Path}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkFiles
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.lionsoul.ip2region.{DbConfig, DbSearcher}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-01-14
 * @desc ods层app端行为日志数据，处理为dwd明细表
 *  修订记录1：新老访客通过deviceid和account判断，修订为直接根据guid判断
 *
 *
 *   目标表建表语句
 CREATE TABLE dwd.event_app_detail (
    account         String                ,
    appid           String                ,
    appversion      String                ,
    carrier         String                ,
    deviceid        String                ,
    devicetype      String                ,
    eventid         String                ,
    ip              String                ,
    latitude        Double                ,
    longitude       Double                ,
    nettype         String                ,
    osname          String                ,
    osversion       String                ,
    properties      Map<String,String>    ,
    releasechannel  String                ,
    resolution      String                ,
    sessionid       String                ,
    `timestamp`     BIGINT                ,
    newsessionid    String                ,
    country         String                ,
    province        String                ,
    city            String                ,
    region          String                ,
    guid            String                ,
    isnew           String
  )
  PARTITIONED BY (dt string)
  STORED AS parquet
  TBLPROPERTIES("parquet.compress"="snappy")
  ;

 *
 */
object EventAppLog2DwdTable {

  /**
   * Entry point.
   *
   * Pipeline: load dictionaries (geohash area dict, ip2region db, device->account
   * binding, global guid set) and broadcast them; read the ODS app event log for
   * day T; clean/filter; split sessions on 30-minute gaps; enrich with geo info;
   * bind a global user id (guid); mark new vs. returning visitors; write the
   * result into dwd.event_app_detail and append the new guids to
   * dwd.user_guid_global.
   *
   * @param args args(0) = T-1 date, args(1) = T date (partition being processed),
   *             args(2) = T+1 date; all in yyyy-MM-dd format
   */
  def main(args: Array[String]): Unit = {

    if (args.length < 3) {
      println(
        """
          |
          |wrong number of parameters
          |usage:
          | args(0) :  T-1日
          | args(1) :  T日
          | args(2) :  T+1日
          |
          |""".stripMargin)
      // FIX: the original fell through after printing usage and then crashed on
      // args(0) with an ArrayIndexOutOfBoundsException; exit cleanly instead.
      sys.exit(1)
    }

    val DT_PRE = args(0)   // T-1 day (used by the deprecated new-visitor logic)
    val DT_CUR = args(1)   // T day: the partition we process and write
    val DT_NEXT = args(2)  // T+1 day: exclusive upper bound for valid timestamps

    //Logger.getLogger("org").setLevel(Level.FATAL)

    val spark = SparkSession.builder()
      .appName("ods层app端行为日志数据，处理为dwd明细表")
      //.master("local[*]")
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._

    /**
     * Load the dictionaries and broadcast them to the executors
     */
    // 1. geohash(5-char) -> (province, city, region) dictionary
    val geodf: Dataset[Row] = spark.read.parquet("/dicts/geodict")
    val geomap = geodf.rdd.map(row => {
      val geohash: String = row.getAs[String]("geohash")
      val province: String = row.getAs[String]("province")
      val city: String = row.getAs[String]("city")
      val region: String = row.getAs[String]("region")
      (geohash, (province, city, region))
    }).collectAsMap()
    val bc1 = spark.sparkContext.broadcast(geomap)


    // 2. ip2region.db dictionary
    /*
        // Alternative: distribute the file with the Spark file cache
        spark.sparkContext.addFile("/dicts/ip2region/ip2region.db")
        // and open it inside an executor-side closure
        geodf.rdd.map(row=>{
          val path = SparkFiles.get("ip2region.db")
          new DbSearcher(new DbConfig(),path)
        })
    */

    // Read the whole db file into a byte array on the driver and broadcast it,
    // so executors can build an in-memory DbSearcher without touching HDFS.
    val fs = FileSystem.get(new Configuration())
    val path = new Path("/dicts/ip2region/ip2region.db")
    // file length in bytes, needed to size the buffer
    val statuses: Array[FileStatus] = fs.listStatus(path)
    val len = statuses(0).getLen

    val buffer = new Array[Byte](len.toInt)
    val in: FSDataInputStream = fs.open(path)
    try {
      in.readFully(0, buffer)
    } finally {
      // FIX: the original never closed this stream (HDFS connection leak)
      in.close()
    }

    val bc2 = spark.sparkContext.broadcast(buffer)


    // 3. device -> account binding dictionary
    // Source rows look like (deviceid, account, score); we keep, per device,
    // the account with the highest score (ties broken by most recent last_time).
    //   d01,c01,1000
    //   d01,c02,800     =>  d01 -> c01   (group top-1)
    val relation = spark.sql(
      s"""
        |
        |select
        |    deviceid,
        |    account
        |   from
        |      (
        |         select
        |          deviceid,
        |          account,
        |          row_number() over(partition by deviceid order by score desc,last_time desc) as rn
        |         from dwd.device_account_relation
        |         where dt='${DT_CUR}'
        |      ) o
        |where rn=1
        |
        |""".stripMargin)

    val relationMap = relation.rdd.map(row => {
      val deviceid = row.getAs[String]("deviceid")
      val account = row.getAs[String]("account")
      (deviceid, account)
    }).collectAsMap()
    val bc3 = spark.sparkContext.broadcast(relationMap)


    // 4. Historical device/account ids for the OLD new-visitor logic.
    // FIX: this full table scan + broadcast was dead work — its only consumer
    // (the @deprecated isnew block below) is commented out. Skipped accordingly.
    /*
    val ids = spark.read.table("dwd.device_account_relation")
      .where(s"dt='${DT_PRE}' ")
      .selectExpr("explode (array(deviceid,account)) as id")
      .map(row => row.getAs[String]("id")).collect().toSet
    val bc4 = spark.sparkContext.broadcast(ids)
    */

    // 5. Global guid set (the entire dwd.user_guid_global table) — used to
    // decide whether a guid has ever been seen before (new vs. returning)
    val guidList: Array[String] = spark.read.table("dwd.user_guid_global")
      .select("guid")
      .rdd.map(_.getAs[String]("guid"))
      .collect()
    val bc5 = spark.sparkContext.broadcast(guidList.toSet)


    /**
     * Load day T of the ODS log table
     */
    val ods = spark.read.table("ods.event_app_log").where(s"dt='${DT_CUR}'")

    val beanRdd = ods.rdd.map(row => {
      Row2AppLogBean.row2AppLogBean(row)
    })


    /**
     * Clean and filter according to the rules:
     *  - deviceid / properties / eventid / sessionid must all be present
     *  - the event timestamp must fall inside [T 00:00, T+1 00:00)
     */
    // Precompute the valid time window once on the driver: SimpleDateFormat is
    // not thread-safe and the original allocated one per record inside filter().
    val sdf = new SimpleDateFormat("yyyy-MM-dd")
    val validStart = sdf.parse(DT_CUR).getTime
    val validEnd = sdf.parse(DT_NEXT).getTime

    val filtered: RDD[AppLogBean] = beanRdd.filter(bean => {
      // FIX: the original condition
      //   !isNotBlank(deviceid) && properties != null && isNotBlank(eventid) && isNotBlank(sessionid)
      // only dropped records whose deviceid was blank while every OTHER field was
      // present; records missing eventid/sessionid/properties slipped through.
      // The intent (per the original comment) is: drop when ANY required field
      // is missing.
      val hasRequiredFields =
        StringUtils.isNotBlank(bean.deviceid) &&
          bean.properties != null &&
          StringUtils.isNotBlank(bean.eventid) &&
          StringUtils.isNotBlank(bean.sessionid)

      val inTimeRange = bean.timestamp >= validStart && bean.timestamp < validEnd

      hasRequiredFields && inTimeRange
    })


    /**
     * Session splitting: within one original sessionid, start a new
     * newsessionid whenever two consecutive events are more than 30 minutes
     * apart.
     */
    val sessionSplitted: RDD[AppLogBean] = filtered.groupBy(bean => bean.sessionid).flatMapValues(iter => {

      val sortedEvents = iter.toList.sortBy(bean => bean.timestamp)
      var tmpSessionId = UUID.randomUUID().toString
      for (i <- 0 until sortedEvents.size) {
        // current event keeps the running id; a large gap AFTER it rotates the
        // id so the NEXT event starts a fresh session
        sortedEvents(i).newsessionid = tmpSessionId
        if (i < sortedEvents.size - 1 && sortedEvents(i + 1).timestamp - sortedEvents(i).timestamp > 30 * 60 * 1000) tmpSessionId = UUID.randomUUID().toString
      }

      sortedEvents
    }).map(_._2)

    //sessionSplitted.toDF.show(100,false)

    // sanity check of the splitting
    /*sessionSplitted.toDF.createTempView("tmp")
    spark.sql(
      """
        |
        |select
        |sessionid,count(distinct newsessionid) as cnt
        |from tmp
        |group by sessionid
        |having count(distinct newsessionid) >1
        |
        |""".stripMargin).show(100,false)*/


    /**
     * Geo enrichment: look up province/city/region by geohash of (lat,lng);
     * fall back to an ip2region lookup when the geohash dictionary misses.
     */
    val aread: RDD[AppLogBean] = sessionSplitted.mapPartitions(iter => {

      val geoDict: collection.Map[String, (String, String, String)] = bc1.value
      val ip2RegionDb: Array[Byte] = bc2.value

      // one searcher per partition, built over the broadcast byte buffer
      val searcher = new DbSearcher(new DbConfig(), ip2RegionDb)

      iter.map(bean => {

        // defaults in case both lookups fail
        var country: String = "UNKNOWN"
        var province: String = "UNKNOWN"
        var city: String = "UNKNOWN"
        var region: String = "UNKNOWN"

        // primary lookup: 5-char geohash of the reported coordinates.
        // NOTE(review): country is hard-coded to "CN" whenever the geohash is
        // computable, even on a dictionary miss — preserved from the original,
        // but the ip fallback below overwrites it when province is UNKNOWN.
        try {
          val lat = bean.latitude
          val lng = bean.longitude
          val geo = GeoHash.geoHashStringWithCharacterPrecision(lat, lng, 5)
          val area = geoDict.getOrElse(geo, ("UNKNOWN", "UNKNOWN", "UNKNOWN"))
          country = "CN"
          province = area._1
          city = area._2
          region = area._3
        } catch {
          case e: Exception => e.printStackTrace()
        }

        // fallback: resolve the ip address when the geohash lookup missed
        if ("UNKNOWN".equals(province)) {
          // ip2region returns e.g. "中国|0|上海|上海市|电信"
          try {
            // FIX: memorySearch was outside the try in the original — a blank
            // or malformed ip threw and killed the whole task
            val block = searcher.memorySearch(bean.ip)
            val split = block.getRegion.split("\\|")
            country = split(0)
            province = split(2)
            city = split(3)
          } catch {
            case e: Exception => e.printStackTrace()
          }
        }

        bean.country = country
        bean.province = province
        bean.city = city
        bean.region = region

        bean
      })
    })

    /**
     * guid binding
     */
    val guided: RDD[AppLogBean] = aread.mapPartitions(iter => {

      val deviceBindAccountDict = bc3.value

      iter.map(bean => {
        var guid: String = null

        // a logged-in account is used directly as the global user id
        if (StringUtils.isNotBlank(bean.account)) {
          guid = bean.account
        }
        // otherwise look up the device's preferred account; if none is bound,
        // fall back to the deviceid itself
        else {
          val findedAccount = deviceBindAccountDict.getOrElse(bean.deviceid, null)
          if (findedAccount != null) guid = findedAccount else guid = bean.deviceid
        }

        bean.guid = guid

        bean
      })

    })

    /**
     * New/returning visitor flag via deviceid + account
     * (imprecise — see revision note in the header)
     * @deprecated replaced by the guid-based check below
     */
    /*val result = guided.mapPartitions(iter=>{
      val idSet = bc4.value
      iter.map(bean=>{
        var isnew = "1"
        if(idSet.contains(bean.deviceid) || idSet.contains(bean.account)) isnew = "0"
        bean.isnew = isnew
        bean
      })
    }).toDF()*/

    /**
     * New/returning visitor flag: a guid already present in the global guid
     * table means a returning visitor ("0"), otherwise new ("1").
     */
    val result = guided.mapPartitions(iter => {
      val guidSet = bc5.value
      iter.map(bean => {
        var isnew = "1"
        if (guidSet.contains(bean.guid)) isnew = "0"
        bean.isnew = isnew
        bean
      })
    }).toDF()
    // cached because it feeds both INSERT statements below
    result.cache()


    /**
     * Write the detail result into the target table
     */
    result.createTempView("result")
    spark.sql(
      s"""
        |
        |insert into table dwd.event_app_detail partition(dt='${DT_CUR}')
        |select
        |account                          ,
        |nvl(appid      ,'UNKNOWN')       ,
        |nvl(appversion ,'UNKNOWN')       ,
        |nvl(carrier    ,'UNKNOWN')       ,
        |deviceid                         ,
        |nvl(devicetype,'UNKNOWN')        ,
        |eventid                          ,
        |ip                               ,
        |latitude                         ,
        |longitude                        ,
        |nvl(nettype   ,'UNKNOWN')        ,
        |nvl(osname    ,'UNKNOWN')        ,
        |nvl(osversion ,'UNKNOWN')        ,
        |properties                       ,
        |nvl(releasechannel,'UNKNOWN')    ,
        |nvl(resolution    ,'UNKNOWN')    ,
        |sessionid                        ,
        |timestamp                        ,
        |newsessionid                     ,
        |nvl(country    ,'UNKNOWN')       ,
        |nvl(province   ,'UNKNOWN')       ,
        |nvl(city       ,'UNKNOWN')       ,
        |nvl(region     ,'UNKNOWN')       ,
        |guid                             ,
        |isnew
        |
        |from result
        |
        |""".stripMargin)

    /**
     * Append the guids of new visitors to the global guid table, with ids
     * continuing from the current max id.
     */
    spark.sql(
      s"""
        |with tmp as (
        |select
        |  guid
        |from result
        |where isnew='1'
        |group by guid
        |)
        |
        |insert into table dwd.user_guid_global partition (dt='${DT_CUR}')
        |select
        |row_number() over(order by guid) + (select nvl(max(id),0) from dwd.user_guid_global) as id,
        |guid
        |from tmp
        |
        |""".stripMargin)

    spark.close()
  }

}
