package com.spark.prepareJob.personal

import java.util

import com.spark.beans.Logs
import com.spark.prepareJob.tags._
import com.spark.utils.HbaseUtils
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import scala.collection.mutable
/**
  * User tag extraction:
  *  ad-position tags
  *  app-name tags
  *  channel tags
  *  device tags
  *  keyword tags
  *  area tags
  *      finally merged into the per-user context tag set
  *
  */
object TagsExtractJob {

  /** HBase table the aggregated per-user tags are written into. */
  private val TagsTableName = "bd_1808:dmp_tags"

  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging; keep warnings and errors.
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.spark-project").setLevel(Level.WARN)

    val conf = new SparkConf().setAppName("TagsExtractJob")
      .setMaster("local[*]")
    val sparkSession = SparkSession.builder().config(conf).getOrCreate()

    // Load the small dictionary files once on the driver.
    // Device-type keys are numeric codes, hence the extra toInt conversion.
    val deviceInfo = loadMapping(sparkSession, "file:///E:/scala/projectdmp/data/Device-type-mapping.txt")
      .map { case (code, name) => (code.toInt, name) }
    println(deviceInfo)

    val ispInfo = loadMapping(sparkSession, "file:///E:/scala/projectdmp/data/isp-mapping.txt")
    println(ispInfo)

    val networkInfo = loadMapping(sparkSession, "file:///E:/scala/projectdmp/data/network-mapping.txt")
    println(networkInfo)

    // Broadcast the lookup tables so every executor gets one read-only copy.
    val deviceBC = sparkSession.sparkContext.broadcast(deviceInfo)
    val ispBC = sparkSession.sparkContext.broadcast(ispInfo)
    val networkBC = sparkSession.sparkContext.broadcast(networkInfo)

    // Read the standardized ad logs.
    import sparkSession.implicits._
    val adDataSet = sparkSession.read.load("file:///e:/out/standard/ad/").as[Logs]
    println("--------原始数据:----------")
    adDataSet.printSchema()
    adDataSet.show()
    println("--------提取标签:----------")

    // One (userId, tagCounts) pair per log record. Records without any usable
    // id are skipped instead of crashing the job (the original called
    // Option.get, which throws NoSuchElementException on such records).
    val userTagRDD = adDataSet.flatMap { logs =>
      val devices = deviceBC.value
      val isps = ispBC.value
      val networks = networkBC.value
      getNotEmptyId(logs).map { userId =>
        // Merge all per-dimension tag maps into one context tag map.
        val tags = AdPositionTags.extractTags(logs) ++
          AppNameTags.extractTags(logs) ++
          ChannelTags.extractTags(logs) ++
          DeviceTags.extractTags(logs, devices, isps, networks) ++
          KeywordTags.extractTags(logs) ++
          AreaTags.extractTags(logs)
        (userId, tags)
      }
    }.rdd

    // Aggregate per user: sum the counts of identical tags.
    val reduceByKeyRDD = userTagRDD.reduceByKey { (left, right) =>
      right.foldLeft(left) { case (acc, (tag, cnt)) =>
        acc + (tag -> (acc.getOrElse(tag, 0) + cnt))
      }
    }

    // Persist to HBase: row key = user id, one column per tag under family "cf".
    reduceByKeyRDD.foreachPartition { partition =>
      if (partition.nonEmpty) {
        val connection = HbaseUtils.getConnection()
        val table = connection.getTable(TableName.valueOf(TagsTableName))
        try {
          partition.foreach { case (userid, tags) =>
            val puts = new util.ArrayList[Put]()
            tags.foreach { case (key, value) =>
              val put = new Put(userid.getBytes())
              put.addColumn("cf".getBytes(), key.getBytes(), value.toString.getBytes())
              puts.add(put)
            }
            table.put(puts)
          }
        } finally {
          // Release HBase resources even when a put fails (the original
          // leaked the table/connection on any exception in the loop).
          table.close()
          HbaseUtils.returnConnection(connection)
        }
      }
    }

    sparkSession.stop()
  }

  /**
    * Loads a whitespace-separated two-column mapping file into a Map.
    * Keys are trimmed and lower-cased; values are trimmed.
    * The file is assumed small enough to collect onto the driver.
    */
  private def loadMapping(spark: SparkSession, path: String): Map[String, String] =
    spark.sparkContext.textFile(path).collect().map { line =>
      val fields = line.split("\\s+")
      (fields(0).trim.toLowerCase, fields(1).trim)
    }.toMap

  /**
    * Returns the first non-empty identifier of the log record, prefixed with
    * its source (e.g. "IMEI:", "MAC:"), or None when every id field is empty.
    * Separator characters ':' and '-' are stripped from raw hardware ids.
    */
  def getNotEmptyId(log: Logs): Option[String] = {
    log match {
      case v if v.userid.nonEmpty => Some("userId:" + v.userid.toUpperCase())
      // BUG FIX: the original pattern ":|-\\" ended in a lone backslash and
      // throws PatternSyntaxException at runtime; ":|-" matches the intent
      // and is consistent with the mac/idfa branches below.
      case v if v.imei.nonEmpty => Some("IMEI:" + v.imei.replaceAll(":|-", "").toUpperCase)
      case v if v.imeimd5.nonEmpty => Some("IMEIMD5:" + v.imeimd5.toUpperCase)
      case v if v.imeisha1.nonEmpty => Some("IMEISHA1:" + v.imeisha1.toUpperCase)

      case v if v.androidid.nonEmpty => Some("ANDROIDID:" + v.androidid.toUpperCase)
      case v if v.androididmd5.nonEmpty => Some("ANDROIDIDMD5:" + v.androididmd5.toUpperCase)
      case v if v.androididsha1.nonEmpty => Some("ANDROIDIDSHA1:" + v.androididsha1.toUpperCase)

      case v if v.mac.nonEmpty => Some("MAC:" + v.mac.replaceAll(":|-", "").toUpperCase)
      case v if v.macmd5.nonEmpty => Some("MACMD5:" + v.macmd5.toUpperCase)
      case v if v.macsha1.nonEmpty => Some("MACSHA1:" + v.macsha1.toUpperCase)

      case v if v.idfa.nonEmpty => Some("IDFA:" + v.idfa.replaceAll(":|-", "").toUpperCase)
      case v if v.idfamd5.nonEmpty => Some("IDFAMD5:" + v.idfamd5.toUpperCase)
      case v if v.idfasha1.nonEmpty => Some("IDFASHA1:" + v.idfasha1.toUpperCase)

      case v if v.openudid.nonEmpty => Some("OPENUDID:" + v.openudid.toUpperCase)
      // BUG FIX: prefix was misspelled "OPENDUIDMD5". NOTE(review): HBase rows
      // already written with the misspelled prefix keep the old column name.
      case v if v.openudidmd5.nonEmpty => Some("OPENUDIDMD5:" + v.openudidmd5.toUpperCase)
      case v if v.openudidsha1.nonEmpty => Some("OPENUDIDSHA1:" + v.openudidsha1.toUpperCase)
      case _ => None
    }
  }
}
