package com.zhao.biz.tag.perform

import com.typesafe.config.Config
import com.zhao.utils.{CommonUtil, JedisClusterUtils, TagUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.{Admin, Connection, ConnectionFactory}
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import redis.clients.jedis.JedisCluster

/**
 * Description: tags raw ad-log records per user (collectively, "context tags"). <br/>
 * Copyright (c) 2021, Zhao <br/>
 * A wet person does not fear the rain. <br/>
 * Date: 2021/1/14 16:17
 *
 * @author 柒柒
 * @version 1.0
 */

object MakeDetailTags {

  def main(args: Array[String]): Unit = {

    // Guard against an illegal argument count (kept disabled for local runs).
//    if (args.length != 5){
//      println("argument is wrong!!!")
//      sys.exit()
//    }

    // Local default paths used instead of the command-line arguments.
    val inputPath = "a_data/outputpath"
    val outputPath = "a_data/outputpath-tags" // currently unused; see the commented saveAsTextFile below
    // App dictionary: app metadata crawled from app registration/aggregator platforms.
    val dicPath = "a_data/app_dict.txt"
    // Stop-word list: sensitive keywords manually maintained by editors, to be filtered out.
    val stopwords = "a_data/stopwords.txt"
    // Day whose ad-log records should be analyzed (currently unused in this local run).
    val day = "2018-10-01"

    // 1. Build the SparkSession (local mode for development).
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    // 2. Prepare the HBase output table (disabled for local runs).
    //prepareHBaseTable(sc)

    // 3. Load the app dictionary and broadcast it.
    // split(..., -1) keeps trailing empty fields; a usable row needs at least 5 columns.
    // Mapping: column 4 (app id) -> column 1 (app name).
    val dicMap: Map[String, String] = sc.textFile(dicPath)
      .map(_.split("\t", -1))
      .filter(_.length >= 5)
      .map(arr => (arr(4), arr(1)))
      .collect()
      .toMap
    val bdAppNameDic: Broadcast[Map[String, String]] = sc.broadcast(dicMap)

    // Load the stop-word list and broadcast it (the 0 value is just a placeholder).
    val stopwordsDir: Map[String, Int] = sc.textFile(stopwords).map((_, 0)).collect.toMap
    val bdstopWordsDic: Broadcast[Map[String, Int]] = sc.broadcast(stopwordsDir)

    // Keep only rows that carry at least one non-empty user identifier;
    // the first non-empty identifier found will serve as the record's userID.
    val filterAfterDF: Dataset[Row] = spark.read.parquet(inputPath)
      .filter(TagUtils.hasneedOneUserId)

    // Tag every row, producing (userID, all tags generated for that row).
    val tagAfterRDD: RDD[(String, List[(String, Int)])] = filterAfterDF.rdd.mapPartitions(part => {
      // One Jedis cluster connection per partition.
      val jedisCluster: JedisCluster = JedisClusterUtils.getJedisCluster

      // BUG FIX: Iterator#map is lazy, so the original code released the Jedis
      // connection BEFORE the iterator was consumed — TagsBusiness.makeTags would
      // then run against a released cluster. Materialize the partition first,
      // release the resource, then hand back an iterator over the results.
      val tagged: List[(String, List[(String, Int)])] = part.map(row => {
        // First non-empty identifier acts as this record's userID.
        val userID: String = TagUtils.getAnyOneUserID(row)

        // Apply each tag family to the row.
        val adTag: List[(String, Int)] = TagsAdAndPlatform.makeTags(row)                 // ad type + channel
        val appTag: List[(String, Int)] = TagsApp.makeTags(row, bdAppNameDic)            // app name
        val deviceTag: List[(String, Int)] = TagsDevice.makeTags(row)                    // device info
        val keywordTag: List[(String, Int)] = TagsKeyword.makeTags(row, bdstopWordsDic)  // keywords minus stop words
        val tagsLocation: List[(String, Int)] = TagsLocation.makeTags(row)               // province / city
        val tagsBusiness: List[(String, Int)] = TagsBusiness.makeTags(row, jedisCluster) // business district (Redis lookup)

        (userID, adTag ++ appTag ++ deviceTag ++ keywordTag ++ tagsLocation ++ tagsBusiness)
      }).toList

      // Safe to release now that every row of the partition has been processed.
      JedisClusterUtils.releaseResource(jedisCluster)

      tagged.iterator
    })

    //tagAfterRDD.foreach(println)

    // Example output:
    //   (AODbebf579bc6718a4,List((LC09,1), (LN视频暂停悬浮,1), (CN100018,1), (APP爱奇艺,1), (D00010001,1), (D00020005,1), (D00030004,1), (华语剧场,1), (屌丝逆袭,1), (内地剧场,1), (网络剧,1), (ZP江西省,1), (ZC抚州市,1), (西三旗,1), (霍营,1), (回龙观,1)))
    //   (AODbebf579bc6718a4,List((LC09,1), (LN视频暂停悬浮,1), (CN100018,1), (APP爱奇艺,1), (D00010001,1), (D00020005,1), (D00030004,1), (海外剧场,1), (三角恋,1), (偶像剧,1), (海外剧场,1), (青春剧,1), (言情剧,1), (ZP上海市,1), (ZC上海市,1), (东小口,1), (天通苑,1), (立水桥,1)))

    // Merge the tag lists per user, summing the counts of identical tags.
    val nowPlatformResult: RDD[(String, List[(String, Int)])] = tagAfterRDD.reduceByKey((lst1, lst2) => {
      (lst1 ::: lst2).groupBy(_._1)
        .mapValues(_.foldLeft(0)(_ + _._2))
        .toList
    })

    nowPlatformResult.foreach(println)

    //nowPlatformResult.repartition(1).saveAsTextFile("a_data/outputpath-tags_local")

    // Release Spark resources.
    spark.stop()
  }

  /**
   * 准备HBASE表dmp_tags
   * @param sc
   */
  /**
   * Ensures the HBase output table (name read from config key "hbase.table.name")
   * exists, creating it with a single "tags" column family when it does not,
   * and configures a JobConf targeting that table.
   *
   * BUG FIX: the original code closed the Admin and Connection only inside the
   * table-creation branch, leaking both whenever the table already existed.
   * They are now closed unconditionally in a finally block.
   *
   * NOTE(review): the JobConf built at the end is discarded (the method returns
   * Unit) — presumably the caller was meant to receive it for saveAsHadoopDataset;
   * confirm against the intended write path.
   *
   * @param sc the SparkContext whose Hadoop configuration is reused for HBase
   */
  def prepareHBaseTable(sc: SparkContext): Unit = {
    // Load HBase settings from the application config.
    val load: Config = CommonUtil.load
    val hbaseTableName = load.getString("hbase.table.name")

    // Reuse Spark's Hadoop configuration and point it at the HBase ZooKeeper quorum.
    val configuration: Configuration = sc.hadoopConfiguration
    configuration.set("hbase.zookeeper.quorum", load.getString("hbase.zookeeper.host"))

    val hbConn: Connection = ConnectionFactory.createConnection(configuration)
    val hbadmin: Admin = hbConn.getAdmin
    try {
      // Create the table if it does not exist yet.
      if (!hbadmin.tableExists(TableName.valueOf(hbaseTableName))) {
        println("这个表不可用,现在要新建了!")
        val tableDescriptor: HTableDescriptor = new HTableDescriptor(TableName.valueOf(hbaseTableName))

        // Single column family holding all tag data.
        val columnDescriptor: HColumnDescriptor = new HColumnDescriptor("tags")
        tableDescriptor.addFamily(columnDescriptor)

        hbadmin.createTable(tableDescriptor)
      }
    } finally {
      // Always release HBase resources, whether or not the table was created.
      hbadmin.close()
      hbConn.close()
    }

    // Configure the MapReduce output: write to the HBase table via TableOutputFormat.
    val jobConf: JobConf = new JobConf(configuration)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName)
  }
}













