package com.headfirst.dmp.report.tags

import com.headfirst.dmp.utils.{RedisUtils, TagUtils}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import redis.clients.jedis.JedisCluster
/**
  * 读取parquet文件
  *
  * 对数据打标签
  *
  * 并将标签数据存储到HBASE中
  *
  */
/**
  * Spark batch job: reads parquet log files, tags each user (ad / app /
  * device / keyword / business tags), merges tags per user, and writes the
  * result into an HBase table (one column per day, 30-day TTL).
  */
object Tags4Context {

  def main(args: Array[String]): Unit = {

    // 1. Validate arguments.
    if (args.length != 5) {

      print(
        """
          |com.headfirst.dmp.report.tags.Tags4Context
          |需要参数：
          |       logInputPath
          |       resultOutputPath
          |       date                 日期，用做Hbase的列
          |       rulesFileInputPath   规则文件路径
          |       stopwordsInputPath   停用词库文件路径
        """.stripMargin)
      sys.exit(-1)
    }

    // 2. Bind arguments.
    val Array(logInputPath, resultOutputPath, date, rulesFileInputPath, stopwordsInputPath) = args

    // 3. Build the SparkSession.
    // NOTE(review): setMaster("local[*]") hard-codes local mode; remove it (and
    // pass --master on spark-submit) before deploying to a cluster.
    val conf: SparkConf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName(s"${this.getClass.getSimpleName}")
    // Kryo serialization instead of default Java serialization.
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val session: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // 4. Load the rule file (app-name mapping) and the stop-word list, collect
    // both to the driver and broadcast them.
    // FIX: the previous version broadcast the stop-word RDD itself and read it
    // inside a map closure — RDDs cannot be broadcast or referenced from inside
    // another RDD's transformations (SPARK-5063); the data must be collected to
    // the driver first, then broadcast as a plain Map.
    val ruleFile: RDD[String] = session.sparkContext.textFile(rulesFileInputPath)
    val ruleMap: Map[String, String] = ruleFile.map(line => {
      val arr: Array[String] = line.split("\t", -1)
      (arr(4), arr(1)) // appId -> appName
    }).collect().toMap

    val stopwordFile: RDD[String] = session.sparkContext.textFile(stopwordsInputPath)
    val stopwordMap: Map[String, Int] = stopwordFile.map((_, 0)).collect().toMap

    val broadcastRule: Broadcast[Map[String, String]] = session.sparkContext.broadcast(ruleMap)
    // NOTE(review): Tags4KeywordsTag.makeTags must accept Map[String, Int]
    // (the collected stop-word set) rather than an RDD — verify its signature.
    val broadcastStopWord: Broadcast[Map[String, Int]] = session.sparkContext.broadcast(stopwordMap)

    // 5. Read the parquet log, drop rows without any user identifier, and tag
    // each remaining row. One Redis connection per partition.
    val logFile: DataFrame = session.read.parquet(logInputPath)

    import session.implicits._
    val rddData: RDD[(String, Map[String, Int])] = logFile.where(TagUtils.getAllUserId).mapPartitions(part => {

      val cluster: JedisCluster = RedisUtils.getRedisCluster()

      part.map(row => {
        // Tag the row along every dimension.
        val ads: Map[String, Int] = Tags4ADTag.makeTags(row)
        val apps: Map[String, Int] = Tags4APPTag.makeTags(row)
        val devices: Map[String, Int] = Tags4DevicesTag.makeTags(row)
        val keywords: Map[String, Int] = Tags4KeywordsTag.makeTags(row, broadcastStopWord.value)
        val business: Map[String, Int] = Tags4BusinessTag.makeTags(row, cluster)

        // Key: the user's unique identifier; value: all tags for this row.
        (TagUtils.getAllUserId(row)(0), ads ++ apps ++ devices ++ keywords ++ business)
      })
    }).rdd

    // Merge tag maps for users that appear more than once (word-count style:
    // group equal tag keys, sum their counts).
    val res: RDD[(String, Map[String, Int])] = rddData.reduceByKey((a, b) => {
      (a ++ b)
        .groupBy(_._1)
        .map { case (tag, sameTags) => (tag, sameTags.map(_._2).sum) }
    })

    // Alternative plain-text output for debugging:
    // res.saveAsTextFile(resultOutputPath)

    /**
      * Persist the result into HBase.
      */
    // 1. Build the HBase connection / admin from typesafe config.
    val load: Config = ConfigFactory.load()
    val configuration: Configuration = HBaseConfiguration.create()
    configuration.set("hbase.zookeeper.quorum", load.getString("hbase.zookeeper"))

    val conn: Connection = ConnectionFactory.createConnection(configuration)
    val hbAdmin: Admin = conn.getAdmin

    val tableName = load.getString("hbase.tableName")
    val colFamilyName = load.getString("hbase.colFamilyName")

    // 2. Create the table if it does not exist yet.
    // FIX: the admin/connection were previously closed only inside this branch,
    // leaking them whenever the table already existed; they are now always
    // closed once the existence check / creation is done.
    try {
      if (!hbAdmin.tableExists(TableName.valueOf(tableName))) {

        print(s"$tableName 表不存在....")
        print(s"$tableName 表正在创建....")

        val t_name: TableName = TableName.valueOf(tableName)
        val tdb: TableDescriptorBuilder = TableDescriptorBuilder.newBuilder(t_name)

        // Declare and attach the single column family.
        val cf_name: ColumnFamilyDescriptor = ColumnFamilyDescriptorBuilder.of(colFamilyName)
        tdb.setColumnFamily(cf_name)

        hbAdmin.createTable(tdb.build())
      }
    } finally {
      hbAdmin.close()
      conn.close()
    }

    // 3. Write the tag data into HBase via the new-API TableOutputFormat.
    configuration.set("hbase.mapred.outputtable", tableName)
    configuration.set("mapreduce.outputformat.class", "org.apache.hadoop.hbase.mapreduce.TableOutputFormat")

    res.map {
      case (userId, userTags) => {
        val put = new Put(Bytes.toBytes(userId))
        // Serialize the tag map as "tag1:count1,tag2:count2,...".
        val tags = userTags.map(t => t._1 + ":" + t._2).mkString(",")

        // One column per day; keep the cell for 30 days.
        // FIX: Mutation.setTTL expects MILLISECONDS — the previous value
        // (30*24*60*60 = 2,592,000 ms) expired cells after ~43 minutes; the
        // Long literal also avoids Int overflow of the 30-day value.
        put.addColumn(Bytes.toBytes(colFamilyName), Bytes.toBytes(s"day$date"), Bytes.toBytes(tags))
          .setTTL(30L * 24 * 60 * 60 * 1000)

        (new ImmutableBytesWritable(), put)
      }
    }.saveAsNewAPIHadoopDataset(configuration)

    // Shut down Spark.
    session.stop()

  }
}
