package com.yanduo.tags

import com.typesafe.config.ConfigFactory
import com.yanduo.utils.{JedisPools, TagsUtils}
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession
import redis.clients.jedis.{Jedis, JedisPool}

/**
  * 用户标签实现-step1- step4
  *https://www.bilibili.com/video/BV1F4411i7jK?p=29
  *
  * @author Gerry chan
  * 2020/5/5 10:43
  * @version 1.0
  */
object Tags4Ctx {

  /**
    * Entry point. Reads a parquet ad log, tags every row (ads / app / device /
    * keyword / business-district tags), merges tags per user and persists the
    * result to one HBase row per user.
    *
    * args: input path, dictionary file path, stop-words file path, day, output path
    */
  def main(args: Array[String]): Unit = {
    // Five positional arguments are destructured below; the original check
    // (`!= 3`) let wrong invocations fall through and fail with a MatchError
    // on the Array extractor.
    if (args.length != 5) {
      println(
        """
          |com.yanduo.tags.Tags4Ctx
          |参数：
          |  输入路径
          |  字典文件路径
          |  停用字典
          |  日期
          |  输出路径
        """.stripMargin)
      sys.exit()
    }
    val Array(inputPath, dictFilePath, stopWordsPath, day, outputPath) = args

    val sparkConf = new SparkConf()
    sparkConf.setAppName(s"${this.getClass.getSimpleName}")
    sparkConf.setMaster("local[*]")

    // Kryo serialization for RDD data shuffled between workers.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    // Use the public builder API instead of the private SparkSession constructor.
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc = spark.sparkContext

    // App dictionary (tab-separated; field 4 -> field 1). collect() pulls it
    // to the driver so it can be broadcast.
    val dictMap: Map[String, String] = sc.textFile(dictFilePath).map(line => {
      val fields = line.split("\t", -1)
      (fields(4), fields(1))
    }).collect().toMap

    // Stop-words file has one word per line; pair each word with a dummy 0 so
    // the result converts to a Map for O(1) membership tests.
    val stopWordsMap: Map[String, Int] =
      sc.textFile(stopWordsPath).map((_, 0)).collect().toMap

    // Broadcast both dictionaries to the executors.
    val broadcastAppDict = sc.broadcast(dictMap)
    val broadcastStopWordsDict = sc.broadcast(stopWordsMap)

    // External configuration: HBase table name and ZooKeeper quorum.
    val load = ConfigFactory.load()
    val hbTableName = load.getString("hbase.table.name")

    val configuration = sc.hadoopConfiguration
    configuration.set("hbase.zookeeper.quorum", load.getString("hbase.zookeeper.host"))

    // Create the HBase table if it does not exist yet. The original code had
    // this condition inverted (created the table only when it already existed)
    // and leaked the admin/connection when no table was created.
    val hbConn = ConnectionFactory.createConnection(configuration)
    val hbAdmin = hbConn.getAdmin
    try {
      if (!hbAdmin.tableExists(TableName.valueOf(hbTableName))) {
        println(s"$hbTableName 不存在....")
        println(s"$hbTableName 创建中....")
        val tableDescriptor = new HTableDescriptor(TableName.valueOf(hbTableName))
        // single column family used by the writes below
        tableDescriptor.addFamily(new HColumnDescriptor("cf"))
        hbAdmin.createTable(tableDescriptor)
      }
    } finally {
      // release HBase admin resources on every path
      hbAdmin.close()
      hbConn.close()
    }

    val jobConf = new JobConf(configuration)
    // write through the old-API TableOutputFormat into the target table
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, hbTableName)

    // Encoder for the (String, List[(String, Int)]) rows produced below;
    // the original `functions._` import does not provide encoders.
    import spark.implicits._

    // Read the parquet log, keep rows that carry at least one user id, tag
    // each row, aggregate tags per user and save to HBase. mapPartitions
    // (not map) so one Jedis connection is shared per partition.
    spark.read.parquet(inputPath).where(TagsUtils.hasSomeUserIdCondition).mapPartitions(par => {
      val jedis = JedisPools.getJedis
      val listBuffer = new collection.mutable.ListBuffer[(String, List[(String, Int)])]()
      try {
        par.foreach(row => {
          // turn the row into tag fragments
          val adsTags = Tags4Ads.makeTags(row)
          val appTags = Tags4App.makeTags(row, broadcastAppDict.value)
          val deviceTags = Tags4Devices.makeTags(row)
          val keywordsTags = Tags4KeyWords.makeTags(row, broadcastStopWordsDict.value)
          // business-district tags, looked up through Redis
          val business = Tags4Business.makeTags(row, jedis)

          val allUserId = TagsUtils.getAllUserId(row)

          // ids other than the first, e.g. (mac,0),(idfa,0); kept as 0-count
          // entries so graphx can later merge tags across one user's ids
          val otherUserId = allUserId.slice(1, allUserId.length).map(uId => (uId, 0)).toMap
          // first user id is the key; all tags (the original dropped the
          // business tags here) plus the other ids travel in the value
          listBuffer.append((allUserId(0),
            (adsTags ++ appTags ++ deviceTags ++ keywordsTags ++ business ++ otherUserId).toList))
        })
      } finally {
        // return the connection to the pool even if tagging a row throws
        jedis.close()
      }
      listBuffer.iterator
    }).rdd.reduceByKey((a, b) => {
      // After concatenation the same tag may appear several times; group by
      // tag name and sum the counts, e.g.
      // List((K电视剧,1),(App爱奇艺,1),(K电视剧,1)) -> List((K电视剧,2),(App爱奇艺,1))
      (a ++ b).groupBy(_._1).mapValues(_.foldLeft(0)(_ + _._2)).toList
    }).map {
      case (userId, userTags) =>
        // one HBase row per user: rowkey = userId, qualifier = day<day>,
        // value = "tag1:cnt1,tag2:cnt2,..."
        val put = new Put(Bytes.toBytes(userId))
        val tags = userTags.map(t => t._1 + ":" + t._2).mkString(",")
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(s"day$day"), Bytes.toBytes(tags))
        (new ImmutableBytesWritable(), put)
      // inspect the result with: scan "<table name>" in the hbase shell
    }.saveAsHadoopDataset(jobConf)

    // stop() on the session also stops the underlying SparkContext
    spark.stop()
  }
}
