package com.yanduo.tags

import com.typesafe.config.ConfigFactory
import com.yanduo.utils.{JedisPools, TagsUtils}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.graphx.{Edge, Graph}
import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ListBuffer

/**
  * Merges user tags across contexts using GraphX connected components,
  * aggregating tag weights for all identifiers that belong to the same user.
  * Reference: https://www.bilibili.com/video/BV1F4411i7jK?p=43
  *
  * @author Gerry chan
  * 2020/5/31 10:43
  * @version 1.0
  */
object Tags4CtxV2 {

  def main(args: Array[String]): Unit = {
    // Five arguments are required; the previous check (!= 3) did not match the
    // 5-element destructuring below and would throw MatchError on 3 or 4 args.
    if (args.length != 5) {
      println(
        """
          |com.yanduo.tags.Tags4CtxV2
          |参数：
          |  输入路径
          |  字典文件路径
          |  停用字典
          |  日期
          |  输出路径
        """.stripMargin)
      sys.exit()
    }
    val Array(inputPath, dictFilePath, stopWordsPath, day, outputPath) = args

    val sparkConf = new SparkConf()
    sparkConf.setAppName(s"${this.getClass.getSimpleName}")
    sparkConf.setMaster("local[*]")

    // Kryo serialization for worker-to-worker RDD data transfer.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(sparkConf)
    val spark = new SparkSession(sc)
    // Required for the implicit Encoders used by Dataset.mapPartitions/flatMap below.
    import spark.implicits._

    // App dictionary (field 4 -> field 1, i.e. app id -> app name); must be
    // collected to the driver before it can be broadcast.
    val dictMap: Map[String, String] = sc.textFile(dictFilePath).map(line => {
      val fields = line.split("\t", -1)
      (fields(4), fields(1))
    }).collect().toMap

    // Stop words: one word per line. The 0 value is a placeholder that only
    // exists so the RDD can be turned into a Map for O(1) lookups.
    val stopWordsMap: Map[String, Int] = sc.textFile(stopWordsPath).map((_, 0)).collect().toMap

    // Broadcast both dictionaries to the executors.
    val broadcastAppDict = sc.broadcast(dictMap)
    val broadcastStopWordsDict = sc.broadcast(stopWordsMap)

    // HBase configuration from application.conf.
    val load = ConfigFactory.load()
    val hbTableName = load.getString("hbase.table.name")

    val configuration = sc.hadoopConfiguration
    configuration.set("hbase.zookeeper.quorum", load.getString("hbase.zookeeper.host"))

    val hbConn = ConnectionFactory.createConnection(configuration)
    val hbAdmin = hbConn.getAdmin
    try {
      // Create the output table only when it does NOT exist yet.
      // (The original condition was inverted: it tried to create the table
      // when tableExists(...) returned true.)
      if (!hbAdmin.tableExists(TableName.valueOf(hbTableName))) {
        println(s"$hbTableName 不存在....")
        println(s"$hbTableName 创建中....")
        val tableDescriptor = new HTableDescriptor(TableName.valueOf(hbTableName))
        // Single column family "cf".
        tableDescriptor.addFamily(new HColumnDescriptor("cf"))
        hbAdmin.createTable(tableDescriptor)
      }
    } finally {
      // Always release the HBase connection, whether or not the table was created
      // (previously it leaked when the table already existed).
      hbAdmin.close()
      hbConn.close()
    }

    val jobConf = new JobConf(configuration)
    // Route the job output to the HBase table.
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, hbTableName)

    // Read the parquet logs and keep only rows with at least one usable user id.
    val baseData = spark.read.parquet(inputPath).where(TagsUtils.hasSomeUserIdCondition)

    // Build the vertices: (hash of the first user id, (all user ids, tag list)).
    // mapPartitions (not map) so the Jedis connection is opened once per partition.
    val uv: Dataset[(Long, (ListBuffer[String], List[(String, Int)]))] = baseData.mapPartitions(par => {
      val jedis = JedisPools.getJedis
      val vertices = new collection.mutable.ListBuffer[(Long, (ListBuffer[String], List[(String, Int)]))]
      try {
        par.foreach(row => {
          // Tag the row along each dimension.
          val adsTags = Tags4Ads.makeTags(row)
          val appTags = Tags4App.makeTags(row, broadcastAppDict.value)
          val deviceTags = Tags4Devices.makeTags(row)
          val keywordsTags = Tags4KeyWords.makeTags(row, broadcastStopWordsDict.value)
          // Business-district tags need the Redis connection.
          // NOTE(review): `business` is computed but never merged into `tags`,
          // matching the original code — confirm whether that is intentional.
          val business = Tags4Business.makeTags(row, jedis)

          val allUserId = TagsUtils.getAllUserId(row)
          val tags = (adsTags ++ appTags ++ deviceTags ++ keywordsTags).toList

          // The first user id supplies the vertex id; the full id list travels
          // with the vertex so connected components can later merge every
          // identifier that belongs to the same user.
          vertices.append((allUserId(0).hashCode.toLong, (allUserId, tags)))
        })
      } finally {
        // Release the Redis connection even if tagging a row throws.
        jedis.close()
      }
      vertices.iterator
    })

    // Build the edges: every id of a row points at the row's first id, linking
    // all identifiers seen together in one record.
    val ue: Dataset[Edge[Int]] = baseData.flatMap(row => {
      val allUserId = TagsUtils.getAllUserId(row)
      allUserId.map(uId => Edge(allUserId(0).hashCode.toLong, uId.hashCode.toLong, 0))
    })

    // Connected components assign one component id to all vertices of a user.
    val graph = Graph(uv.rdd, ue.rdd)
    val cc = graph.connectedComponents().vertices

    cc.join(uv.rdd).map {
      // Re-key each vertex by its component id so one user's records reduce together.
      case (_, (cmId, (uids, tags))) => (cmId, (uids, tags))
    }.reduceByKey {
      case ((uidsA, tagsA), (uidsB, tagsB)) =>
        // Sum tag weights by tag name and de-duplicate the merged id list.
        val mergedTags: List[(String, Int)] =
          (tagsA ++ tagsB).groupBy(_._1).mapValues(_.foldLeft(0)(_ + _._2)).toList
        ((uidsA ++ uidsB).distinct, mergedTags)
    }.saveAsTextFile(outputPath)

    // spark.stop() also stops the underlying SparkContext.
    spark.stop()
  }
}
