package com.rz.spark.tags

import com.rz.spark.utils.TagsUtils
import com.typesafe.config.ConfigFactory
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Batch job: reads parquet ad logs, tags each user (ads/apps/devices/keywords),
 * aggregates tag counts per user id, and writes the result to an HBase table
 * (one column per day under family "cf").
 *
 * Args (must match the usage message below):
 *   inputPath dictFilePath day stopWordsFilePath outputPath
 */
object Tags4Ctx {
  def main(args: Array[String]): Unit = {
    if (args.length != 5) {
      println(
        """
          |com.rz.spark.tags.Tags4Ctx
          |参数：
          | 输入路径
          | 字典文件路径
          | 日期
          | 停用词库
          | 输出路径
        """.stripMargin)
      // FIX: the original only printed the usage and fell through, then crashed
      // on the Array destructuring below with a MatchError.
      sys.exit(1)
    }

    // FIX: argument order now matches the documented usage above
    // (input, dict, day, stop-words, output); the original bound the
    // stop-words path to `day` and vice versa.
    val Array(inputPath, dictFilePath, day, stopWordsFilePath, outputPath) = args

    val sparkConf = new SparkConf()
    sparkConf.setAppName(s"${this.getClass.getSimpleName}")
    // NOTE(review): local[*] is hard-coded; remove or make configurable
    // before submitting to a cluster.
    sparkConf.setMaster("local[*]")
    // Kryo serializer for faster/smaller shuffle serialization between workers.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(sparkConf)
    val sQLContext = new SQLContext(sc)

    // App dictionary (app id -> app name). Collected to the driver so the
    // broadcast variable holds the complete map, not a per-partition slice.
    val dictMap = sc.textFile(dictFilePath).map(line => {
      val fields = line.split("\t", -1)
      (fields(4), fields(1))
    }).collect().toMap

    // Stop-words dictionary (word -> 0, used as a set).
    // FIX: the original read dictFilePath again instead of stopWordsFilePath.
    val stopWordsMap = sc.textFile(stopWordsFilePath).map((_, 0)).collect().toMap

    // Ship both dictionaries to the executors once.
    val broadcastAppDict = sc.broadcast(dictMap)
    val broadcastStopWordsDict = sc.broadcast(stopWordsMap)

    val load = ConfigFactory.load()
    val hbTableName = load.getString("hbase.table.name")

    // Create the HBase table if it does not exist yet.
    val configuration = sc.hadoopConfiguration
    // FIX: the property key was misspelled "hbase.zookerper.quorum", so the
    // quorum setting was silently ignored by the HBase client.
    configuration.set("hbase.zookeeper.quorum", load.getString("hbase.zookeeper.host"))
    val hbConn = ConnectionFactory.createConnection(configuration)
    val hbAdmin = hbConn.getAdmin
    try {
      if (!hbAdmin.tableExists(TableName.valueOf(hbTableName))) {
        println(s"$hbTableName 不存在......")
        println(s"正在创建 $hbTableName ......")

        val tableDescriptor = new HTableDescriptor(TableName.valueOf(hbTableName))
        // FIX: the Puts below write to column family "cf" with qualifier
        // s"day$day"; the original created a family named s"day$day" instead,
        // so every write would have failed with NoSuchColumnFamilyException.
        tableDescriptor.addFamily(new HColumnDescriptor("cf"))
        hbAdmin.createTable(tableDescriptor)
      }
    } finally {
      // FIX: the original closed the admin/connection only when it had just
      // created the table, leaking both whenever the table already existed.
      hbAdmin.close()
      hbConn.close()
    }

    // Configure the old-API Hadoop job for HBase output.
    val jobConf = new JobConf(configuration)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, hbTableName)

    // Read, tag, aggregate, and persist.
    sQLContext.read.parquet(inputPath).where(TagsUtils.hasSomeUserIdCondition).map(row => {
      // Tag the row from every dimension.
      val ads = Tags4Ads.makeTags(row)
      val apps = Tags4App.makeTags(row, broadcastAppDict.value)
      val devices = Tags4Devices.makeTags(row)
      // FIX: pass the broadcast's value, consistent with the other taggers
      // (the original passed the Broadcast wrapper itself).
      // NOTE(review): assumes Tags4KeyWords.apply takes Map[String, Int] —
      // confirm against its declaration.
      val keywords = Tags4KeyWords(row, broadcastStopWordsDict.value)
      val allUserId = TagsUtils.getAllUserId(row)
      (allUserId(0), (ads ++ apps ++ devices ++ keywords).toList)
    }).reduceByKey((lista, listb) => {
      // Merge the two tag lists: sum the counts of identical tag keys.
      (lista ++ listb).groupBy(_._1).map {
        case (k, someTags) => (k, someTags.map(_._2).sum)
      }.toList
    }).map {
      case (userId, userTags) =>
        // Row key = user id; one cell per day holding "tag:count,tag:count,...".
        val put = new Put(Bytes.toBytes(userId))
        val tags: String = userTags.map(t => t._1 + ":" + t._2).mkString(",")
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(s"day$day"), Bytes.toBytes(tags))
        // TableOutputFormat ignores the key; the Put carries the row key.
        (new ImmutableBytesWritable(), put)
    }.saveAsHadoopDataset(jobConf)

    sc.stop()
  }
}
