package com.tags

import com.beans.AdLog
import com.dmp.RptUtils
import org.apache.commons.lang.StringUtils
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.{SparkConf, SparkContext}

/**
  * UserTags — builds per-device tag aggregates from raw ad logs and stores
  * them in HBase, one column per day.
  *
  * Pipeline: read CSV ad-log lines from `inputpath`, parse each record into
  * an [[com.beans.AdLog]], derive its tags via `TagsToAd.makeTags`, sum the
  * tag counts per `imei`, and write `tag1:count,tag2:count,...` into table
  * `tb_tag`, family `cf`, qualifier `day<yyyyMMdd>`.
  *
  * Arguments: inputpath outputPath
  *
  * @author 17611219021@sina.cn
  */
object UserTags {
  def main(args: Array[String]): Unit = {
    // Exactly two arguments are required (the guard below checks for 2, so
    // the usage text must list 2 — the original listed four).
    if (args.length != 2) {
      println(
        """
          |com.tags.UserTags
          |Arguments:
          | inputpath
          | outputPath
        """.stripMargin)
      sys.exit()
    }

    val sparkConf = new SparkConf()
    sparkConf.setAppName(s"${this.getClass.getSimpleName}")
    sparkConf.setMaster("local[*]")
    // Kryo serializer and snappy parquet compression (default is gzip).
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    sparkConf.set("spark.sql.parquet.compression.codec", "snappy")
    val sc: SparkContext = new SparkContext(sparkConf)
    val Array(inputpath, outputPath) = args

    // Today's date as yyyyMMdd — the daily HBase column qualifier suffix.
    // BUG FIX: `day` was never defined and "day$day" was written as a plain
    // (non-interpolated) literal, so the column name was literally "day$day".
    val day = java.time.LocalDate.now().format(java.time.format.DateTimeFormatter.BASIC_ISO_DATE)

    // Remove a stale output directory if present.
    // NOTE(review): outputPath is never written to below — confirm whether a
    // parquet-output step was removed from this job.
    val configuration = sc.hadoopConfiguration
    val fs = FileSystem.get(configuration)
    val path = new Path(outputPath)
    if (fs.exists(path)) {
      fs.delete(path, true)
    }

    // Ensure the HBase table exists; create it on first run.
    val hdconfiguration = sc.hadoopConfiguration
    hdconfiguration.set("hbase.zookeeper.quorum", "node02,node03,node04")
    val hbconn = ConnectionFactory.createConnection(hdconfiguration)
    val admin = hbconn.getAdmin
    try {
      if (!admin.tableExists(TableName.valueOf("tb_tag"))) {
        val descriptor = new HTableDescriptor(TableName.valueOf("tb_tag"))
        // BUG FIX: the family must be "cf" — the Put below writes to family
        // "cf"; the original created a family from the broken "day$day"
        // literal, so every subsequent write would have failed.
        descriptor.addFamily(new HColumnDescriptor("cf"))
        admin.createTable(descriptor)
      }
    } finally {
      // BUG FIX: always release admin and connection — the original closed
      // them only when the table had just been created, leaking both on
      // every later run.
      admin.close()
      hbconn.close()
    }

    // Configure the old-API (mapred) HBase output format used by
    // saveAsHadoopDataset.
    val jobconf = new JobConf(hdconfiguration)
    jobconf.setOutputFormat(classOf[TableOutputFormat])
    jobconf.set(TableOutputFormat.OUTPUT_TABLE, "tb_tag")

    sc.textFile(inputpath)
      .map(_.split(",", -1))
      .filter(_.length >= 3) // drop malformed records
      .map { arr =>
        val log = AdLog(arr)
        val tags = TagsToAd.makeTags(log)
        (log.imei, tags.toList)
      }
      // Merge tag counts per imei, e.g.
      // List((iqiyi,1),(pptv,1),(iqiyi,1)) => List((iqiyi,2),(pptv,1))
      .reduceByKey { (left, right) =>
        (left ++ right).groupBy(_._1).map {
          case (tag, occurrences) => (tag, occurrences.map(_._2).sum)
        }.toList
      }
      .map {
        case (imei, userTags) =>
          val put = new Put(Bytes.toBytes(imei))
          val tagString = userTags.map(t => t._1 + ":" + t._2).mkString(",")
          // One new column per day: family "cf", qualifier s"day$day".
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(s"day$day"), Bytes.toBytes(tagString))
          // The writable key is ignored by TableOutputFormat; the rowkey
          // comes from the Put itself.
          (new ImmutableBytesWritable(), put)
      }
      .saveAsHadoopDataset(jobconf)

    sc.stop()
  }

}
