package cn.edu360t.Tags
import cn.edu360t.utils.{TUtils, Tools}
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
object Tags4ContextPlus {

  /**
   * Spark batch job: builds per-user context tags from DMP parquet logs and
   * writes them as tab-separated text.
   *
   * Per input row, four tag families (ad, device, keyword, area) are generated,
   * then tag counts are summed per user via `reduceByKey`.
   *
   * Output line format: `userId\ttag1:count1\ttag2:count2...`
   *
   * @param args optional; `args(0)` overrides the output directory
   *             (defaults to the original hard-coded "F:\\dmp\\tag-ctx"
   *             for backward compatibility).
   */
  def main(args: Array[String]): Unit = {

    // Output path generalized: take it from args when provided, otherwise
    // keep the original constant so existing invocations behave identically.
    val outputPath = if (args.nonEmpty) args(0) else "F:\\dmp\\tag-ctx"

    val sparkConf = new SparkConf().setAppName("用户上下文标签")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(sparkConf)
    val sqlc = new SQLContext(sc)
    import sqlc.implicits._

    // Register a UDF so SQL can test whether a string column is non-empty.
    sqlc.udf.register("isNotEmpty", (str: String) => StringUtils.isNotEmpty(str))

    // Broadcast the app dictionary: column 4 (app id) -> column 1 (app name).
    // split limit -1 keeps trailing empty fields so column indexes stay stable.
    val appDict = sc.textFile(Tools.load.getString("app.dict.path"))
      .map(_.split("\t", -1))
      .filter(_.length >= 5)
      .map(arr => (arr(4), arr(1)))
      .collect()
      .toMap
    val appBcst = sc.broadcast(appDict)

    // Broadcast the stop-word (sensitive word) dictionary; the Map is used as
    // a set (values are null), keyed by the stop word itself.
    val stopDict: Map[String, Null] = sc.textFile(Tools.load.getString("stopword.dict.path"))
      .map((_, null))
      .collect()
      .toMap
    val stopBcst = sc.broadcast(stopDict)

    // Read the DMP log data.
    val dataFrame = sqlc.read.parquet(Tools.load.getString("dmp.parquet.path"))

    val datasetResult: Dataset[(String, List[(String, Int)])] = dataFrame
      .filter(TUtils.hasUserIds) // drop rows where all 15 user-id fields are empty
      .map(row => {
        // 1. Pull the user id out of the row.
        val userId = TUtils.getUserId(row)
        // 2. Generate each tag family for this row.
        val adTags = Tags4Ads.makeTags(row)
        val deviceTags = Tags4Device.makeTags(row, appBcst)
        val keywordTags = Tags4KeyWords.makeTags(row, stopBcst)
        val areaTags = Tags4Area.makeTags(row)

        val currentLineTags = adTags ++ deviceTags ++ keywordTags ++ areaTags

        // Aggregation shape: (key = userId, value = List((tag, 1), ...)).
        (userId, currentLineTags.toList)
      })

    val rddResult: RDD[(String, List[(String, Int)])] = datasetResult.rdd
    rddResult
      // Merge the tag lists per user, summing counts for identical tag keys.
      .reduceByKey((list1, list2) =>
        (list1 ++ list2).groupBy(_._1).mapValues(_.foldLeft(0)(_ + _._2)).toList
      )
      // Serialize each user's tags: userId\ttag1:count1\ttag2:count2...
      .map {
        case (uId, tags) => uId + "\t" + tags.map(tp => tp._1 + ":" + tp._2).mkString("\t")
      }
      .saveAsTextFile(outputPath)

    sc.stop()
  }

}