package profile.dsplog2

import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.{Dataset, SparkSession}

import scala.collection.mutable

/**
  * Created by hunter.coder
  * 2019/4/18 9:10
  * Contact QQ: 657270652
  * Version: 1.0
  * More learning material: https://blog.csdn.net/coderblack/
  * Description: user-profile tag extraction from DSP log data
  **/
object DspUserTagGener2 {

  /**
    * 合并ids标签
    *
    * @param idsMap1
    * @param idsMap2
    * @return
    */
  /**
    * Merge the device-id tag maps of two beans.
    *
    * For each id kind ("imei", "idfa", "androidid"), the tag lists from both
    * sides are concatenated and scores for identical tag values are summed.
    * The result always contains all three keys; a key absent from both inputs
    * maps to an empty list (matching the original zip-based construction).
    *
    * @param idsMap1 id-kind -> list of (tagValue, score) from the first bean
    * @param idsMap2 id-kind -> list of (tagValue, score) from the second bean
    * @return merged id-kind -> list of (tagValue, summedScore)
    */
  def mergeIds(idsMap1: Map[String, List[(String, Double)]], idsMap2: Map[String, List[(String, Double)]]): Map[String, List[(String, Double)]] = {

    // Merge a single id kind: union both sides, then sum scores per tag value.
    // Factored out because the original repeated this pipeline once per key.
    def mergeKey(key: String): List[(String, Double)] =
      (idsMap1.getOrElse(key, Nil) ++ idsMap2.getOrElse(key, Nil))
        .groupBy(_._1)
        .mapValues(_.map(_._2).sum)
        .toList

    List("imei", "idfa", "androidid").map(key => key -> mergeKey(key)).toMap
  }

  /**
    * 合并兴趣关键字标签
    *
    * @param kwds1
    * @param kwds2
    * @return
    */
  /**
    * Merge the interest-keyword tag maps of two beans.
    *
    * The "interestKwds" lists of both inputs are unioned and the scores of
    * identical keywords are summed.
    *
    * @param kwds1 keyword tags of the first bean
    * @param kwds2 keyword tags of the second bean
    * @return a map with the single key "interestKwds" -> merged (keyword, score) list
    */
  def mergeKwds(kwds1: Map[String, List[(String, Double)]], kwds2: Map[String, List[(String, Double)]]): Map[String, List[(String, Double)]] = {

    val key = "interestKwds"

    // Concatenate the keyword lists of both beans (missing key -> empty list).
    val combined = kwds1.getOrElse(key, Nil) ++ kwds2.getOrElse(key, Nil)

    // Group by keyword and accumulate the score of each group.
    val merged = combined
      .groupBy { case (kw, _) => kw }
      .mapValues(pairs => pairs.map { case (_, score) => score }.sum)
      .toList

    Map(key -> merged)
  }

  /**
    * 将两个bean按标签体系类别合并--》 标签值union，分数累加
    *
    * @param b1
    * @param b2
    * @return
    */
  /**
    * Combine two beans that share the same guid into one:
    * tag values are unioned per tag category, and scores are summed.
    *
    * @param b1 first bean
    * @param b2 second bean (assumed to have the same guid as b1)
    * @return a new bean carrying b1's guid and the merged tag maps
    */
  def merge(b1: DspUserTagBean, b2: DspUserTagBean): DspUserTagBean = {
    // TODO: also merge deviceTags, locTags, adTags and appTags once those
    // categories are populated (see the extraction in main).
    DspUserTagBean(
      b1.guid,
      mergeIds(b1.idsTags, b2.idsTags),     // merge device-id tags
      mergeKwds(b1.dspKwTags, b2.dspKwTags) // merge interest-keyword tags
    )
  }

  def main(args: Array[String]): Unit = {

    // NOTE(review): paths are hard-coded to a local Windows disk — fine for a
    // local test run, but should come from args for a cluster deployment.
    val inpath = "G:\\testdata\\usertags\\dspidlog-T"
    val outpath = "G:\\testdata\\usertags\\out-dsptags-T"

    val spark = SparkSession
      .builder()
      .appName("dspusertag")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    val df = spark.read.parquet(inpath)

    val tagRes: Dataset[DspUserTagBean2] = df.rdd
      .map(row => {
        val guid = row.getAs[String]("guid")

        val imei = row.getAs[String]("imei")
        val idfa = row.getAs[String]("idfa")
        val androidid = row.getAs[String]("androidid")

        val keywords = row.getAs[String]("keywords")

        // Also available: province, city, appname, appcat, appdesc_kwds

        // Only emit an id entry when the column is non-blank; each initial
        // tag value starts with score 1.0.
        val idsMap = new mutable.HashMap[String, List[(String, Double)]]()
        if (StringUtils.isNotBlank(imei)) idsMap.put("imei", (imei, 1d) :: Nil)
        if (StringUtils.isNotBlank(idfa)) idsMap.put("idfa", (idfa, 1d) :: Nil)
        if (StringUtils.isNotBlank(androidid)) idsMap.put("androidid", (androidid, 1d) :: Nil)

        // BUGFIX: the keywords column can be null (unlike the id columns it
        // was not null-guarded), which made split(" ") throw an NPE.
        // Wrap in Option so a null/absent column yields an empty tag list.
        // Keywords shorter than 2 characters are dropped as noise.
        val interestKwds = Option(keywords)
          .map(_.split(" ").filter(_.length > 1).map((_, 1d)).toList)
          .getOrElse(Nil)
        val keywordsTags = Map("interestKwds" -> interestKwds)

        DspUserTagBean(guid, idsMap.toMap[String, List[(String, Double)]], keywordsTags)

      })
      // Key by guid so all records of one user are merged together.
      .map(bean => (bean.guid, bean))
      .reduceByKey((b1, b2) => {
        merge(b1, b2)
      })
      .map(_._2)
      // Refactoring step: convert List[(tag, score)] into Map[tag, score]
      .map(b => DspUserTagBean2(b.guid, b.idsTags.mapValues(_.toMap), b.dspKwTags.mapValues(_.toMap)))
      .toDS()

    tagRes.show(10, false)

    // NOTE(review): write fails if outpath already exists; add
    // .mode(SaveMode.Overwrite) if reruns should replace previous output.
    tagRes.write.parquet(outpath)

    spark.close()

  }

}
