package cn.doitedu.dmp

import org.apache.commons.lang3.StringUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ListBuffer

/**
 * DMP系统用户画像标签计算
 */
/**
 * DMP user-profile tag computation.
 *
 * Pipeline:
 *  1. Read one day's parsed ad-log parquet.
 *  2. Explode each log row into (guid, tagname, tagvalue, score) tuples.
 *  3. Aggregate today's scores per (guid, tag).
 *  4. Full-outer-merge with the previous day's tag output, decaying tags
 *     not seen today by a factor of 0.9.
 *  5. Drop tags whose score fell below the retention threshold (1) and
 *     write the result as the new day's tag output.
 *
 * Usage: UserProfileTagGen [currentDate] [previousDate]
 * Both arguments default to 2020-09-01 / 2020-08-31, so running with no
 * args reproduces the original hard-coded behavior exactly.
 */
object UserProfileTagGen {
  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.WARN)

    // Processing dates are parameterizable; defaults preserve original behavior.
    val curDate  = args.lift(0).getOrElse("2020-09-01")
    val prevDate = args.lift(1).getOrElse("2020-08-31")

    val spark = SparkSession.builder()
      .appName("画像标签计算")
      .master("local") // local mode for development; override via spark-submit in production
      .getOrCreate()

    import spark.implicits._

    val df: DataFrame = spark.read.parquet(s"user_portrait/data/dmp_log_output/$curDate")

    // Target shape: (guid, tag_name, tag_value, score).
    // One input row fans out into multiple tag KV pairs.
    val tags = df.flatMap(row => {
      // String columns may be null in the source data; normalize to "" to
      // avoid NPEs downstream (the original code crashed on null keywords).
      def str(col: String): String = Option(row.getAs[String](col)).getOrElse("")

      val adspacetypename      = str("adspacetypename")  // ad-space type name
      val appname              = str("appname")          // name of the app in use
      // ad-space source channel: ad platform provider id
      val adplatformproviderid = row.getAs[Int]("adplatformproviderid").toString
      // operating system (1 android, 2 ios, 3 winphone)
      val client               = row.getAs[Int]("client").toString
      val networkmannername    = str("networkmannername")
      val ispname              = str("ispname")
      val province             = str("provincename")
      val city                 = str("cityname")
      val district             = str("district")
      val apptype              = row.getAs[Int]("apptype").toString

      // keywords is a '|'-separated list; null-safe split, drop blanks.
      val keywords: Array[String] =
        str("keywords").split("\\|").map(_.trim).filter(StringUtils.isNotBlank(_))

      // Output examples:
      //   g01,APNM,捕鱼达人,1
      //   g01,NTTP,4g,1
      //   g01,OSNM,ios,1
      val guid = row.getAs[Long]("guid")

      val lst = new ListBuffer[(Long, String, String, Double)]
      lst += ((guid, TagNames.ADSP, adspacetypename, 1))
      lst += ((guid, TagNames.APNM, appname, 1))
      lst += ((guid, TagNames.ADPL, adplatformproviderid, 1))
      lst += ((guid, TagNames.OSNM, client, 1))
      lst += ((guid, TagNames.NTTP, networkmannername, 1))
      lst += ((guid, TagNames.ISPN, ispname, 1))
      lst += ((guid, TagNames.PROV, province, 1))
      lst += ((guid, TagNames.CITY, city, 1))
      lst += ((guid, TagNames.DIST, district, 1))
      lst += ((guid, TagNames.APTP, apptype, 1))
      for (kw <- keywords) {
        lst += ((guid, TagNames.KWDS, kw, 1))
      }

      lst
    }).toDF("guid", "tagname", "tagvalue", "score")

    import org.apache.spark.sql.functions._
    // Sum per-occurrence scores for each (guid, tag) within today's data.
    val tagAndScoreToday =
      tags.groupBy("guid", "tagname", "tagvalue").agg(sum("score") as "score")

    // Load the previous day's accumulated tag output for merging.
    val tagsAndScoreLastDay = spark.read.parquet(s"user_portrait/data/dmp_tag_output/$prevDate")

    /* Merge semantics (td = today, ld = last day):
         td    ld
         a,3   a,4   -> a, 3+4        (seen both days: accumulate)
               b,5   -> b, 5*0.9     (not seen today: decay)
         c,6         -> c, 6          (new tag today)
     */
    // createOrReplaceTempView instead of createTempView: the latter throws
    // AnalysisException if the view already exists in the session (re-runs).
    tagsAndScoreLastDay.createOrReplaceTempView("ld")
    tagAndScoreToday.createOrReplaceTempView("td")
    val combinedTagsAndScore = spark.sql(
      """
        |
        |select
        |nvl(td.guid,ld.guid) as guid,
        |nvl(td.tagname,ld.tagname) as tagname,
        |nvl(td.tagvalue,ld.tagvalue) as tagvalue,
        |
        |case
        | when td.guid is not null and ld.guid is not null then td.score+ld.score
        | when td.guid is not null and ld.guid is null then td.score
        | when td.guid is null and ld.guid is not null then ld.score*0.9
        |end as score
        |
        |from td full join ld on td.guid=ld.guid and td.tagname=ld.tagname and td.tagvalue=ld.tagvalue
        |
        |""".stripMargin)

    // Drop tags whose (possibly decayed) score fell below the retention threshold.
    val result = combinedTagsAndScore.where("score >= 1")

    // result is consumed twice (show + write); cache avoids recomputing the join.
    result.cache()
    result.show(100, false)
    result.write.parquet(s"user_portrait/data/dmp_tag_output/$curDate")
    spark.close()

  }

}
