package cn.doitedu.dmp

import java.util

import cn.doitedu.commons.utils.SparkUtil
import com.alibaba.fastjson.JSON
import com.google.gson.Gson
import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.spark.rdd.RDD

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

object Tags01 {

  /**
   * DMP user-profile tag computation job.
   *
   * Pipeline:
   *   1. Load the app-info dictionary and broadcast it to the executors.
   *   2. Flat-map the current day's access log into
   *      (guid, tagName, tagValue, score) tuples.
   *   3. Aggregate scores per (guid, tagName, tagValue).
   *   4. Full-outer-join with the previous day's result: recurring tags
   *      accumulate score, stale tags decay by a factor of 0.75, new tags
   *      keep today's score.
   *   5. Persist the flat result as parquet and render a per-user JSON view
   *      (to be loaded into HBase / Elasticsearch downstream).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkUtil.getSparkSession("dmp用户画像标签计算")
    import spark.implicits._

    // Load the app-info dictionary.
    // Sample line ('\001'-separated):
    //   com.youku.phone 优酷视频 视频播放 【精品内容，尽在优酷】 《看我的生活》明星生活状态大揭秘...
    val appInfo = spark.read.textFile("crawler/data/appinfo.txt")
    val appInfoMap = appInfo.rdd.map(line => {
      val fields = line.split("\001")
      // appId -> (appName, appCategory, appDescription)
      (fields(0), (fields(1), fields(2), fields(3)))
    }).collectAsMap()
    // The dictionary is small: broadcast it so each executor holds one copy
    // instead of shipping it with every task.
    val bc = spark.sparkContext.broadcast(appInfoMap)

    // Load the current day's access log.
    // Sample line (comma-separated, keywords '|'-separated in the last field):
    //   g01,com.creditwealth.client,mi6,中国移动,陕西省,西安市,大唐区,4G,刘亦菲|神仙姐姐|小龙女|姑姑|古墓
    val ds = spark.read.textFile("portrait/testdata/dmp/testinput/test.day02.log")
    val tagsRdd: RDD[(String, String, String, Double)] = ds.rdd.flatMap(line => {
      val appInfoDict = bc.value

      // Accumulates every tag tuple derived from this log line.
      val tagList = new ListBuffer[(String, String, String, Double)]

      val split = line.split(",")
      val guid = split(0)
      val appid = split(1)
      val info: Option[(String, String, String)] = appInfoDict.get(appid)

      // Tags derived from the app dictionary; silently skipped when the
      // app id is not in the dictionary.
      for ((appName, appType, appDesc) <- info) {
        tagList += ((guid, "APPNAME", appName, 10))
        tagList += ((guid, "APPTYPE", appType, 10))

        // Segment the app description into interest-keyword tags.
        // Explicit JavaConverters (.asScala) replaces the deprecated
        // implicit scala.collection.JavaConversions wildcard import.
        import scala.collection.JavaConverters._
        val keywords = HanLP.segment(appDesc).asScala
          .map(_.word)
          .filter(_.length > 1) // drop single-character segments (punctuation, particles)
        for (w <- keywords) {
          tagList += ((guid, "KEYWORD", w, 10))
        }
      }

      // Device model tag
      tagList += ((guid, "DEVICE", split(2), 10))

      // Carrier tag
      tagList += ((guid, "OPERATOR", split(3), 10))

      // Geography tags: province / city / region
      tagList += ((guid, "PROVINCE", split(4), 10))
      tagList += ((guid, "CITY", split(5), 10))
      tagList += ((guid, "REGION", split(6), 10))

      // Network-type tag
      tagList += ((guid, "NETTYPE", split(7), 10))

      // Search-keyword tags from the log line itself
      val wds = split(8).split("\\|")
      for (w <- wds) {
        tagList += ((guid, "KEYWORD", w, 10))
      }

      tagList
    })

    // Aggregate the score for identical (user, tag name, tag value) triples.
    tagsRdd.toDF("guid","tagname","tagvalue","score").createTempView("rawtags")
    val day02Tags = spark.sql(
      """
        |
        |select
        |guid,tagname,tagvalue,sum(score) as score
        |from  rawtags
        |group by guid,tagname,tagvalue
        |
        |""".stripMargin)

    // Merge today's result with the previous day's accumulated tags.
    val day01Tags = spark.read.parquet("portrait/testdata/dmp/testoutput/day01")

    day01Tags.createTempView("day01tags")
    day02Tags.createTempView("day02tags")

    val finalResult = spark.sql(
      """
        |select
        |
        |if(a.guid is not null ,a.guid,b.guid) as guid,
        |if(a.guid is not null ,a.tagname,b.tagname) as tagname,
        |if(a.guid is not null ,a.tagvalue,b.tagvalue) as tagvalue,
        |case
        |-- 左右都有,分数累加
        |when a.guid is not null and b.guid is not null then a.score+b.score
        |-- 左有右没,分数衰减
        |when a.guid is not null and b.guid is null then a.score*0.75
        |-- 左没右有,分数取右
        |when a.guid is null and b.guid is not null then b.score
        |end as score
        |
        |from day01tags a full join day02tags b on a.guid=b.guid and a.tagname=b.tagname and a.tagvalue=b.tagvalue
        |
        |""".stripMargin)

    // Persist today's flat tag table (tomorrow's run reads it as "day01").
    finalResult.write.parquet("portrait/testdata/dmp/testoutput/day02")

    // Pivot the flat table into one JSON document per user, to be imported
    // into HBase (or Elasticsearch) downstream.
    val jsonResult = finalResult.rdd.map(row => {
      val guid = row.getAs[String]("guid")
      val tagname = row.getAs[String]("tagname")
      val tagvalue = row.getAs[String]("tagvalue")
      val score = row.getAs[Double]("score")
      (guid, (tagname, tagvalue, score))
    })
        .groupByKey()
        .map(tp => {
          val guid = tp._1
          val tags = tp._2

          import scala.collection.JavaConverters._

          // tagName -> java List[ValueAndScore].
          // Strict .map instead of .mapValues: mapValues returns a lazy view
          // that is re-evaluated on every access and is not serializable in
          // some Scala versions — both hazards inside a Spark task.
          val tmp: Map[String, util.List[ValueAndScore]] = tags.groupBy(_._1)
            .map { case (tagName, group) =>
              tagName -> group.map(t => new ValueAndScore(t._2, t._3)).toSeq.asJava
            }

          val javaTagMap: util.Map[String, util.List[ValueAndScore]] = tmp.asJava

          // Gson needs the java.util collections above; it cannot introspect
          // Scala collections.
          val gson = new Gson()
          val tagJson = gson.toJson(javaTagMap)
          (guid, tagJson)
        })

    // Debug output; replace with a real sink when wiring up the HBase/ES load.
    jsonResult.foreach(println)

    spark.close()
  }

}
