package com.itcj.dmp.tags

import com.itcj.dmp.casename._
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.graphframes.GraphFrame

import scala.collection.immutable

object MergeTags {

  /**
   * Unified user identification.
   *
   * Records that share any identifier are considered the same user: we build a
   * graph whose vertices are records and whose edges link identifiers that
   * co-occur in one record, then collapse each connected component into a
   * single merged [[Tags]] row.
   *
   * @param unionDs       input records, each carrying a main id, an id map and a tag map
   * @param sparkSession  active session, used for encoders and checkpointing
   * @param checkpointDir checkpoint directory required by the GraphFrames
   *                      connected-components algorithm; defaults to the
   *                      previous hard-coded value `"checkpoint"`
   * @return one Tags row per connected component (i.e. per unified user)
   */
  def process(unionDs: Dataset[tagcase],
              sparkSession: SparkSession,
              checkpointDir: String = "checkpoint"): Dataset[Tags] = {
    import sparkSession.implicits._

    // 1. Vertices: one per input record, keyed by its main id.
    val vertexDf: DataFrame = unionDs
      .map(row => Vertex(row.mainIp, row.ips, row.tags))
      .toDF()

    // 2. Edges: connect every ordered pair of distinct identifiers within one
    //    record, so all identifiers of a record fall into one component.
    val edgeDf: DataFrame = unionDs.flatMap { row =>
      val ids = row.ips
      for (a <- ids; b <- ids if a != b) yield Edge(a._2, b._2)
    }.toDF()

    // 3. Connected components. GraphFrames requires a checkpoint directory to
    //    be set before running the algorithm.
    sparkSession.sparkContext.setCheckpointDir(checkpointDir)
    val components: Dataset[VertexComponent] =
      GraphFrame(vertexDf, edgeDf).connectedComponents.run().as[VertexComponent]

    // 4. Merge every vertex of a component into a single user record.
    val merged: Dataset[(Long, VertexComponent)] =
      components.groupByKey(_.component).reduceGroups(reduceUser _)

    // 5. Serialize the id/tag maps into strings for the output Tags rows.
    merged.map(mapTags)
  }

  /**
   * Combines two vertices of the same connected component: keeps the first
   * vertex's main id, unions the id maps (right-biased on key collisions, as
   * before) and sums tag weights key-wise.
   *
   * NOTE(review): `reduceGroups` gives no ordering guarantee, so which
   * vertex's id survives as the main id is nondeterministic — confirm that
   * any id of the component is acceptable as the representative.
   */
  def reduceUser(v1: VertexComponent, v2: VertexComponent): VertexComponent = {
    val ids = v1.ids ++ v2.ids
    // Sum weights for tags present on both sides; single-side tags keep
    // their weight (getOrElse supplies 0.0 for the missing side).
    val tags = (v1.tags.keySet ++ v2.tags.keySet)
      .map(k => k -> (v1.tags.getOrElse(k, 0.0) + v2.tags.getOrElse(k, 0.0)))
      .toMap
    VertexComponent(v1.id, ids, tags, v1.component)
  }

  /**
   * Converts a merged component into its external Tags row, serializing the
   * id and tag maps via TagUtils.map2Str. Field order matches the original:
   * Tags(mainId, serializedIds, serializedTags).
   */
  def mapTags(vertexComponent: (Long, VertexComponent)): Tags = {
    val (_, v) = vertexComponent
    Tags(v.id, TagUtils.map2Str(v.ids), TagUtils.map2Str(v.tags))
  }
}
