package idmp

import java.util

import cn.doitedu.commons.utils.SparkUtil
import cn.doitedu.dwetl.beans.{AccountScore, GuidBinBean}
import com.alibaba.fastjson.JSON
import com.google.gson.Gson
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{DataTypes, StructType}

import scala.collection.immutable
import scala.collection.mutable.ListBuffer

// One account's binding record on a device: the account id, the (earliest) login
// timestamp used for scoring, and the binding score (higher = stronger binding).
case class BindScore(account: String, timestamp: Long, score: Int)


/**
 * Generates the binding table between APP device ids and user account ids
 * (device-id / account-id ID-mapping).
 *
 * @author hunter@doitedu.cn
 * @since 2020/8/29
 */
object AppIdBInd {
  /**
   * Driver: scores today's (device, account) login pairs, full-outer-joins them
   * with the T-1 binding table, re-elects each device's guid, and serializes the
   * new binding table rows to JSON.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkUtil.getSparkSession("APP设备id账号id绑定")
    import spark.implicits._

    // 1. Load the day-T event log as (account, deviceid, timestamp)
    val schema = new StructType()
      .add("account", DataTypes.StringType)
      .add("deviceid", DataTypes.StringType)
      .add("timestamp", DataTypes.LongType)

    val tLog = spark.read.schema(schema).csv("testdata\\idmp\\eventlog\\day2")
    tLog.show(20, false)
    tLog.printSchema()

    // 2. Deduplicate the day's (device, account) combinations, keeping only the
    //    earliest record of each pair. Equivalent Spark SQL kept for reference:
    /*tLog.createTempView("tlog")
    val pairs = spark.sql(
      """
        |
        |select
        |account,deviceid,timestamp
        |from
        |(
        |select
        |account,
        |deviceid,
        |timestamp,
        |row_number() over(partition by account,deviceid order by timestamp) as rn
        |from tlog
        |) o
        |where rn=1
        |
        |""".stripMargin)*/

    import org.apache.spark.sql.functions._
    val window = Window.partitionBy('account, 'deviceid).orderBy('timestamp)
    val pairs = tLog.select('account, 'deviceid, 'timestamp, row_number() over (window) as "rn")
      .where("rn=1")
      .select('account, 'deviceid, 'timestamp)

    pairs.show(20, false)

    // 3. On each device, give each account a score (the earlier the first login,
    //    the higher the score).
    // 3.1 Group the records by device id
    val scoredRdd = pairs.rdd.map(row => {
      val account = row.getAs[String]("account")
      val deviceid = row.getAs[String]("deviceid")
      val timestamp = row.getAs[Long]("timestamp")
      (account, deviceid, timestamp)
    }).groupBy(_._2).map(tp => {
      val deviceid = tp._1

      // 3.2 Score by login order: 100, 90, 80, ...
      //     Anonymous records (null account) are dropped, so a device with only
      //     anonymous logins yields an empty score list.
      val lst = tp._2.toList.filter(_._1 != null).sortBy(_._3)
      val scoreLst: immutable.Seq[BindScore] = for (i <- 0 until lst.size) yield BindScore(lst(i)._1, lst(i)._3, 100 - 10 * i)

      // (device id, all accounts that logged in on it, with their scores)
      (deviceid, scoreLst.toList)
    })

    scoredRdd.foreach(println)

    println("------------------------")

    /**
     * Sample output of scoredRdd:
     * (d0,List())
     * (d1,List((u1,11,100), (u2,13,90)))
     * (d2,List((u2,14,100)))
     * (d3,List((u3,14,100)))
     * (d4,List((u4,15,100)))
     * (d5,List())
     * (d8,List())
     * (d9,List((u4,18,100)))
     */

    // 4. Load the T-1 binding table (one JSON object per line:
    //    {"deviceid":..., "guid":..., "lst":[{account,timestamp,score}, ...]})
    val bindTable = spark.read.textFile("testdata\\idmp\\bindtable\\day1")
    val bindTableRdd = bindTable.rdd.map(line => {
      val obj = JSON.parseObject(line)
      val deviceid = obj.getString("deviceid")
      val guid = obj.getString("guid")

      val lstArray = obj.getJSONArray("lst")
      val lst = new ListBuffer[BindScore]()
      for (i <- 0 until lstArray.size()) {
        val bindObj = lstArray.getJSONObject(i)
        val bindScore = BindScore(bindObj.getString("account"), bindObj.getLong("timestamp"), bindObj.getIntValue("score"))
        lst += bindScore
      }
      (deviceid, (lst.toList, guid))
    })
    bindTableRdd.foreach(println)

    /**
     * Sample output of bindTableRdd:
     * (d0,(List(BindScore(u0,7,100), BindScore(u1,8,20)),u0))
     * (d1,(List(BindScore(u1,9,100)),u1))
     * (d2,(List(BindScore(u2,8,100)),u2))
     * (d3,(List(BindScore(u2,9,90)),u2))
     * (d4,(List(),d4))
     * (d5,(List(),d5))
     */

    // 5. Full outer join today's scores (left) with the history table (right),
    //    keyed by device id, so devices seen on only one side are kept.
    val joined = scoredRdd.fullOuterJoin(bindTableRdd)
    println("------------------------")
    joined.foreach(println)

    /**
     * Sample output of joined:
     * (d0,(Some(List()),Some((List(BindScore(u0,7,100), BindScore(u1,8,20)),u0))))
     * (d1,(Some(List(BindScore(u1,11,100), BindScore(u2,13,90))),Some((List(BindScore(u1,9,100)),u1))))
     * (d2,(Some(List(BindScore(u2,14,100))),Some((List(BindScore(u2,8,100)),u2))))
     * (d3,(Some(List(BindScore(u3,14,100))),Some((List(BindScore(u2,9,90)),u2))))
     * (d4,(Some(List(BindScore(u4,15,100))),Some((List(),d4))))
     * (d5,(Some(List()),Some((List(),d5))))
     * (d8,(Some(List()),None))
     * (d9,(Some(List(BindScore(u4,18,100))),None))
     * (d6,(None,Some((List(BindScore(u4,18,100)),u4))))
     */
    val result = joined.map(tp => {
      val deviceid = tp._1
      val left: Option[List[BindScore]] = tp._2._1
      val right: Option[(List[BindScore], String)] = tp._2._2

      // Pre-declare the fields of the result row (deviceid, lst, guid)
      var resLst = List.empty[BindScore]
      var resGuid: String = ""

      // Merge case by case.
      // Case 1: the history table has never seen this device
      if (right.isEmpty) {
        resLst = left.get
        // no account ever logged in -> the device id itself becomes the guid
        if (resLst.size < 1) resGuid = deviceid else resGuid = getGuid(resLst)
      }

      // Case 2: the device does not appear in today's log at all:
      // keep both the score list and the guid from history
      if (left.isEmpty) {
        resGuid = right.get._2
        resLst = right.get._1
      }

      // Case 3: both sides are Some — the two score lists must be merged
      if (left.isDefined && right.isDefined) {

        val lst1 = left.get
        val lst2 = right.get._1

        // If both lists are empty, only anonymous logins exist for this device
        if (lst1.size < 1 && lst2.size < 1) {
          resGuid = deviceid
        } else {
          // Sum the per-account scores and re-elect the guid from the merged list
          resLst = mergeScoreList(lst1, lst2)
          resGuid = getGuid(resLst)
        }
      }

      // Final result row: (device id, account score list, guid)
      (deviceid, resLst, resGuid)
    })


    println("-------------------------------")
    result.foreach(println)

    // 6. Serialize each result row to a JSON line with Gson, converting the
    //    Scala list into the java.util.ArrayList expected by GuidBinBean.
    val jsonResult = result.map(tp=>{
      val deviceid = tp._1
      val lst: List[BindScore] = tp._2
      val guid = tp._3

      val scores: util.ArrayList[AccountScore] = new util.ArrayList[AccountScore]()
      for (elem <- lst) {
        val as = new AccountScore(elem.account,elem.timestamp,elem.score)
        scores.add(as)
      }

      val gb = new GuidBinBean(deviceid,guid,scores)

      val gson = new Gson()
      gson.toJson(gb)
    })

    println("-------------------------------")
    jsonResult.foreach(println)

    spark.close()
  }

  /**
   * Elects the guid (primary account) for a device from its account score list:
   * the account with the highest score, ties broken by the earliest timestamp.
   *
   * Both call sites guard against an empty list; an empty list throws here.
   *
   * @param lst non-empty list of account binding scores for one device
   * @return the account id ranking first by (score desc, timestamp asc)
   */
  def getGuid(lst: List[BindScore]): String = {
    // minBy on (-score, timestamp) selects exactly the element a stable
    // sortBy(...)(0) would yield, without sorting the whole list and without
    // the partial apply-on-index access.
    lst.minBy(b => (-b.score, b.timestamp)).account
  }

  /**
   * Merges today's score list with the historical list for the same device.
   *
   * Entries for the same account are combined by summing their scores and
   * keeping the earliest timestamp; accounts present in only one list are
   * carried over unchanged.
   *
   * @param lst1 today's account scores
   * @param lst2 historical account scores
   * @return one BindScore per distinct account
   */
  def mergeScoreList(lst1: List[BindScore], lst2: List[BindScore]): List[BindScore] = {
    // The original version relied on a global sort by (account, timestamp) plus
    // groupBy's order preservation plus reduce keeping the LEFT operand's
    // timestamp to end up with the earliest one. Computing min/sum per group
    // states that contract directly and drops the O(n log n) sort.
    (lst1 ::: lst2)
      .groupBy(_.account)
      .map { case (account, group) =>
        BindScore(account, group.map(_.timestamp).min, group.map(_.score).sum)
      }
      .toList
  }


}
