package spark.person

import java.time.LocalDateTime
import java.util

import com.inf.featureCompare.FeatureCompare
import com.sun.jersey.core.util.Base64
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import utils.JedisUtil

import scala.collection.JavaConversions

/**
  * Created by zhangbn on 2018/10/29.
  *
  * Spark batch driver that classifies one day's capture records
  * (t_person_passinfo, partition dt=args(0)) against a base person repository
  * (pid_vid joined with t_person_passinfo, best-quality row per uuid).
  *
  * For each new record it first compares against the broadcast base repository;
  * on a hit it records pass_id -> uuid. Records with no base hit are then
  * clustered against the other records of the same day. Classification state
  * lives in Redis: bitmap key "&lt;dt&gt;_tag" marks already-handled row numbers,
  * and hash key "&lt;dt&gt;_pid_vid" maps pass_id -> cluster identifier.
  *
  * Expected arguments:
  *   args(0) = dt partition value (also used as the Redis key prefix)
  *   args(1) = LIMIT applied to the day's records
  *   args(2) = partition count for the comparison stage
  *   args(3) = similarity threshold (double)
  */
object TestClassifyPersonWithBase_Slow {

  def main(args: Array[String]): Unit = {
    //    System.setProperty("hadoop.home.dir", "E:\\hadoop-common-2.7.3-bin-master")
    //    System.setProperty("HADOOP_USER_NAME", "admin")

    val startTime: Long = System.currentTimeMillis
    println(s"[${LocalDateTime.now()}]  ClassifyPersonWithBase作业启动.......................  now_ms=${startTime}")

    val sparkConf = new SparkConf()
      .setAppName("ClassifyPersonWithBase")
    //      .setMaster("local[*]")
    sparkConf.set("spark.sql.crossJoin.enabled", "true")

    val spark = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()
    spark.sql("use ods")

    //-----------------------人员库的数据 (base repository: best-quality row per uuid)
    val ori_repo = spark.sql(
      s"""
         |select uuid,pass_id,feature from
         |(select view.*,row_number() over (partition by view.uuid order by view.quality_score desc) rank
         | from (select a.uuid,b.*
         |   from pid_vid a
         |   join t_person_passinfo b
         |   on a.pass_id=b.pass_id) view) super_view
         |where rank<=1""".stripMargin
    )
    println("读取人员库后ori_repo的分区数：" + ori_repo.rdd.partitions.size)
    val repoRdd = ori_repo.rdd.cache()
    println("人员库的数据量为：" + repoRdd.count())
    // Collect the base repository to the driver and broadcast it so every
    // executor partition can run the feature comparison locally.
    // NOTE(review): assumes the repository fits in driver/executor memory.
    val repoUUIDList = repoRdd.map(_.getAs[String]("uuid")).collect().toList
    val repoFeatureList = repoRdd.map(row => Base64.decode(row.getAs[String]("feature"))).collect().toList
    val repoUUIDListBC = spark.sparkContext.broadcast(repoUUIDList)
    // JavaConversions is deprecated; kept here as an explicit call (no implicit
    // conversion) because FeatureCompare expects a java.util.List.
    val repoFeatureListBC = spark.sparkContext.broadcast(JavaConversions.seqAsJavaList(repoFeatureList))

    //-----------------------新一天的数据 (one day's records, numbered globally by quality)
    // NOTE(review): args(0)/args(1) are interpolated directly into the SQL text.
    // Acceptable for an operator-launched batch job, but an injection risk if
    // these arguments ever come from an untrusted source.
    val ori = spark.sql(
      s"""
         |select row_number() over(partition by 1 order by a.quality_score desc) as rn, a.pass_id, a.feature
         |from t_person_passinfo a
         |where dt=${(args(0))} limit ${args(1)}""".stripMargin
    )
    println("读取一天数据后ori的分区数：" + ori.rdd.partitions.size)
    val rowRdd = ori.rdd.cache()
    println("一天的的数据量为：" + rowRdd.count())
    val rnList = rowRdd.map(_.getAs[Int]("rn")).collect()
    val passidList = rowRdd.map(_.getAs[String]("pass_id")).collect()
    val featureList = rowRdd.map(row => Base64.decode(row.getAs[String]("feature"))).collect().toList
    val rnListBC = spark.sparkContext.broadcast(rnList)
    val featureListBC = spark.sparkContext.broadcast(JavaConversions.seqAsJavaList(featureList))
    // rn -> pass_id lookup shared by every partition.
    val rpMaps = rnList.zip(passidList).toMap
    val rpMapsBC = spark.sparkContext.broadcast(rpMaps)

    val rf = rowRdd.map(row => (row.getAs[Int]("rn"), row.getAs[String]("feature")))

    println("初始时DF的分区数：" + rf.partitions.size)
    val rfRdd = rf.repartition(args(2).toInt)
    println("重置后RDD的分区数：" + rfRdd.partitions.size)

    rfRdd.foreachPartition(iterator => {
      val jedis = JedisUtil.getJedis
      try {
        val tag = args(0) + "_tag"
        val pid_vid = args(0) + "_pid_vid"
        // Hoisted out of the loop: the threshold was re-parsed from args(3)
        // on every single comparison in the original.
        val threshold = args(3).toDouble
        while (iterator.hasNext) {
          val next = iterator.next()
          // Skip rows another partition has already classified.
          // NOTE(review): the getbit/setbit check-then-act sequence below is not
          // atomic, so two partitions can still race on the same row; this is
          // pre-existing behavior, preserved here.
          if (!jedis.getbit(tag, next._1)) {
            var isTag: Boolean = false
            val feature = Base64.decode(next._2)
            // Phase 1: compare against the base repository.
            val repoScores = new FeatureCompare().featureCompByByte(feature, repoFeatureListBC.value)
            val max = repoScores.max
            if (max > threshold) {
              val idx = getMaxInArrayIdx(max, repoScores)
              val uuid = repoUUIDListBC.value(idx)
              val pass_id = rpMapsBC.value.getOrElse(next._1, "")
              if (!jedis.getbit(tag, next._1)) {
                jedis.setbit(tag, next._1, true)
                jedis.hsetnx(pid_vid, pass_id, uuid)
                isTag = true
              }
            }
            // Phase 2: no base hit — cluster against the day's own records.
            if ((!isTag) && (!jedis.getbit(tag, next._1))) {
              val dayScores = new FeatureCompare().featureCompByByte(feature, featureListBC.value)
              if (!jedis.getbit(tag, next._1)) {
                // LinkedHashSet keeps insertion order (so Redis writes happen in
                // the same order as before) while replacing the original
                // List :+ / contains combination, which was O(n^2) overall.
                val hits = scala.collection.mutable.LinkedHashSet[Int](next._1)
                for (i <- dayScores.indices) {
                  if (dayScores(i) > threshold) {
                    hits += rnListBC.value(i)
                  }
                }
                if (!jedis.getbit(tag, next._1)) {
                  val vid = rpMapsBC.value.getOrElse(next._1, "")
                  hits.foreach(rn => {
                    jedis.setbit(tag, rn, true)
                    //jedis.sadd(String.valueOf(next._1), String.valueOf(rn))
                    val pid = rpMapsBC.value.getOrElse(rn, "")
                    jedis.hsetnx(pid_vid, pid, vid)
                  })
                }
              }
            }
          }
        }
      } finally {
        // The original leaked the connection. close() releases it (and returns
        // pooled connections to the pool on Jedis >= 3 — verify JedisUtil's
        // client version if it predates that).
        jedis.close()
      }
    })
    val endTime: Long = System.currentTimeMillis
    println(s"[${LocalDateTime.now()}]  ClassifyPersonWithBase作业结束.......................  now_ms=${endTime}")
  }


  /**
    * 获取最大元素在Array中的下标
    * (Returns the index of the first element equal to `max`; falls back to 0
    * when no element matches, preserving the original method's behavior.)
    *
    * @param max   value to locate
    * @param array array to search
    * @return index of the first match, or 0 if not found
    */
  def getMaxInArrayIdx(max: Double, array: Array[Double]): Int = {
    // indexOf replaces the original's nonlocal `return` inside a for-loop;
    // the legacy "0 when absent" fallback is kept (indexOf alone returns -1).
    val idx = array.indexOf(max)
    if (idx >= 0) idx else 0
  }

  /**
    * 摸拟调用SO比较相似度
    * (Mock of the native similarity comparison: returns one random score per
    * candidate feature. For testing only — not a real comparison.)
    *
    * @param var1 probe feature bytes (unused by the mock)
    * @param var2 candidate feature list; determines the output length
    * @return array of pseudo-random scores, one per candidate
    */
  def featureCompByByte(var1: Array[Byte], var2: util.List[Array[Byte]]): Array[Double] =
    Array.fill(var2.size())(Math.random())


}