package spark.person

import java.time.LocalDateTime
import java.util

import com.inf.featureCompare.FeatureCompare
import com.sun.jersey.core.util.Base64
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.storage.StorageLevel
import utils.JedisUtil

/**
  * Created by zhangbn on 2018/10/26.
  */
object ClassifyPersonWithBase2 {

  /**
    * Daily person-classification Spark job.
    *
    * Every passinfo record of a given day is compared (via the native
    * `FeatureCompare` wrapper) against a base person repository; records with
    * no repository match are clustered against the day's own records. The
    * resulting pass_id -> uuid mapping is written to Redis, and a Redis bitmap
    * keyed by "&lt;dt&gt;_tag" marks which row numbers have already been classified
    * so concurrent tasks do not repeat work.
    *
    * Expected arguments:
    *   args(0) - partition date (dt) of t_person_passinfo to process
    *   args(1) - row limit applied to the day's data
    *   args(2) - number of partitions for the comparison stage
    *   args(3) - similarity threshold (parsed as Double)
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage hint instead of an IndexOutOfBoundsException later.
    require(args.length >= 4,
      "usage: ClassifyPersonWithBase2 <dt> <limit> <numPartitions> <threshold>")

    val startTime: Long = System.currentTimeMillis
    println(s"[${LocalDateTime.now()}]  ClassifyPersonWithBase作业启动.......................  now_ms=${startTime}")

    val sparkConf = new SparkConf()
      .setAppName("ClassifyPersonWithBase")
    sparkConf.set("spark.sql.crossJoin.enabled", "true")
    sparkConf.set("spark.default.parallelism", "100")

    val spark = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()
    spark.sql("use ods")

    // Similarity threshold parsed once on the driver; the original re-parsed
    // args(3).toDouble for every record inside the executor hot loop.
    val threshold = args(3).toDouble

    //----------------------- base person repository: best-quality record per uuid
    val ori_repo = spark.sql(
      s"""
         |select uuid,pass_id,feature from
         |(select view.*,row_number() over (partition by view.uuid order by view.quality_score desc) rank
         | from (select a.uuid,b.*
         |   from pid_vid a
         |   join t_person_passinfo b
         |   on a.pass_id=b.pass_id) view) super_view
         |where rank<=1""".stripMargin
    )
    println("读取人员库后ori_repo的分区数：" + ori_repo.rdd.partitions.size)
    val repoRdd = ori_repo.rdd.persist(StorageLevel.MEMORY_AND_DISK)
    println("人员库的数据量为：" + repoRdd.count())

    // Rebuild a DataFrame over the persisted RDD so the collects below reuse
    // the cached rows instead of re-running the Hive query.
    val schema_repo = StructType(List(
      StructField("uuid", StringType, nullable = false),
      StructField("pass_id", StringType, nullable = false),
      StructField("feature", StringType, nullable = false)
    ))
    val ori_repo_DF = spark.sqlContext.createDataFrame(repoRdd, schema_repo)
    import spark.implicits._
    // Base64-decoded repository features, broadcast to every executor.
    // NOTE(review): the index correspondence uuid(i) <-> feature(i) relies on
    // both collects preserving the cached RDD's row order (no shuffle between
    // persist and collect) — same assumption as the original; confirm.
    val repoFeatureList = ori_repo_DF.select("feature")
      .map(row => Base64.decode(row.getString(0)))
      .collectAsList()
    val repoFeatureListBC = spark.sparkContext.broadcast(repoFeatureList)
    val repoUUIDList = ori_repo_DF.select("uuid").map(_.getString(0)).collect()
    val repoUUIDListBC = spark.sparkContext.broadcast(repoUUIDList)

    //----------------------- the new day's data
    // NOTE(review): args(0)/args(1) are interpolated directly into SQL; they
    // must come from a trusted scheduler, otherwise this is injectable.
    val ori = spark.sql(
      s"""
         |select row_number() over(partition by 1 order by a.quality_score desc) as rn, a.pass_id, a.feature
         |from t_person_passinfo a
         |where dt=${(args(0))} limit ${args(1)}""".stripMargin
    )
    println("读取一天数据后ori的分区数：" + ori.rdd.partitions.size)
    val rowRdd = ori.rdd.persist(StorageLevel.MEMORY_AND_DISK)
    println("一天的数据量为：" + rowRdd.count())

    val schema = StructType(List(
      StructField("rn", IntegerType, nullable = false),
      StructField("pass_id", StringType, nullable = false),
      StructField("feature", StringType, nullable = false)
    ))
    val ori_DF = spark.sqlContext.createDataFrame(rowRdd, schema)
    val featureList = ori_DF.select("feature")
      .map(row => Base64.decode(row.getString(0)))
      .collectAsList()
    val featureListBC = spark.sparkContext.broadcast(featureList)
    val rnList = ori_DF.select("rn").map(_.getInt(0)).collect()
    val rnListBC = spark.sparkContext.broadcast(rnList)
    val passidList = ori_DF.select("pass_id").map(_.getString(0)).collect()
    // rn -> pass_id lookup, shared by all executors.
    val rpMaps = rnList.zip(passidList).toMap
    val rpMapsBC = spark.sparkContext.broadcast(rpMaps)

    val rf = rowRdd.map(row => (row.getAs[Int]("rn"), row.getAs[String]("feature")))

    println("初始时DF的分区数：" + rf.partitions.size)
    val rfRdd = rf.repartition(args(2).toInt)
    println("重置后RDD的分区数：" + rfRdd.partitions.size)

    rfRdd.foreachPartition(iterator => {
      val jedis = JedisUtil.getJedis
      val tag = args(0) + "_tag"
      val pid_vid = args(0) + "_pid_vid"
      try {
        while (iterator.hasNext) {
          val next = iterator.next()
          // Skip rows some other task has already classified.
          if (!jedis.getbit(tag, next._1)) {
            var isTag: Boolean = false
            val var1 = Base64.decode(next._2)
            // NOTE(review): a fresh FeatureCompare per record mirrors the
            // original; hoist one per partition if the class is stateless — confirm.
            val r1 = System.currentTimeMillis()
            val repoScores = new FeatureCompare().featureCompByByte(var1, repoFeatureListBC.value)
            val r2 = System.currentTimeMillis() - r1
            jedis.set("repo", String.valueOf(r2))
            // Guard against an empty repository: .max on an empty array throws.
            val max = if (repoScores.isEmpty) Double.NegativeInfinity else repoScores.max
            if (max > threshold) {
              val idx = getMaxInArrayIdx(max, repoScores)
              val uuid = repoUUIDListBC.value(idx)
              val pass_id = rpMapsBC.value.getOrElse(next._1, "")
              // Re-check the bit right before writing to narrow the race window
              // (getbit + setbit are not atomic together — same as the original).
              if (!jedis.getbit(tag, next._1)) {
                jedis.setbit(tag, next._1, true)
                jedis.hsetnx(pid_vid, pass_id, uuid)
                isTag = true
              }
            }
            // No repository match: cluster against the day's own records.
            if ((!isTag) && (!jedis.getbit(tag, next._1))) {
              val e1 = System.currentTimeMillis()
              val selfScores = new FeatureCompare().featureCompByByte(var1, featureListBC.value)
              val e2 = System.currentTimeMillis() - e1
              jedis.set("self", String.valueOf(e2))
              if (!jedis.getbit(tag, next._1)) {
                // This row plus every same-day row above threshold form a cluster.
                var list = List[Int](next._1)
                for (i <- selfScores.indices) {
                  if (selfScores(i) > threshold) {
                    val rn = rnListBC.value(i)
                    if (!list.contains(rn)) {
                      list = list :+ rn
                    }
                  }
                }
                if (!jedis.getbit(tag, next._1)) {
                  // The current row's pass_id becomes the cluster id (vid).
                  val vid = rpMapsBC.value.getOrElse(next._1, "")
                  list.foreach(x => {
                    jedis.setbit(tag, x, true)
                    val pid = rpMapsBC.value.getOrElse(x, "")
                    jedis.hsetnx(pid_vid, pid, vid)
                  })
                }
              }
            }
          }
        }
      } finally {
        // The original leaked one connection per partition. close() returns a
        // pooled Jedis to its pool (Jedis >= 3) — verify JedisUtil's contract.
        jedis.close()
      }
    })
    val endTime: Long = System.currentTimeMillis
    println(s"[${LocalDateTime.now()}]  ClassifyPersonWithBase作业结束.......................  now_ms=${endTime}")
  }


  /**
    * Index of the first occurrence of `max` in `array`, or 0 when absent
    * (preserving the original implementation's fall-through default).
    *
    * @param max   value to locate
    * @param array scores to search
    * @return first matching index, 0 if not found
    */
  def getMaxInArrayIdx(max: Double, array: Array[Double]): Int = {
    val idx = array.indexOf(max)
    if (idx >= 0) idx else 0
  }

  /**
    * Mock of the native similarity comparison: yields one pseudo-random score
    * in [0, 1) per candidate feature. Not used by main (which calls the real
    * `FeatureCompare`); kept for local testing.
    *
    * @param var1 probe feature bytes (ignored by the mock)
    * @param var2 candidate feature list
    * @return one random score per element of `var2`
    */
  def featureCompByByte(var1: Array[Byte], var2: util.List[Array[Byte]]): Array[Double] = {
    Array.fill(var2.size())(Math.random())
  }


}