package com.central.bean

import java.sql.Timestamp
import java.util
import java.util.{Date, UUID}

import com.central.bean
import com.central.utils.StringUtils
import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import scala.util.{Failure, Random, Success, Try}

/**
 * Wraps an RDD of [[WifiEntity]] rows and derives "clue" results: rows are
 * de-duplicated per capture device (deviceNumber), regrouped by person code
 * (idNumber), and emitted as [[ClueResultWithDetail]] records.
 *
 * NOTE(review): [[clue]] and [[clueAsUnion]] share ~90% of their pipeline; they
 * differ only in the threshold filter and in which resultid is written into
 * each detail (see the review note inside `clue`).
 */
class ClueRdd(private val rdd: RDD[WifiEntity]) extends Serializable {

  /**
   * Builds clue results, keeping only persons seen at >= `threshold` distinct devices.
   *
   * @param spark     used only to build a one-row placeholder RDD when the result is empty
   * @param jobId     job identifier stamped onto every emitted result
   * @param threshold minimum number of distinct deviceNumbers required per idNumber
   * @return a non-empty RDD: either the real results or a single placeholder row
   */
  def clue(spark: SparkSession, jobId: String, threshold: Int): RDD[ClueResultWithDetail] = {
    //    import com.trs.media.spark.rdd.ExtensionRdd._
    // Salt each key with a random 0..99 suffix so hot devices spread across
    // partitions (data-skew mitigation); hash-partition and cache for reuse.
    val pairsRdd: RDD[(String, WifiEntity)] = rdd
      .map(wifiEntity => (s"${wifiEntity.deviceNumber}-${Random.nextInt(100)}", wifiEntity))
      .partitionBy(new HashPartitioner(20))
      .persist()

    // Per salted key: collect entities, skipping duplicates within a partition
    // (WifiEntity.equals compares idNumber/type/time-window/deviceNumber).
    val value = pairsRdd
      .aggregateByKey(List.empty[WifiEntity])(
        (entities, entity) => if (entities.contains(entity)) entities else entities :+ entity,
        (listOne, listAnother) => listOne.foldLeft(listAnother)((items, item) => {
          items :+ item
        })
      )

      // Strip the random salt: key becomes the plain deviceNumber again.
      .map {
        case (key, list) => (key.split("-").head, list)
      }
      // Re-combine per device and drop duplicates that crossed salt boundaries.
      .reduceByKey { case (list1, list2) => (list1 ++: list2).distinct }
      .flatMap(wifiEntities => wifiEntities._2.map(entity => (entity.idNumber, entity)))
    val clueResultRdd: RDD[ClueResultWithDetail] = value // now keyed as (idNumber, wifiEntity)

      .aggregateByKey(List.empty[WifiEntity])(
        (entities, entity) => entities :+ entity,
        (listOne, listAnother) => listOne.foldLeft(listAnother)((items, item) => {
          items :+ item
        })
      )
      //      .optGroupBy
      .filter { case (idNumber, sameEntityList) => {
        // Keep only idNumbers captured at >= threshold distinct devices.
        sameEntityList
          .groupBy(entity => entity.deviceNumber).keys.size >= threshold
      }
      }
      .map { case (idNumber, sameEntityList) => {
        val id = UUID.randomUUID() + ""
        val details = sameEntityList.map(x => {

          val deviceName = x.deviceName
          val address = x.address
          //          val location = x.location
          val phoneNumber = x.phoneNumber
          val captime = x.captime
          ResultDetail(
            //            id,
            // NOTE(review): here the detail's resultid is the entity id (x.id),
            // while clueAsUnion uses the generated result id; the merge logic in
            // ClueResultWithDetail reads details.get(0).resultid as the shared
            // result id — confirm x.id is intentional here.
            x.id,
            x.idNumber,
            x.idNumberType,
            // assumes deviceNumber is always numeric — toInt throws otherwise; TODO confirm
            x.deviceNumber.toInt,
            deviceName,
            address,
            x.location,
            phoneNumber, captime, x.latitude, x.longitude, x.picpath)
        })
        // Copy into a java.util.ArrayList, the collection type ClueResultWithDetail expects.
        val entities = new util.ArrayList[ResultDetail]()
        for (i <- 0 to details.size - 1) {
          entities.add(details(i))
        }
        ClueResultWithDetail(
          jobId,
          sameEntityList.head.showedIdNumber,
          sameEntityList.head.idNumberType,
          sameEntityList.size,
          //          sameEntityList.map(entity => entity.id).mkString(";")
          entities,
          1,
          id,
          sameEntityList.head.createTime,
          sameEntityList.head.address,
          sameEntityList.head.location,
          sameEntityList.head.phoneNumber,
          sameEntityList.head.picpath
        )
      }
      }
    // Never return an empty RDD: downstream apparently expects at least a placeholder row.
    if (clueResultRdd.isEmpty())
      spark.sparkContext.parallelize(List(bean.ClueResultWithDetail(jobId, "0", "None", 0, null, 0, null, null, null, null, null, null)))
    else
      clueResultRdd
  }

  /**
   * Same pipeline as [[clue]] but without the device-count threshold: every
   * idNumber group becomes a result. Presumably used when results of several
   * conditions are unioned and merged afterwards — TODO confirm with callers.
   *
   * @param spark used only to build a one-row placeholder RDD when the result is empty
   * @param jobId job identifier stamped onto every emitted result
   */
  def clueAsUnion(spark: SparkSession, jobId: String): RDD[ClueResultWithDetail] = {
    //    import com.trs.media.spark.rdd.ExtensionRdd._
    val pairsRdd: RDD[(String, WifiEntity)] = rdd
      .map(wifiEntity => (s"${wifiEntity.deviceNumber}-${Random.nextInt(100)}", wifiEntity))
      .partitionBy(new HashPartitioner(20))
      .persist()
    // Keys are salted with a random suffix to distribute load evenly and avoid data skew.
    val clueResultRdd: RDD[ClueResultWithDetail] = pairsRdd

      .aggregateByKey(List.empty[WifiEntity])(
        (entities, entity) => if (entities.contains(entity)) entities else entities :+ entity,
        (listOne, listAnother) => listOne.foldLeft(listAnother)((items, item) => {
          items :+ item
        })
      )

      //      .optGroupByWithCustomizeSeqOp { case (entities, entity) => {
      //        // drop idNumbers already seen for the same device
      //        if (entities.contains(entity)) entities else entities :+ entity
      //      }
      //      }

      .map { case (key, list) => (key.split("-").head, list) }
      .reduceByKey { case (list1, list2) => (list1 ++: list2).distinct } // (deviceNumber, List[WifiEntity])
      // Salt removed and rows re-aggregated per device_number: the net effect of the
      // steps above is de-duplication of rows captured at the same device/point.


      // Re-key each row as (code, detail) and aggregate per code to collect all
      // rows belonging to the same person/code.
      .flatMap(wifiEntities => wifiEntities._2.map(entity => (entity.idNumber, entity))) // -> (idNumber, wifiEntity)

      .aggregateByKey(List.empty[WifiEntity])(
        (entities, entity) => entities :+ entity,
        (listOne, listAnother) => listOne.foldLeft(listAnother)((items, item) => {
          items :+ item
        })
      )
      //      .optGroupBy

      .map { case (idNumber, sameEntityList) => {
        val id = UUID.randomUUID() + ""
        val details = sameEntityList.map(x => {
          val deviceNumber = x.deviceNumber.toInt
          val tagcode = x.idNumber
          val codeType = x.idNumberType
          val deviceName = x.deviceName
          val address = x.address
          //        val location = x.location
          val phoneNumber = x.phoneNumber
          val captime = x.captime
          ResultDetail(
            id,
            x.idNumber,
            x.idNumberType,
            x.deviceNumber.toInt,
            deviceName,
            address,
            x.location,
            x.phoneNumber, captime, x.latitude, x.longitude, x.picpath)
        })

        // Copy into a java.util.ArrayList, the collection type ClueResultWithDetail expects.
        val entities = new util.ArrayList[ResultDetail]()
        for (i <- 0 to details.size - 1) {
          entities.add(details(i))
        }
        //      ClueResult(
        //        jobId,
        //        sameEntityList.head.showedIdNumber,
        //        sameEntityList.head.idNumberType,
        //        sameEntityList.size,
        //        sameEntityList.map(entity => entity.id).mkString(";")
        //      )
        ClueResultWithDetail(
          jobId,
          sameEntityList.head.showedIdNumber,
          sameEntityList.head.idNumberType,
          sameEntityList.size,
          entities,
          1,
          id,
          sameEntityList.head.createTime,
          sameEntityList.head.address,
          sameEntityList.head.location,
          sameEntityList.head.phoneNumber,
          sameEntityList.head.picpath
        )


      }
      }
    // Never return an empty RDD: downstream apparently expects at least a placeholder row.
    if (clueResultRdd.isEmpty())
      spark.sparkContext.parallelize(List(bean.ClueResultWithDetail(jobId, "0", "None", 0, null, 0, null, null, null, null, null, null)))
    else
      clueResultRdd
  }


}

/** Raw input row as read from the source table — all columns kept as strings. */
case class OriginalDataEntity(id: String,
                              mac: String,
                              imsi: String,
                              imei: String,
                              captime: String,
                              crtime: String,
                              device_code: String,
                              phone: String)

/**
 * One wifi capture record.
 *
 * Logical identity (see `equals`/`hashCode`) is the tuple
 * (idNumber, idNumberType, behaviorStartTime, behaviorEndTime, deviceNumber);
 * all other fields are payload and do not participate in equality.
 */
case class WifiEntity(id: String,
                      showedIdNumber: String,
                      idNumber: String,
                      idNumberType: String,
                      behaviorStartTime: Timestamp,
                      behaviorEndTime: Timestamp,
                      createTime: Date,
                      deviceNumber: String,
                      deviceName: String,
                      address: String,
                      location: String,
                      captime: String,
                      latitude: java.lang.Double,
                      longitude: java.lang.Double,


                      phoneNumber: String,
                      json: String,
                      picpath: String) { // kafkaTime/finishedTime deliberately omitted for now


  /**
   * Converts this record to a [[BehaviorDetail]].
   * The table name is hard-coded: all wifi rows live in "system.wifi",
   * so the id alone is enough to locate the full row.
   */
  def toBehaviorDetail(json: Option[String]): BehaviorDetail = {
    //    import com.trs.media.utils.GlobalImplicit._
    BehaviorDetail(id, "system.wifi", String.valueOf(createTime), idNumber, behaviorStartTime.getTime, behaviorEndTime.getTime, deviceNumber, json.getOrElse(""), "105")
  }

  /** JSON rendering of the full record (json4s). */
  override def toString: String = {
    implicit val formats = Serialization.formats(NoTypeHints)
    import org.json4s.jackson.Serialization.write
    write(this)
  }

  /**
   * Custom equality over the identity fields only. Upstream rows may carry null
   * fields (e.g. idNumber), so any exception during comparison means "not equal".
   */
  override def equals(obj: scala.Any): Boolean = obj match {
    case that: WifiEntity =>
      Try {
        that.idNumber.equals(idNumber) &&
          that.idNumberType.equals(idNumberType) &&
          that.behaviorStartTime.getTime == behaviorStartTime.getTime &&
          that.behaviorEndTime.getTime == behaviorEndTime.getTime &&
          that.deviceNumber.equals(deviceNumber)
      }.getOrElse(false)
    case null => false
    case other => super.equals(other)
  }

  /**
   * FIX: `equals` was overridden without a matching `hashCode`. The case-class
   * generated hashCode mixes ALL fields, so two entities equal under `equals`
   * could hash differently — which silently breaks hash-based de-duplication
   * (e.g. `List.distinct` as used in ClueRdd). Hash exactly the fields that
   * `equals` compares; fall back to 0 when a field is null, mirroring the
   * exception tolerance of `equals`.
   */
  override def hashCode(): Int =
    Try((idNumber, idNumberType, behaviorStartTime.getTime, behaviorEndTime.getTime, deviceNumber).hashCode())
      .getOrElse(0)
}


//case class ClueResult(jobId: String, idNumber: String, dataType: String, count: Int, ids: String, conditionCount: Int = 1) {
//  def merge(another: ClueResult): ClueResult = {
//    if (idNumber.equals(another.idNumber))
//      ClueResult(jobId, idNumber, dataType, count + another.count, s"$ids;${another.ids}")
//    else
//      this
//  }
//
//  def mergeWithPlusingConditionCount(another: ClueResult): ClueResult = {
//    if (idNumber.equals(another.idNumber))
//      ClueResult(jobId, idNumber, dataType, count + another.count, s"$ids;${another.ids}", conditionCount + 1)
//    else
//      this
//  }
//}


/**
 * One clue result for a single idNumber, with its per-capture details.
 *
 * @param jobId          owning job identifier
 * @param idNumber       the (displayed) person code this result belongs to
 * @param dataType       the code type (idNumberType) of the head entity
 * @param count          number of underlying capture rows
 * @param details        mutable Java list of per-capture details (shared, mutated by merge)
 * @param conditionCount how many query conditions contributed to this result
 * @param resultid       UUID assigned when the result was created
 */
case class ClueResultWithDetail(jobId: String,
                                idNumber: String,
                                dataType: String,
                                count: Int,
                                details: util.ArrayList[ResultDetail] = new util.ArrayList[ResultDetail](),
                                var conditionCount: Int = 1,
                                resultid: String,
                                createtime: Date,
                                address: String,
                                location: String,
                                phone: String,
                                picpath: String
                               ) {

  /** Bean-style setter kept for existing callers. */
  def setConditionCount(count: Int) = {
    conditionCount = count
  }

  /**
   * FIX: `merge` and `mergeWithPlusingConditionCount` had byte-for-byte identical
   * bodies; the duplicated logic now lives here once.
   *
   * When merging, each condition produced its own resultid; incoming details are
   * rewritten to this result's first detail's resultid so the merged result
   * carries a single, consistent resultid across all details.
   *
   * NOTE(review): assumes `details` is non-empty whenever conditionCount >= 1 —
   * `details.get(0)` throws otherwise; confirm upstream always populates it.
   * CAUTION: `details` is mutated in place and shared with the returned copy.
   */
  private def mergeInto(another: ClueResultWithDetail): ClueResultWithDetail = {
    if (idNumber.equals(another.idNumber)) {
      for (i <- 0 until another.details.size()) {
        if (conditionCount >= 1) {
          val preId = details.get(0).resultid
          val detail = another.details.get(i)
          detail.setResultId(preId)
          details.add(detail)
        }
        else {
          details.add(another.details.get(i))
        }
      }
      ClueResultWithDetail(jobId, idNumber, dataType, count + another.count, details, conditionCount + 1, resultid, createtime, address, location, phone, picpath)
    }
    else
      this
  }

  /** Merges `another` result for the same idNumber; returns `this` unchanged otherwise. */
  def merge(another: ClueResultWithDetail): ClueResultWithDetail = mergeInto(another)

  /** Historically distinct from [[merge]], but the bodies were identical; kept for API compatibility. */
  def mergeWithPlusingConditionCount(another: ClueResultWithDetail): ClueResultWithDetail = mergeInto(another)
}


/** Behavior record pointing back at a source-table row (ID + TABLE locate it). */
case class BehaviorDetail(ID: String, TABLE: String, PRODUCER_TIME: String, ID_NUMBER: String, BSTART_TIME_MS: Long, BEND_TIME_MS: Long, POSITION: String, detail: String, datatype: String) {

  /** Compact JSON carrying only the row locator. */
  override def toString: String = s"""{"id":"$ID", "table":"$TABLE"}"""

  /** Stable fingerprint over the identifying fields, comma-joined in fixed order. */
  def md5: String = {
    val fingerprintKey = Seq(ID_NUMBER, BSTART_TIME_MS, BEND_TIME_MS, TABLE, POSITION).mkString(",")
    StringUtils.convertToMD5(fingerprintKey)
  }
}


/**
 * A watched ("focus") person: the person's ID-card number plus the associated detail.
 *
 * @param sfzh the person's ID-card number
 * @param info detail payload; may combine several elements, e.g. the source table
 *             and that table's primary key
 */
case class FocusPerson(sfzh: String, info: FocusPersonDetail)

/**
 * Detail record for a focus person: source table, behavior time window,
 * row id, and the group the person belongs to ("-1" / empty = no group).
 */
case class FocusPersonDetail(table: String,
                             begintime: Long,
                             endtime: Long,
                             id: String,
                             groupId: String) {

  // NOTE: groupid is intentionally emitted WITHOUT quotes — downstream parses it as-is.
  override def toString: String = s"""{"id":"${id}","table":"${table}","groupid":${groupId}}"""

  /** True unless groupId is empty or the "-1" sentinel. */
  def isInGroup = groupId.nonEmpty && !groupId.equals("-1")
}

/** Outbound warning payload; field names mirror the downstream message schema. */
case class WarningMessage(ID_NUMBER_INFO: String,
                          BEHAVIOR: String,
                          FOCUS_OBJECT: String,
                          KAFKA_TIME: String,
                          STREAMING_TIME: String,
                          ID_NUMBER: String,
                          MD5: String,
                          ADD1FIELD: String,
                          ADD2FIELD: String)

/**
 * Flat per-capture detail row attached to a clue result.
 * `resultid` is deliberately mutable: when results are merged, details are
 * re-homed onto the surviving result's id via [[setResultId]].
 */
case class ResultDetail(var resultid: String,
                        tagCode: String,
                        codetype: String,
                        devicecode: Integer,
                        deviceName: String,
                        address: String,
                        location: String,
                        phone: String,
                        captime: String,
                        latitude: java.lang.Double,
                        longtitude: java.lang.Double,
                        picpath: String) {

  /** Overwrites the owning result id on this detail. */
  def setResultId(string: String): Unit = resultid = string
}