package com.lw.scalaspark.core.transformations
import java.util
import java.util.Iterator

import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.{SparkConf, SparkContext, broadcast}

/**
  * flatMap is a one-to-many transformation:
  * processing one input record can produce multiple output records.
  */




object Transformations_flatMap {

  /**
    * flatMap demo: each input line is split into words, so one record in
    * yields many records out. Requires a local Spark runtime; run manually.
    */
  def map_(): Unit = {
    val conf = new SparkConf()
    conf.setAppName("map").setMaster("local")
    val sc = new SparkContext(conf)
    val infos = sc.parallelize(Array[String]("hello spark", "hello hdfs", "hello hive"))
    val result = infos.flatMap(one => {
      one.split(" ")
    })
    result.foreach(println)
  }

  /** Row is positional: row(2) fetches the third field ("a string" here). */
  def row_(): Unit = {
    val row = Row(1, true, "a string", null)
    println((row(2), (row, row)))
  }

  /** Placeholder kept from earlier iterator experiments; currently a no-op. */
  def for_(): Unit = {
    val row = Row(1, true, "a string", null)
  }

  /** Option demo: Map#get returns Some(value) when the key exists, None otherwise. */
  def optional_(): Unit = {
    val myMap: Map[String, String] = Map("key1" -> "value")
    val value1: Option[String] = myMap.get("key1")
    val value2: Option[String] = myMap.get("key2")

    println(show(value1)) // prints "value"
    println(value2.isEmpty) // prints true
  }

  /** Unwraps an Option[String], falling back to "?" when empty. */
  def show(x: Option[String]): String = x match {
    case Some(s) => s
    case None => "?"
  }

  /** The last expression is the return value; explicit `return` is unnecessary. */
  def return_(x: Int): Int =
    if (x == 1) 1 else 3

  /**
    * Extracts the text between '=' and '|'
    * (e.g. "cameraIds=1,2|cameraCount=2" -> "1,2").
    * Returns "not match" when the pattern is absent.
    */
  def reg_(str: String): Any = {
    import scala.util.matching.Regex
    val numberPattern: Regex = "\\=(\\S+)\\|".r
    numberPattern.findFirstMatchIn(str) match {
      case Some(m) => m.group(1)
      case None => "not match"
    }
  }

  /**
    * Splits a comma-separated string into a list of trimmed tokens.
    * (`collection.breakOut` was removed in Scala 2.13; `.toList` is equivalent here.)
    */
  def strToList_(str: String): List[String] =
    str.split(",").map(_.trim).toList

  /** Demonstrates intersect / union (distinct) / asymmetric diff on lists. */
  def list_diff_(): Unit = {
    val a1 = List(5, 6, 4, 7)
    val a2 = List(1, 2, 3, 5)

    // intersection
    val c1 = a1.intersect(a2)
    println(c1)

    // union, de-duplicated
    val c2 = (a1 ::: a2).distinct
    println(c2)

    // asymmetric differences
    val c3 = a1.diff(a2)
    val c4 = a2.diff(a1)
    println(c3)
    println(c4)
  }

  /** Elements of `leftList` that do not appear in `rightList`. */
  def list_diff(leftList: List[String], rightList: List[String]): List[String] =
    leftList.diff(rightList)

  /**
    * Joins `a` with commas and appends the count,
    * e.g. List("x","y") & 2 -> "cameraid=x,y|count:2".
    * Bug fix: the parameter was previously shadowed by a hard-coded
    * List("apple", "banana", "cherry"), so the argument was ignored.
    */
  def listToStringJoinCount(a: List[String], count: Int): String =
    "cameraid=" + a.mkString(",") + "|count:" + count

  def main(args: Array[String]): Unit = {

    // The snippets below are meant to be pasted into spark-shell
    // (they need the `spark` session and the `traffic` Hive database).

    //    spark.sql("use traffic")
    //    val camera_rdd = spark.sql("select * from monitor_camera_info").rdd
    //    val flow_rdd = spark.sql("select * from monitor_flow_action").rdd
    //    var flow_rdd_right = flow_rdd.map(key=>{(key.get(1),key.get(2))}).groupByKey().map(GetSpark_2.fun1)
    //    var camera_rdd_left = camera_rdd.map(key=>{(key(0),key)}).groupByKey().map(GetSpark.fun1)
    //    camera_rdd_left.leftOuterJoin(flow_rdd_right).map(fun1).take(100).foreach(println)

    //    map_()
    //    row_()
    //    for_()
    //    optional_()
    //    println(return_(1))

    val strReg = reg_("cameraIds=16051,63326,23229,17087,75828|cameraCount=4")
    println(strReg)

    //    strToList_("16051,63326,23229,17087,75828")

    //    val a1 = List("5","6","4","7")
    //    val a2 = List("1","2","3","5")
    //    println(list_diff(a1, a2))

    //    listToStringJoinCount(...)
  }
}







object GetSpark {
  /**
    * Collapses one group of camera rows into
    * (monitorId, "cameraIds=00001,00002,...|cameraCount=n").
    * Column index 1 of each row is taken as the camera id.
    *
    * Robustness fix: the old code appended "," + id and then called
    * substring(1), which throws StringIndexOutOfBoundsException on an
    * empty group; mkString handles the separator and the empty case.
    */
  def fun1(tuple: ((Any, Iterable[org.apache.spark.sql.Row]))) = {
    val monitorId = tuple._1
    val cameras = tuple._2
    // number of cameras registered for this monitor in the reference table
    val count = cameras.size
    val cameraIds = cameras.map(_.get(1)).mkString(",")
    val cameraInfos = "cameraIds=" + cameraIds + "|cameraCount=" + count
    (monitorId, cameraInfos)
  }
}



object GetSpark_2 {
  /**
    * Collapses one group of camera ids into
    * (monitorId, "cameraIds=00001,00002,...|cameraCount=n").
    *
    * Robustness fix: the old code appended "," + id and then called
    * substring(1), which throws StringIndexOutOfBoundsException on an
    * empty group; mkString handles the separator and the empty case.
    */
  def fun1(tuple: ((Any, Iterable[Any]))): (Any, String) = {
    val monitorId = tuple._1
    val cameras = tuple._2
    // number of cameras observed for this monitor
    val cameraInfos = "cameraIds=" + cameras.mkString(",") + "|cameraCount=" + cameras.size
    (monitorId, cameraInfos)
  }
}

object GetSpark_3 {

  /**
    * Splits a comma-separated string into a list of trimmed tokens.
    * (`collection.breakOut` was removed in Scala 2.13; `.toList` is equivalent here.)
    */
  def strToList_(str: String): List[String] =
    str.split(",").map(_.trim).toList

  /**
    * Extracts the text between '=' and '|'
    * (e.g. "cameraIds=1,2|cameraCount=2" -> "1,2").
    * Returns "not match" when the pattern is absent.
    */
  def reg_(str: String): String = {
    import scala.util.matching.Regex
    val numberPattern: Regex = "\\=(\\S+)\\|".r
    numberPattern.findFirstMatchIn(str) match {
      case Some(m) => m.group(1)
      case None => "not match"
    }
  }

  /** Unwraps an Option[String], falling back to "?" when empty. */
  def show(x: Option[String]): String = x match {
    case Some(s) => s
    case None => "?"
  }

  /** Elements of `leftList` that do not appear in `rightList`. */
  def list_diff(leftList: List[String], rightList: List[String]): List[String] =
    leftList.diff(rightList)

  /** Joins `a` with commas and appends the count: "cameraid=x,y|count:2". */
  def listToStringJoinCount(a: List[String], count: Int): String =
    "cameraid=" + a.mkString(",") + "|count:" + count

  /** Prints the part after '|' of a sample record (i.e. "count:10"). */
  def split_(): Unit = {
    val str = "cameraid=apple,banana,cherry|count:10".split("\\|", 0)(1)
    println(str)
  }

  /**
    * Compares a monitor's reference camera record (left) against the
    * optional observed record (right):
    *  - right missing: the whole left record is reported (all cameras missing);
    *  - "cameraCount" suffixes differ: reports the cameras present on the
    *    left but absent from the right as "cameraid=...|count:n";
    *  - suffixes equal: returns an empty string (nothing missing).
    */
  def fun1(tuple: ((Any, (String, Option[String])))): (Any, String) = {
    val monitorId = tuple._1
    val leftStr = tuple._2._1
    val rightOpt = tuple._2._2
    var expMonitorsStr = ""
    if (rightOpt.isEmpty) {
      expMonitorsStr = leftStr
    } else {
      // compare the "cameraCount=n" suffix (text after '|') of both records
      if (leftStr.split("\\|", 0)(1) != show(rightOpt).split("\\|", 0)(1)) {
        val leftList = strToList_(reg_(leftStr))
        val rightList = strToList_(reg_(show(rightOpt)))
        val expMonitors = list_diff(leftList, rightList)
        expMonitorsStr = listToStringJoinCount(expMonitors, expMonitors.length)
      }
    }
    (monitorId, expMonitorsStr)
  }

  //  def main(args: Array[String]): Unit = {
  //    split_()
  //  }
}

//===================================任务二、监控点依据车辆数目排行============================================


object MainSortJob{


  /**
    * Task 2: rank monitors by the number of distinct vehicles observed.
    * The body is commented out because it is meant to be pasted into
    * spark-shell (it needs the `spark` session and the `traffic` database):
    * dedupe (monitor, camera-car) pairs, count per monitor, then sort
    * descending by count and print the top 100.
    */
  def main(args: Array[String]): Unit = {
//    spark.sql("use traffic")
//    val flow_rdd = spark.sql("select * from monitor_flow_action").rdd
//
//    val flow_rdd_MonitorCarNumSort_tupple = flow_rdd.map(MonitorCarNumSort.concatMC).distinct()
//    flow_rdd_MonitorCarNumSort_tupple.map(key=>{(key._1,1)}).reduceByKey(_+_).map(key=>{(key._2,key._1)}).sortByKey(false).take(100).foreach(println)
  }

}




object MonitorCarNumSort{
  /**
    * Maps a flow row to (monitorId, "cameraId-carLicensePlate") so that
    * duplicate sightings can be removed before counting cars per monitor.
    * Columns: 1 = monitor id, 2 = camera id, 3 = car license plate.
    */
  def concatMC(tupple:(org.apache.spark.sql.Row)) = {
    val monitorId: Any = tupple.get(1)
    val camAndCar = s"${tupple.get(2)}-${tupple.get(3)}"
    (monitorId, camAndCar)
  }
}

//============================================任务三：使用广播变量进行统计top3卡扣车辆 详细信息============



object MonitorCarDetails{

  /**
    * Task 3 scratchpad: collects the second element of each pair into a
    * list and tests membership — "1007" is absent, so this prints false.
    * The spark-shell section below is commented out (needs `spark` and `sc`).
    */
  def main(args: Array[String]): Unit = {

    val arr = Array(("7305","0007"), ("7193","0001"), ("7164","0005"), ("7147","0006"), ("7128","0002"), ("7124","0003"), ("7064","0008"), ("7047","0000"), ("7019","0004"))
    val list: List[String] = arr.map(_._2).toList
    println(list.contains("1007"))

    // spark-shell snippet: broadcast the per-monitor counts, then fetch
    // the detailed records for the top-N monitors.
//    spark.sql("use traffic")
//    val flow_rdd = spark.sql("select * from monitor_flow_action").rdd
//
//    val flow_rdd_MonitorCarNumSort_tupple = flow_rdd.map(MonitorCarNumSort.concatMC).distinct()
//    val broadcastVariables = flow_rdd_MonitorCarNumSort_tupple.map(key=>{(key._1,1)}).reduceByKey(_+_).map(key=>{(key._2,key._1)}).sortByKey(false).collect()
//    val bc = sc.broadcast(broadcastVariables)
//    // detailed info of every car seen at the top-N monitors
//    flow_rdd.filter(MonitorCarDetailsFilter.Filter).map(key=>{(key.get(1),(key.get(2),key.get(3),key.get(4),key.get(5)))}).take(10).foreach(println)
  }

}



//    val arr = Array(("7305","0007"), ("7193","0001"), ("7164","0005"), ("7147","0006"), ("7128","0002"), ("7124","0003"), ("7064","0008"), ("7047","0000"), ("7019","0004"))
object MonitorCarDetailsFilter{
  /**
    * Keeps only rows whose monitor id (column 1) appears as the second
    * element of one of the hard-coded top-N pairs.
    * In the real job the array would come from the broadcast variable `bc`.
    */
  def Filter(tupple:(org.apache.spark.sql.Row)) = {
//    val arr:Array[(Int, Any)] =bc.value
    val arr = Array(("7305","0007"), ("7193","0001"), ("7164","0005"), ("7147","0006"), ("7128","0002"), ("7124","0003"), ("7064","0008"), ("7047","0000"), ("7019","0004"))

    val monitorId = tupple.get(1)
    arr.exists(_._2 == monitorId)
  }
}




//===================================任务四：使用自定义累加器学习使用================================



object monitorSpeedRank {
  /**
    * Buckets one monitor's speed records into (high, medium, low) counts.
    *   low:    0 < speed < 100
    *   medium: 100 <= speed <= 200
    *   high:   everything else (including speed <= 0, as in the original rules)
    * Returns the counts as strings: (monitorId, (high, medium, low)).
    */
  def speedCollection(tuple: ((Any, Iterable[Any]))) = {
    val monitorId = tuple._1
    val speeds = tuple._2.map(_.asInstanceOf[String].toInt)
    val lowCount = speeds.count(s => s > 0 && s < 100)
    val mediumCount = speeds.count(s => s >= 100 && s <= 200)
    // the three ranges partition the input, so "high" is the remainder
    val highCount = speeds.size - lowCount - mediumCount
    (monitorId, (highCount.toString, mediumCount.toString, lowCount.toString))
  }
}
object monitorSpeedRank1{

  /**
    * Scratchpad for custom orderings:
    *  - sorts a list of numeric strings lexicographically and prints it;
    *  - defines (but does not use) an Ordering over (String, String, String)
    *    that sorts ascending on _1, then _2, then DESCENDING on _3.
    */
  def main(args: Array[String]): Unit = {

    val sorted = List("4", "4", "1", "9").sortWith(_.compareTo(_) < 0)
    sorted.foreach(println)

    val mysortBy = new Ordering[Tuple3[String, String, String]] {
      override def compare(x: (String, String, String), y: (String, String, String)): Int = {
        val byFirst = x._1.compare(y._1)
        if (byFirst != 0) byFirst
        else {
          val bySecond = x._2.compare(y._2)
          // third field compared y-to-x, i.e. descending
          if (bySecond != 0) bySecond else y._3.compare(x._3)
        }
      }
    }
  }

}


//=======================================任务五、每个卡扣下车速最快的top10=======================


/**
  * Design notes (pseudocode) for task 5: top-10 fastest cars per monitor.
  *
  * Input after groupBy: (monitorId, ((carId, speed), (carId, speed), ...))
  * Output of map(func):  (monitorId, the ten highest-speed (carId, speed) pairs)
  *
  * def func(tuple):
  *   it  = tuple._2.iterator
  *   top = new Array(10)            // buffer holding the current top 10
  *   for (each <- it):
  *     if (top is not yet full):
  *       append each to top
  *     else:
  *       min = findMin(top)         // smallest speed currently kept
  *       if (each.speed > min.value):
  *         top(min.index) = each    // replace the smallest entry
  *
  * def findMin(array): Min
  *   minValue = array.min
  *   minIndex = array.indexOf(minValue)
  *   return Min(minValue, minIndex)
  */






//==========================================任务六、特定卡扣下车辆的轨迹==========================
//implicit class TupOps2[A, B](val x: (A, B)) extends AnyVal {
//  def :+[C](y: C) = (x._1, x._2, y)
//  def +:[C](y: C) = (y, x._1, x._2)
//}

object trajectoryTrack {
  /**
    * Assembles one car's trajectory: sorts its (monitorId, passTime) records
    * chronologically (by the pass-time string) and returns
    * (carId, (times-in-order, monitors-in-order)).
    *
    * Bug fix: Scala tuples have no `apply` method, so the original
    * `each(0)` / `each(1)` did not compile — replaced with `_1` / `_2`.
    * The O(n^2) `:+` accumulation loop is replaced by map + unzip.
    */
  def trackAssemb(tuple:(Any, Iterable[(Any, Any)])) = {
    val carid = tuple._1
    // chronological order: the second tuple element is the pass-time string
    val listAfterSort = tuple._2.toList.sortBy(_._2.asInstanceOf[String])
    val (traceMonitorsList, traceTimeList) =
      listAfterSort.map { case (m, t) => (m.asInstanceOf[String], t.asInstanceOf[String]) }.unzip
    (carid, (traceTimeList, traceMonitorsList))
  }
}

object trajectoryTrack1{

  /**
    * Same contract as trajectoryTrack.trackAssemb: sorts one car's
    * (monitorId, passTime) records chronologically and returns
    * (carId, (times-in-order, monitors-in-order)).
    *
    * Fixes from the original, which did not compile:
    *  - the declared result type (String, String) did not match the value returned;
    *  - sortBy on `Any` has no Ordering, so the pass time is cast to String;
    *  - tuple elements are read with `_1`/`_2` (tuples have no `apply`);
    *  - the accumulators are typed List[String] instead of inferred List[Nothing].
    */
  def trackAssemb(tuple:(Any, Iterable[(Any, Any)])): (Any, (List[String], List[String])) = {
    val carid = tuple._1
    val listAfterSort = tuple._2.toList.sortBy(_._2.asInstanceOf[String])
    var traceTimeList: List[String] = List.empty
    var traceMonitorsList: List[String] = List.empty
    for (each <- listAfterSort) {
      traceMonitorsList = traceMonitorsList :+ each._1.asInstanceOf[String]
      traceTimeList = traceTimeList :+ each._2.asInstanceOf[String]
    }
    (carid, (traceTimeList, traceMonitorsList))
  }

  def main(args: Array[String]): Unit ={
    println("================================")
    // demo: sort one car's pass records by pass time
    val testIt:Iterable[(String, String)] = Iterable(("0004","2019-06-04 11:33:56"),("0001","2019-06-04 11:59:45"),("0006","2019-06-04 11:21:47"),("0000","2019-06-04 11:23:57"),("0001","2019-06-04 11:22:52"),("0007","2019-06-04 11:48:26"),("0001","2019-06-04 11:11:43"))
    val listAfterSort = testIt.toList.sortBy(_._2)
    println(listAfterSort)

    // spark-shell snippet (needs `flow_rdd`): collect the ids of cars seen at
    // monitor 0001, then assemble each such car's full trajectory.
    //    val bccarid = flow_rdd.filter(row => { row.get(1) == "0001" }).map(_.get(3)).collect()
    //    val traceroute = flow_rdd.filter(row => { bccarid.contains(row.get(3)) }).map(key => { (key.get(3), (key.get(1), key.get(4))) }).groupByKey().map(trajectoryTrack.trackAssemb).collect()

    // list prepend demo
    var x: List[Int] = List()
    x = 1 +: x
    println(x)
  }
}




