package spark.kafka

import java.io.{BufferedInputStream, BufferedReader, InputStreamReader}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileStatus, FileSystem, Path}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, ConsumerStrategy, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.util.LongAccumulator

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.control.Breaks

/**
  * Spark Streaming consuming messages from Kafka.
  * Kafka integrates with Spark Streaming in two ways: the Receiver-based
  * approach and the Direct approach.
  */
object SparkStreamingKafka {
	/**
	  * Word-count over a Kafka topic using the Direct approach
	  * (KafkaUtils.createDirectStream). With the Direct approach the stream's
	  * parallelism is derived from the topic's partition count.
	  */
	def main(args: Array[String]): Unit = {
		val topic = "wordcount"
		val brokers = "nn1.hadoop:9092,nn2.hadoop:9092,s1.hadoop:9092"
		val conf = new SparkConf()
		conf.setAppName("SparkStreamingKafka")
		conf.setMaster("local[*]")
		// 5-second micro-batch interval
		val streamingContext = new StreamingContext(conf, Durations.seconds(5))

		// Consumer configuration. NOTE: only *deserializers* belong here —
		// "key.serializer"/"value.serializer" are producer-side settings; the
		// consumer ignores them (and logs config warnings), so they were removed.
		val kafkaParams: mutable.HashMap[String, Object] = mutable.HashMap[String, Object]()
		kafkaParams.put("bootstrap.servers", brokers)
		kafkaParams.put("group.id", "groupmame_wordcount")
		kafkaParams.put("key.deserializer", classOf[StringDeserializer].getName())
		kafkaParams.put("value.deserializer", classOf[StringDeserializer].getName())

		// Start consuming partition 0 of the topic from offset 1.
		val offset = new mutable.HashMap[TopicPartition, Long]()
		offset += new TopicPartition(topic, 0) -> 1L
		val topicSet: Set[String] = topic.split(",").toSet
		// Subscribe to a fixed set of topics; kafkaParams holds the consumer
		// config and offset pins the starting position per partition.
		val value: ConsumerStrategy[String, String] = ConsumerStrategies.Subscribe(topicSet, kafkaParams, offset)
		// Direct approach: one InputDStream backed directly by the Kafka
		// partitions (no Receiver involved).
		val lines: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(streamingContext, LocationStrategies.PreferConsistent, value)
		// Classic word count over each micro-batch.
		val count: DStream[(String, Int)] = lines.flatMap(_.value().split(" ")).map((_, 1)).reduceByKey(_ + _)
		count.foreachRDD((r, t) => println(s"t:${t},${r.collect().toList}"))
		streamingContext.start()
		streamingContext.awaitTermination()
	}
}

//Spark Streaming reading a Kafka source through multiple direct streams
//Periodically re-reading a config file and re-broadcasting it (instead of cogroup / fileStream) to pick up config changes at runtime
//Accumulators are reset after each batch
object SparkStreamingKafkaBroadcastUpdate {
	/**
	  * Consumes one Kafka topic through several direct streams (each assigned a
	  * disjoint group of partitions), unions them into a single DStream,
	  * periodically re-reads a key/value config file and re-broadcasts it, and
	  * counts matched / unmatched words with accumulators that are reset after
	  * every batch.
	  */
	def main(args: Array[String]): Unit = {
		val topic = "hainiu_html"
		val brokers = "nn1.hadoop:9092,nn2.hadoop:9092,s1.hadoop:9092"
		val conf = new SparkConf()
		conf.setAppName("manyreceiver")
		conf.setMaster("local[2]")
		/**
		  * spark.streaming.blockInterval only affects streams created directly by
		  * StreamingContext (socket/file sources); it has no effect on streams
		  * created through KafkaUtils, whose parallelism is derived from the
		  * topic's partition count.
		  */
		//conf.set("spark.streaming.blockInterval","1000ms")
		val streamingContext = new StreamingContext(conf, Durations.seconds(3))

		// Consumer configuration. NOTE: only *deserializers* belong on a
		// consumer — "key.serializer"/"value.serializer" are producer-side
		// settings and were removed (the consumer ignores them with warnings).
		val kafkaParams: mutable.HashMap[String, Object] = mutable.HashMap[String, Object]()
		kafkaParams.put("bootstrap.servers", brokers)
		kafkaParams.put("group.id", "mygroup1")
		kafkaParams.put("key.deserializer", classOf[StringDeserializer].getName())
		kafkaParams.put("value.deserializer", classOf[StringDeserializer].getName())

//		kafkaParams.put("session.timeout.ms", "180000")
//		kafkaParams.put("request.timeout.ms", "180001")

		// 18 partitions total, 3 streams, 6 partitions per stream.
		val dstreamList = new ListBuffer[InputDStream[ConsumerRecord[String, String]]]()
		for (i <- 0 until 3) {
			println("分区")
			// BUG FIX: the original assigned partitions 0..5 to every stream,
			// consuming the same 6 partitions three times and ignoring 6..17.
			// Each stream now gets its own disjoint slice [i*6, (i+1)*6).
			val partitions: ListBuffer[TopicPartition] = new ListBuffer[TopicPartition]()
			for (ii <- i * 6 until (i + 1) * 6) {
				partitions += new TopicPartition(topic, ii)
			}
			// Assign strategy: fixed partition list, 6 partitions per stream.
			val value: ConsumerStrategy[String, String] = ConsumerStrategies.Assign[String, String](partitions, kafkaParams)

			val lines: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(streamingContext, LocationStrategies.PreferConsistent, value)

			dstreamList += lines
		}
		// Union the streams into one DStream to raise consuming parallelism.
		val inputDStream: DStream[ConsumerRecord[String, String]] = streamingContext.union(dstreamList)
		val words: DStream[String] = inputDStream.flatMap(_.value().split(" "))

		// Holds the currently-active configuration; the var is reassigned with a
		// fresh Broadcast whenever the config is refreshed.
		import scala.collection.mutable.Map
		var mapBroadcast: Broadcast[Map[String, String]] = streamingContext.sparkContext.broadcast(Map[String, String]())

		// Minimum interval between config refreshes (ms).
		val updateInterval = 10000L

		// Timestamp of the last refresh.
		var lastUpdateTime = 0L

		// Counters for words found / not found in the broadcast config.
		val matchAccumulator: LongAccumulator = streamingContext.sparkContext.longAccumulator
		val nomatchAccumulator: LongAccumulator = streamingContext.sparkContext.longAccumulator

		// This closure body runs on the Driver once per batch.
		words.foreachRDD(r => {
			println("")
			// Refresh the broadcast config when it is stale (or still empty).
			if (System.currentTimeMillis() - lastUpdateTime > updateInterval || mapBroadcast.value.isEmpty) {
				println(mapBroadcast.value.size)
				// Re-read every file under the config directory into a map of
				// "key value" pairs (one pair per line, space-separated).
				val map: mutable.Map[String, String] = Map[String, String]()
				val fileSystem: FileSystem = FileSystem.get(new Configuration())
				val statuses: Array[FileStatus] = fileSystem.listStatus(new Path("D:\\temp\\path"))
				for (f <- statuses) {
					val stream: FSDataInputStream = fileSystem.open(f.getPath)
					val reader = new BufferedReader(new InputStreamReader(stream))
					// try/finally so the stream is released even if a line is
					// malformed and split/apply throws.
					try {
						var line: String = reader.readLine()
						while (line != null) {
							val strings: Array[String] = line.split(" ")
							map(strings(0)) = strings(1)
							line = reader.readLine()
						}
					} finally {
						reader.close() // closing the reader also closes the wrapped stream
					}
				}
				// Drop the cached copies of the old broadcast (non-blocking),
				// then publish the fresh config and remember the refresh time.
				mapBroadcast.unpersist(false)
				mapBroadcast = streamingContext.sparkContext.broadcast(map)
				lastUpdateTime = System.currentTimeMillis()
			}
			// BUG FIX: this processing used to sit inside the refresh branch
			// above, so every batch that did not coincide with a config refresh
			// was silently dropped. It now runs for every batch.
			r.foreachPartition(it => {
				val value: mutable.Map[String, String] = mapBroadcast.value
				import scala.util.control.Breaks.{breakable, break}
				println(s"broadcast:${value},list:${it}")
				it.foreach(f => {
					// breakable/break acts as a "continue": count a match and
					// skip the no-match counter for this word.
					breakable {
						if (value.contains(f)) {
							matchAccumulator.add(1L)
							break()
						}
						nomatchAccumulator.add(1L)
					}
				})
			})
			println("匹配到的："+ matchAccumulator.value)
			println("没有匹配："+ nomatchAccumulator.value)
			// Reset the accumulators so each batch reports its own counts.
			matchAccumulator.reset()
			nomatchAccumulator.reset()
		})
		streamingContext.start()
		streamingContext.awaitTermination()
	}
}
