package spark.sparkStreaming

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext, Time}

/**
  * Basic socket word count. A receiver occupies one executor core, so
  * "local[2]" is the minimum (one core receiving, one processing).
  * Without state, every batch forgets all previous batches.
  */
object SparkStreamingSocketPort {
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[2]").setAppName("SparkStreamingSocketPort")
		// 5-second batch interval.
		val ssc = new StreamingContext(conf, Durations.seconds(5))
		// One receiver reading text lines from host:port.
		val lines: ReceiverInputDStream[String] = ssc.socketTextStream("nn1.hadoop", 6666)
		val pairs: DStream[(String, Int)] = lines.flatMap(_.split(" ")).map(word => (word, 1))
		val counts: DStream[(String, Int)] = pairs.reduceByKey(_ + _)
		// rdd holds the current batch's data; time is the batch timestamp (ms).
		counts.foreachRDD((rdd, time) => {
			val collected: Array[(String, Int)] = rdd.collect()
			println(s"${time};${collected.toList}")
		})
		ssc.start()
		ssc.awaitTermination()
	}
}

/**
  * Stateful word count with updateStateByKey: keeps a running total across
  * batches and prints both updated and unchanged keys. State survival
  * requires the checkpoint directory; it does not replay historical input.
  */
object UpdateStateByKe{
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[2]").setAppName("UpdateStateByKe")
		val streamingContext = new StreamingContext(conf, Durations.seconds(5))
		// updateStateByKey requires checkpointing to persist per-key state.
		streamingContext.checkpoint("D:\\temp\\SparkStreaming0721")
		val line: ReceiverInputDStream[String] = streamingContext.socketTextStream("nn1.hadoop", 6666)
		val split: DStream[String] = line.flatMap(_.split(" "))
		val map: DStream[(String, Int)] = split.map((_, 1))
		val count: DStream[(String, Int)] = map.reduceByKey(_ + _)
		/**
		  * Invoked once per key per batch:
		  * newValues - values for this key seen in the CURRENT batch;
		  * state     - accumulated total from PREVIOUS batches (None on first sight).
		  * (The original comment had these two parameters reversed.)
		  */
		val updateValue: DStream[(String, Int)] = count.updateStateByKey((newValues: Seq[Int], state: Option[Int]) => {
			Some(state.getOrElse(0) + newValues.sum)
		})
		updateValue.foreachRDD((r, t) => { println(s"${t},${r.collect().toList}") })
		streamingContext.start()
		streamingContext.awaitTermination()
	}
}

/**
  * Recovers a whole StreamingContext (including its DStream graph) from the
  * checkpoint directory via StreamingContext.getOrCreate. On the first run
  * the factory builds a fresh context; on later runs the checkpointed one is
  * restored — so editing the factory (e.g. changing the port) has no effect
  * until the checkpoint directory is cleared.
  */
object GetOrCreateStreamContext{
	def main(args: Array[String]): Unit = {
		val checkPath = "D:/temp/checkPath"

		// Factory used only when no checkpoint exists yet.
		def createContext(): StreamingContext = {
			val conf: SparkConf = new SparkConf().setAppName("GetOrCreateStreamContext").setMaster("local[2]")
			val ssc = new StreamingContext(conf, Durations.seconds(5))
			// Checkpointing is mandatory both for recovery and for updateStateByKey.
			ssc.checkpoint(checkPath)

			val words: DStream[String] = ssc.socketTextStream("nn1.hadoop", 6666).flatMap(_.split(" "))
			val counts: DStream[(String, Int)] = words.map((_, 1)).reduceByKey(_ + _)
			// Running total per key: previous state plus this batch's values.
			val totals: DStream[(String, Int)] = counts.updateStateByKey((s: Seq[Int], o: Option[Int]) => {
				var acc = o.getOrElse(0)
				s.foreach(acc += _)
				Some(acc)
			})
			totals.foreachRDD((r: RDD[(String, Int)], t: Time) => { println(s"${t},${r.collect().toList}") })
			ssc
		}

		// getOrCreate(checkpointPath, creatingFunc, hadoopConf, createOnError)
		val streamingContext: StreamingContext = StreamingContext.getOrCreate(checkPath, createContext _, new Configuration(), false)
		streamingContext.start()
		streamingContext.awaitTermination()
	}
}

/**
  * Tracks, per key, whether the key was updated in the current batch so that
  * downstream processing can filter to changed keys only; untouched keys keep
  * their totals but are flagged isUpdate = false.
  */
object GetOrCreateOnlyUpdate{
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setAppName("GetOrCreateOnlyUpdate").setMaster("local[2]")
		val checkPath = "D://temp/SparkStreaming0721_1"
		val streamingContext: StreamingContext = StreamingContext.getOrCreate(checkPath, () => {
			val streamingContext = new StreamingContext(conf, Durations.seconds(5))
			streamingContext.checkpoint(checkPath)
			val source: ReceiverInputDStream[String] = streamingContext.socketTextStream("nn1.hadoop", 6666)
			val split: DStream[String] = source.flatMap(_.split(" "))
			val map: DStream[(String, ValueUpdateStatus)] = split.map((_, ValueUpdateStatus(1)))
			val count: DStream[(String, ValueUpdateStatus)] = map.reduceByKey((a, b) => ValueUpdateStatus(a.value + b.value))
			// newValues: this batch's partial counts; state: running state from earlier batches.
			val onlyUpdateValue: DStream[(String, ValueUpdateStatus)] = count.updateStateByKey((newValues: Seq[ValueUpdateStatus], state: Option[ValueUpdateStatus]) => {
				val batchTotal = newValues.map(_.value).sum
				val last: ValueUpdateStatus = state.getOrElse(ValueUpdateStatus(0))
				// A key counts as "updated" iff it appeared in the current batch.
				last.isUpdate = newValues.nonEmpty
				last.value += batchTotal
				Some(last)
			})
			// map takes a plain function, element by element (result unused; kept for demonstration).
			onlyUpdateValue.map(_._1)

			// transform exposes the underlying RDD (convertible to DF/DS for Spark SQL); also unused here.
			onlyUpdateValue.transform(r => r.map(_._1))

			// foreachRDD is the output (action) operation.
			onlyUpdateValue.foreachRDD((r: RDD[(String, ValueUpdateStatus)], t) => {
				val updatedOnly: RDD[(String, ValueUpdateStatus)] = r.filter(_._2.isUpdate)
				println(s"${r.collect().toList}")
				println(s"${updatedOnly.collect().toList}")
			})
			streamingContext
		}, new Configuration(), false)
		streamingContext.start()
		streamingContext.awaitTermination()
	}
}

/** Mutable per-key state: `value` is the running total, `isUpdate` flags whether the key changed in the latest batch. */
case class ValueUpdateStatus(var value: Int, var isUpdate: Boolean = false) {
	override def toString: String = "value:" + value + "," + isUpdate
}

/**
  * Windowed variant of the "only changed keys" counter: the reduced stream is
  * windowed (20 s window, 10 s slide), so the stateful update fires once per
  * slide interval instead of once per 5 s batch.
  */
object GetOrCreateOnlyUpdateWindow{
	def main(args: Array[String]): Unit = {
		// App name fixed: it was copy-pasted as "GetOrCreateOnlyUpdate".
		val conf = new SparkConf().setAppName("GetOrCreateOnlyUpdateWindow").setMaster("local[2]")
		val checkPath = "D://temp/SparkStreaming26"
		val streamingContext: StreamingContext = StreamingContext.getOrCreate(checkPath, () => {
			val streamingContext = new StreamingContext(conf, Durations.seconds(5))
			streamingContext.checkpoint(checkPath)
			val source: ReceiverInputDStream[String] = streamingContext.socketTextStream("nn1.hadoop", 6666)
			val split: DStream[String] = source.flatMap(_.split(" "))
			val map: DStream[(String, ValueUpdateStatus)] = split.map((_, ValueUpdateStatus(1)))
			// window(windowDuration, slideDuration); omitting the slide makes it equal the batch interval.
			val count: DStream[(String, ValueUpdateStatus)] = map.reduceByKey((a, b) => ValueUpdateStatus(a.value + b.value)).window(Durations.seconds(20), Durations.seconds(10))
			// newValues: counts inside the current window; state: running state so far.
			val onlyUpdateValue: DStream[(String, ValueUpdateStatus)] = count.updateStateByKey((newValues: Seq[ValueUpdateStatus], state: Option[ValueUpdateStatus]) => {
				val windowTotal = newValues.map(_.value).sum
				val last: ValueUpdateStatus = state.getOrElse(ValueUpdateStatus(0))
				// A key counts as "updated" iff it appeared in the current window.
				last.isUpdate = newValues.nonEmpty
				last.value += windowTotal
				Some(last)
			})
			// map takes a plain function (result unused; kept for demonstration).
			onlyUpdateValue.map(_._1)

			// transform exposes the underlying RDD (convertible to DF/DS for Spark SQL); also unused here.
			onlyUpdateValue.transform(r => r.map(_._1))

			// foreachRDD is the output (action) operation.
			onlyUpdateValue.foreachRDD((r: RDD[(String, ValueUpdateStatus)], t) => {
				val updatedOnly: RDD[(String, ValueUpdateStatus)] = r.filter(_._2.isUpdate)
				println(s"${r.collect().toList}")
				println(s"${updatedOnly.collect().toList}")
			})
			streamingContext
		}, new Configuration(), false)
		streamingContext.start()
		streamingContext.awaitTermination()
	}
}

/**
  * Monitors a directory for *.txt files and word-counts their contents.
  * `spark.streaming.fileStream.minRememberDuration` bounds how old a file's
  * modification time may be and still be picked up.
  */
object SparkStreamingFile {
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[2]")
				.setAppName("SparkStreamingFile")
				// Consider files modified within the last 2592000 s (30 days).
				.set("spark.streaming.fileStream.minRememberDuration","2592000s")
		val streamingContext = new StreamingContext(conf, Durations.seconds(5))
		val input = "D:/temp/input/"
		// fileStream[K, V, F](directory, pathFilter, newFilesOnly, hadoopConf):
		//   K/V - key/value types read from each file (byte offset, line of text here)
		//   F   - Hadoop input format
		//   newFilesOnly = false: also process files already present in the directory
		val inputFile: InputDStream[(LongWritable, Text)] = streamingContext.fileStream[LongWritable, Text, TextInputFormat](input, (path: Path) => path.getName.endsWith(".txt"), false, new Configuration())
		val words: DStream[String] = inputFile.flatMap(_._2.toString.split(" "))
		val count: DStream[(String, Int)] = words.map((_, 1)).reduceByKey(_ + _)
		// Log-prefix typo fixed: was s"t:t${t},..." which printed a doubled "t".
		count.foreachRDD((r, t) => { println(s"t:${t},${r.collect().toList}") })
		streamingContext.start()
		streamingContext.awaitTermination()
	}
}

/**
  * Cogroups two word-count streams — one from a socket, one from monitored
  * files — that must share the same StreamingContext; per key, the result
  * carries all values from each side.
  */
object CogroupDStream{
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[2]").setAppName("CogroupDStream")
				// Consider files modified within the last 2592000 s (30 days).
				.set("spark.streaming.fileStream.minRememberDuration","2592000s")
		// Both inputs are registered on this single context.
		val ssc = new StreamingContext(conf, Durations.seconds(5))

		// Source 1: word counts from a socket receiver.
		val socketCount: DStream[(String, Int)] = ssc.socketTextStream("nn1.hadoop", 6666)
				.flatMap(_.split(" "))
				.map((_, 1))
				.reduceByKey(_ + _)

		// Source 2: word counts from *.txt files (newFilesOnly = false keeps existing files).
		val input = "D:/temp/input/"
		val fileSource: InputDStream[(LongWritable, Text)] = ssc.fileStream[LongWritable, Text, TextInputFormat](input, (path: Path) => path.getName.endsWith(".txt"), false, new Configuration())
		val fileCount: DStream[(String, Int)] = fileSource
				.flatMap(_._2.toString.split(" "))
				.map((_, 1))
				.reduceByKey(_ + _)

		// cogroup: for every key, the collections of values seen on each side.
		val joined: DStream[(String, (Iterable[Int], Iterable[Int]))] = socketCount.cogroup(fileCount)
		joined.foreachRDD((r: RDD[(String, (Iterable[Int], Iterable[Int]))], t) => println(s"t:${t},${r.collect().toList}"))
		ssc.start()
		ssc.awaitTermination()
	}
}