package edu.csl.study.spark.basic


import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable


object SparkStreaming_WordCount {

  // Root directory for test files / checkpoints. Built with the platform
  // separator so the path is valid on Linux as well as Windows (the original
  // hard-coded "\\" produced a literal-backslash file name on Linux).
  val rootDir: String =
    System.getProperty("user.dir") + java.io.File.separator + "testFile" + java.io.File.separator

  /**
   * Creates the streaming context with a 5-second batch interval.
   *
   * NOTE: the number of local threads must be greater than 1 ("local[2]"),
   * because one thread is dedicated to receiving data while the remaining
   * thread(s) process it.
   *
   * @return a new StreamingContext
   */
  def newContext: StreamingContext = {
    val conf: SparkConf = new SparkConf().setAppName("WordCount").setMaster("local[2]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("warn")
    new StreamingContext(sc, Seconds(5))
  }

  /**
   * Starts the streaming context, blocks until it terminates, then stops it.
   *
   * @param scc the streaming context to run and shut down
   */
  def closeStreamingContext(scc: StreamingContext): Unit = {
    scc.start()
    scc.awaitTermination()
    scc.stop()
  }

  /**
   * Socket input: before starting the program, run `nc -lk 8090` on the
   * target host; data can then be sent once the program is running.
   *
   * @param scc  the streaming context
   * @param host host to connect to (defaults to the original hard-coded value)
   * @param port port to connect to (defaults to the original hard-coded value)
   * @return a DStream of text lines read from the socket
   */
  def socketTextStream(scc: StreamingContext,
                       host: String = "192.168.100.20",
                       port: Int = 8090): DStream[String] = {
    scc.socketTextStream(host, port)
  }

  /**
   * Watches a directory (HDFS by default) for newly created files.
   *
   * A local path (file:///) also works, but the files must appear in the
   * directory as a stream: copying files in Windows Explorer does not
   * trigger the stream; use `cp`/`mv` on Linux instead.
   *
   * @param scc       the streaming context
   * @param inputFile directory to monitor (defaults to the original HDFS path)
   * @return a DStream of the text content of newly appearing files
   */
  def HDFStextFileStream(scc: StreamingContext,
                         inputFile: String = "hdfs://192.168.100.20:9000/test/"): DStream[String] = {
    scc.textFileStream(inputFile)
  }

  /**
   * queueStream: feeds a queue of pre-built RDDs into the stream.
   *
   * `mutable.SynchronizedQueue` is deprecated (and removed in Scala 2.13),
   * so a plain `mutable.Queue` is used here with explicit synchronization
   * on the producer side instead.
   *
   * @param scc the streaming context
   * @return a DStream of the queued integers rendered as strings
   */
  def queueStream(scc: StreamingContext): DStream[String] = {
    val rddQueue = new mutable.Queue[RDD[Int]]()
    // Producer thread: enqueues a new RDD every 3 seconds.
    val addQueueThread = new Thread(new Runnable {
      override def run(): Unit = {
        for (i <- 5 to 10) {
          val addRDD = scc.sparkContext.parallelize(1 to i, 1)
          rddQueue.synchronized {
            rddQueue += addRDD
          }
          println("RESULT = " + addRDD.collect().mkString(","))
          Thread.sleep(3000)
        }
        println("添加结束 = ")
      }
    })
    // Daemon so this helper thread cannot keep the JVM alive after shutdown.
    addQueueThread.setDaemon(true)
    addQueueThread.start()
    val inputDStream: InputDStream[Int] = scc.queueStream(rddQueue)
    inputDStream.map(_.toString)
  }

  def main(args: Array[String]): Unit = {
    // Silence Spark's own logging below ERROR.
    Logger.getLogger("org").setLevel(Level.ERROR)
    // 1. Configuration / context.
    val scc = newContext
    // 2. Data input.
    val dataDStream: DStream[String] = socketTextStream(scc)
    // 3. Processing.
    val wordDStream: DStream[String] = dataDStream.flatMap(_.split(" "))
    val wordAndOneDStream: DStream[(String, Int)] = wordDStream.map((_, 1))
    // Alternative: per-batch counting with no state kept between batches.
    //val resultDStream: DStream[(String, Int)] = new WordCount_SparkStreaming().reduceByKey(wordAndOneDStream)
    // Stateful counting accumulated across all batches.
    val resultDStream: DStream[(String, Int)] =
      new WordCount_SparkStreaming().updateStateByKey(scc, wordAndOneDStream)

    // 4. Output.
    resultDStream.print()

    /**
     * foreachRDD usage pattern (e.g. for writing to MySQL):
     * the function passed to foreachRDD is invoked on the Driver.
     */
    resultDStream.foreachRDD((rdd, time) => {
      /** Do NOT acquire a Connection here: this code runs on the Driver. */
      rdd.foreachPartition(iter => {
        /** Acquire the Connection here (once per partition, on the executor). */
        iter.foreach(item => {
          // Per-record processing goes here.
        })
      })
    })
    // Start the context and block until shutdown.
    closeStreamingContext(scc)
  }

  class WordCount_SparkStreaming {
    /**
     * reduceByKey: recomputes the counts for every batch independently;
     * nothing is accumulated across batches.
     *
     * @param wordAndOneDStream (word, 1) pairs for the current batch
     * @return per-batch word counts
     */
    def reduceByKey(wordAndOneDStream: DStream[(String, Int)]): DStream[(String, Int)] = {
      wordAndOneDStream.reduceByKey(_ + _)
    }

    /**
     * updateStateByKey: accumulates counts across all batches. Requires a
     * checkpoint directory where the per-key state is persisted.
     *
     * @param scc               streaming context (used to set the checkpoint dir)
     * @param wordAndOneDStream (word, 1) pairs for the current batch
     * @return cumulative word counts across all batches seen so far
     */
    def updateStateByKey(scc: StreamingContext,
                         wordAndOneDStream: DStream[(String, Int)]): DStream[(String, Int)] = {
      // 1. Set the checkpoint directory (required for stateful operations).
      scc.checkpoint(rootDir + "checkPointDir")
      // 2. Merge this batch's counts into the accumulated state per key.
      wordAndOneDStream.updateStateByKey((values: Seq[Int], state: Option[Int]) => {
        Some(values.sum + state.getOrElse(0))
      })
    }
  }
}
