package sparkstreaming.lesson01

import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer.HashPartition
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.{Partition, SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Created by Administrator on 2018/5/11.
  */
/**
  * Minimal Spark Streaming word count over a TCP socket source.
  *
  * Reads comma-separated words from `hadoop1:9999`, counts occurrences
  * per 2-second micro-batch, and prints the counts to stdout. Each batch
  * is counted independently — counts are NOT accumulated across batches.
  */
object NetWordCount {
  def main(args: Array[String]): Unit = {
    // local[2] is required: one thread is consumed by the socket Receiver,
    // the other actually processes the batches. With local[1] no data
    // would ever be processed.
    val conf = new SparkConf().setMaster("local[2]").setAppName("NetWordCount")

    // Use Kryo instead of Java serialization for shuffle/RDD data.
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(conf)

    // Batch interval of 2 seconds: each DStream operation below runs on
    // the data received within one 2-second window only.
    val ssc = new StreamingContext(sc, Seconds(2))

    /**
      * Source DStream: lines of text read from the socket at hadoop1:9999.
      */
    val dstream: ReceiverInputDStream[String] = ssc.socketTextStream("hadoop1", 9999)

    /**
      * Per-batch word count: split each line on commas, pair each word
      * with 1, then sum the 1s per word within the batch.
      */
    val wordCountDStream: DStream[(String, Int)] = dstream
      .flatMap(line => line.split(","))
      .map((_, 1))
      .reduceByKey((x: Int, y: Int) => x + y)

    // Output action — without at least one output operation the job
    // graph would never be materialized.
    wordCountDStream.print()

    /**
      * Start the streaming computation and block until it is stopped
      * externally or fails. No code after awaitTermination() is needed:
      * it only returns once the context has already been stopped.
      */
    ssc.start()
    ssc.awaitTermination()
  }

}
