package com.spark.mooc.ch7_sparkstreaming.part02_basicInputSource.socket

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Word count over a TCP socket text stream (Spark Streaming basic input source).
 *
 * Connects to localhost:9999, splits each incoming line on spaces, and prints
 * per-batch word counts once per second until terminated.
 *
 * @author lhy
 * @since 2020/11/29
 */
object NetworkWordCount {
    def main(args: Array[String]): Unit = {

        // When submitting to a cluster (yarn), re-enable this argument check,
        // drop setMaster below, and use the args-based socketTextStream variant:
        // if (args.length < 2) {
        //     System.err.println("Usage:NetworkWordCount <hostname> <port>")
        //     System.exit(1)
        // }
        StreamingExamples.setStreamingLogLevels()       // quiet Spark's verbose default logging
        val conf: SparkConf = new SparkConf().setAppName("NetworkWordCount").setMaster("local[2]")
        val ssc = new StreamingContext(conf, Seconds(1))
        // StorageLevel.MEMORY_AND_DISK_SER: received blocks are stored serialized
        // in memory and spill to disk when memory is exhausted.
        val lines: ReceiverInputDStream[String] =
            ssc.socketTextStream("localhost", 9999, StorageLevel.MEMORY_AND_DISK_SER)
        // Cluster variant — host and port taken from the command line:
        // val lines: ReceiverInputDStream[String] =
        //     ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
        val wordCounts: DStream[(String, Int)] = lines
            .flatMap(_.split(" "))
            .map(word => (word, 1))
            .reduceByKey(_ + _)
        wordCounts.print()
        ssc.start()
        ssc.awaitTermination()
    }
}
