package cn.itcast.dstream

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream

/**
 * Spark Streaming example: reads lines from a socket source and persists each
 * micro-batch to HDFS via `DStream.saveAsTextFiles`.
 *
 * Flow: SparkConf -> SparkContext -> StreamingContext (5s batches) ->
 * socketTextStream -> saveAsTextFiles -> start & block until terminated.
 */
object SaveAsTextFilesTest {

    def main(args: Array[String]): Unit = {
        // Impersonate the HDFS superuser so writes to the cluster are permitted.
        System.setProperty("HADOOP_USER_NAME", "root")

        // Run locally with 2 threads: one for the receiver, one for processing.
        val conf = new SparkConf()
            .setAppName("SaveAsTextFilesTest")
            .setMaster("local[2]")

        // SparkContext is the entry point for all job execution
        // (it owns the DAGScheduler and TaskScheduler).
        val sparkContext = new SparkContext(conf)
        // Keep the console output readable during the demo.
        sparkContext.setLogLevel("WARN")

        // StreamingContext drives the micro-batch loop; batch interval = 5 seconds.
        val streamingContext = new StreamingContext(sparkContext, Seconds(5))

        // Attach to a netcat-style socket server (default storage level).
        val lines: ReceiverInputDStream[String] =
            streamingContext.socketTextStream("192.168.121.134", 9999)

        // Each batch is written to HDFS under the given prefix, with a "txt" suffix.
        lines.saveAsTextFiles("hdfs://hadoop01:9000/data/saveAsTextFiles/satf", "txt")

        // Begin receiving and processing data.
        streamingContext.start()
        // Block the driver thread until the job is stopped externally.
        streamingContext.awaitTermination()
    }
}
