package cn.tedu.stream.source

import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}

/**
 * @author Amos
 * @date 2022/5/22
 */

object StreamSourceDemo {

  /**
   * Demonstrates the common ways to create a Flink DataStream source:
   * local collections/elements, generated sequences, text files (local
   * and HDFS), and a socket stream.
   *
   * Note: the HDFS and socket sources require the corresponding external
   * services (`hadoop01`) to be reachable at runtime.
   */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Required for the implicit TypeInformation instances of the Scala API.
    import org.apache.flink.api.scala._

    // 1. Sources backed by local collections.
    val source1: DataStream[String] = env.fromElements("hadoop", "flink", "flume")

    val source2: DataStream[String] = env.fromCollection(List("hadoop", "flink", "flume"))

    val source3: DataStream[Long] = env.generateSequence(1, 10)

    // 2. Sources backed by external files.
    val source4: DataStream[String] = env.readTextFile("FLINKSCALA/data/distribute_cache_student")

    val source5: DataStream[String] = env.readTextFile("hdfs://hadoop01:8020/test/input/wordcount.txt")

    // 3. Socket source.
    val source6: DataStream[String] = env.socketTextStream("hadoop01", 9999)

    // 4. Custom source (not implemented here).

    // A streaming job needs at least one sink: without any sink attached,
    // env.execute() fails with "No operators defined in streaming topology".
    // Print each stream, prefixed with its name, so the demo actually runs.
    source1.print("source1")
    source2.print("source2")
    source3.print("source3")
    source4.print("source4")
    source5.print("source5")
    source6.print("source6")

    env.execute("StreamSourceDemo")
  }

}
