package com.atguigu.api

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011

/**
 * @description: a Flink job must have at least one source and one sink
 * @time: 2020/6/19 15:13
 * @author: baojinlong
 **/

case class SensorReading(id: String, timestamp: Long, temperature: Double)

/**
 * Demo of the different ways a Flink DataStream job can ingest data:
 * in-memory collection, text file, socket text stream, Kafka topic,
 * element varargs, and a user-defined source ([[MySensorSource]]).
 */
object SourceTest {

  def main(args: Array[String]): Unit = {
    // getExecutionEnvironment adapts automatically: it behaves like
    // createLocalEnvironment when run locally and like
    // createRemoteEnvironment(host, port) when submitted to a cluster.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // Parallelism 1 keeps the printed output in a single, readable sequence.
    env.setParallelism(1)

    // 0: source from an in-memory collection.
    val collectionStream: DataStream[SensorReading] = env.fromCollection(List(
      SensorReading("sensor_01", 1547718199, 35.8),
      SensorReading("sensor_06", 1547718201, 15.8),
      SensorReading("sensor_07", 1547718202, 6.8),
      SensorReading("sensor_110", 1547718205, 38.8),
      SensorReading("sensor_120", 1547718205, 38.8),
      SensorReading("sensor_310", 1547718305, 38.8),
      SensorReading("sensor_140", 1547718505, 45.8),
      SensorReading("sensor_170", 1547718805, 23.8)
    ))
    collectionStream.print()

    // 1: source from a text file, consumed line by line.
    val fileStream: DataStream[String] =
      env.readTextFile("E:/qj_codes/big-data/FlinkTutorial/src/main/resources/sensor.data")

    // 2: source from a socket text stream (e.g. fed by netcat on port 777).
    val socketStream: DataStream[String] = env.socketTextStream("localhost", 777)
    socketStream.print("environment")
    fileStream.print()

    // 3: source from a Kafka topic.
    val kafkaProps = new Properties()
    kafkaProps.setProperty("bootstrap.servers", "localhost:9092")
    kafkaProps.setProperty("group.id", "consumer-group")
    kafkaProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    kafkaProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    kafkaProps.setProperty("auto.offset.reset", "latest")

    val kafkaStream: DataStream[String] =
      env.addSource(new FlinkKafkaConsumer011[String]("test1", new SimpleStringSchema(), kafkaProps))
    kafkaStream.print()

    // Source from individual elements; mixed Int/String widens the stream to Any.
    val elementStream: DataStream[Any] = env.fromElements(1, 23, "sfdsf")
    elementStream.print()

    // Source from a user-defined SourceFunction defined elsewhere in this project.
    val customStream: DataStream[SensorReading] = env.addSource(new MySensorSource())
    customStream.print()

    // Nothing runs until execute() is called; it blocks while the job runs.
    env.execute("source  job test")
  }
}
