package com.shujia.flink.source

import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.connector.file.src.FileSource
import org.apache.flink.core.fs.Path
import org.apache.flink.formats.csv.CsvReaderFormat
import org.apache.flink.streaming.api.scala._
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.formats.csv.CsvReaderFormat
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.csv.CsvMapper
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.csv.CsvSchema

import java.time.Duration

object Demo2FileSource {

  /**
   * Demonstrates two ways of building a DataStream from files:
   *   1. `readTextFile` -- bounded stream: reads the file once, then the stream ends.
   *   2. `FileSource` with continuous monitoring -- unbounded stream: the directory
   *      is rescanned on a fixed interval and new files are ingested as they appear.
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    /**
     * Build a DataStream from a file --- bounded stream.
     * Shorthand API; the file is read only once.
     */
    val studentDS: DataStream[String] = env.readTextFile("data/students.txt")

    //studentDS.print()

    /**
     * Continuously monitor a directory --- unbounded stream.
     */

    // Jackson CSV mapper used to derive a schema from the Student case class.
    val mapper = new CsvMapper()

    val schema: CsvSchema = mapper
      .schemaFor(classOf[Student])
      .withoutQuoteChar
      .withColumnSeparator(',') // field separator within each CSV line

    // Build the format: how each CSV record is decoded into a Student.
    val csvFormat: CsvReaderFormat[Student] = CsvReaderFormat
      .forSchema(mapper, schema, TypeInformation.of(classOf[Student]))

    // Create the source: format, path to read, and directory-scan interval.
    val fileSource: FileSource[Student] = FileSource
      .forRecordStreamFormat(csvFormat, new Path("data/flink_stream_file"))
      .monitorContinuously(Duration.ofSeconds(5)) // rescan the directory every 5 seconds
      .build()

    // Build a DataStream from the file source (no event-time watermarks needed here).
    val fileStreamDS: DataStream[Student] =
      env.fromSource(fileSource, WatermarkStrategy.noWatermarks(), "file source")

    fileStreamDS.print()

    env.execute()
  }
}
