package com.gitee.sink

import org.apache.flink.api.scala.operators.ScalaCsvOutputFormat
import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment, _}
import org.apache.flink.core.fs.FileSystem.WriteMode
//NOTE: for both local and HDFS sinks,
//if parallelism > 1 the path is treated as a directory name,
//if parallelism = 1 the path is treated as a file name.
//In OVERWRITE mode an existing file is overwritten.
object BatchSink {

  /**
   * Demonstrates the common Flink DataSet sinks: stdout, stderr, local text
   * file, HDFS text file and CSV.
   *
   * Sink paths may optionally be overridden on the command line:
   *   args(0) – local text sink path
   *   args(1) – HDFS text sink path
   *   args(2) – local CSV sink path
   * With no arguments the original hard-coded paths are used, so existing
   * invocations behave exactly as before.
   */
  def main(args: Array[String]): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    // Parallelism 1 => each sink path below is treated as a single file name;
    // with parallelism > 1 it would be treated as a directory name.
    env.setParallelism(1)

    val ds: DataSet[(String, Int)] = env.fromElements(("spark", 1), ("flink", 1))

    // Default paths, overridable via program arguments (backward compatible).
    val localTextPath = if (args.length > 0) args(0) else "file:///D:/data/input/sink"
    val hdfsTextPath  = if (args.length > 1) args(1) else "hdfs://node01:9000/workcount/output8"
    val csvPath       = if (args.length > 2) args(2) else "file:///D:/data/input/sink.csv"

    // Sink to stdout. NOTE: in the DataSet API, print() is eager — it triggers
    // a job execution of its own right here.
    ds.print()

    // Sink to stderr (also eager; triggers another execution).
    ds.printToErr()

    // Sink to a local text file. File sinks are lazy: nothing is written until
    // env.execute() is called below — forgetting it produces no output.
    ds.writeAsText(localTextPath, WriteMode.OVERWRITE)

    // Sink to HDFS; OVERWRITE replaces an existing file.
    ds.writeAsText(hdfsTextPath, WriteMode.OVERWRITE)

    // Sink to CSV. CSV output only works with Tuple data sets
    // (error otherwise: "CSV output can only be used with Tuple DataSets.").
    ds.writeAsCsv(
      csvPath,
      ScalaCsvOutputFormat.DEFAULT_LINE_DELIMITER,
      ScalaCsvOutputFormat.DEFAULT_FIELD_DELIMITER,
      WriteMode.OVERWRITE)

    // Trigger execution of the lazy file sinks registered above.
    env.execute()
  }

}
