package com.offcn.bigdata.datastream.sink

import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode

/**
 * @Author: BigData-LGW
 * @ClassName: FileSink
 * @Date: 2020/12/16 19:03
 * @功能描述: Batch word-count example that writes its result to a CSV file sink.
 * @Version: 1.0
 */
object FileSink {
    /**
     * Batch word-count example using Flink's DataSet API.
     *
     * Reads three in-memory lines, counts words, prints the result to the
     * driver's stdout, and writes the (word, count) pairs to a CSV file sink.
     */
    def main(args: Array[String]): Unit = {
        // Batch (DataSet API) execution environment.
        val env = ExecutionEnvironment.getExecutionEnvironment
        val lines = env.fromElements(
            "implicit you",
            "implicit you",
            "implicit me"
        )
        // Classic word count: split on whitespace, pair each word with 1,
        // group by the word (tuple field 0) and sum the counts (field 1).
        val ret = lines.flatMap(_.split("\\s+"))
            .map((_,1))
            .groupBy(0)
            .sum(1)
        // collect() eagerly executes the plan built so far and pulls the
        // result back to the driver for inspection.
        val info = ret.collect()
        println(s"info:${info.mkString("[", ", ", "]")}")
        // Parallelism 1 so the CSV sink writes a single output file rather
        // than a directory of part files.
        ret.setParallelism(1)
            //            .writeAsText("file:/D:/tmp/csvoutput/text",WriteMode.OVERWRITE)
            .writeAsCsv(filePath = "file:/D:/tmp/csvoutput",
                rowDelimiter = ";",
                fieldDelimiter = ",",
                writeMode = WriteMode.OVERWRITE
            )
        // Fix: the job name previously referenced KafkaSink (copy-paste
        // mistake); name the job after this object instead.
        env.execute(s"${FileSink.getClass.getSimpleName}")
    }
}
