package cn.itcast.b_etl.sink;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Example: usage of sink operators.
 *
 * 1. Sink to the local file system
 * 2. Sink to HDFS
 * 3. Sink to a socket
 */
public class SinkDemo {

    public static void main(String[] args) throws Exception {
        // Impersonate the HDFS "root" user so HDFS writes are permitted.
        System.setProperty("HADOOP_USER_NAME", "root");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // A small bounded source of sample records to push through each sink.
        DataStreamSource<String> elements = env.fromElements("a", "a", "a", "c", "d", "df");

        // 1. Sink to the local file system (parallelism 1 yields a single output file):
        // elements.writeAsText("C:\\Users\\Administrator\\Desktop\\test\\output", FileSystem.WriteMode.OVERWRITE).setParallelism(1);
        // elements.writeAsText("C:\\Users\\Administrator\\Desktop\\code\\out", FileSystem.WriteMode.OVERWRITE).setParallelism(1);

        // 2. Sink to HDFS:
        // elements.writeAsText("hdfs://node1:8020/test/test.txt", FileSystem.WriteMode.OVERWRITE).setParallelism(1);

        // 3. Sink to a TCP socket, serializing each record as a UTF-8 string.
        elements.writeToSocket("node1", 8090, new SimpleStringSchema());

        // Lazily-built pipeline only runs once execute() is called.
        env.execute();
    }
}
