package cn._51doit.flink.day02.transformations;

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Demonstrates the {@code map} transformation on a Flink DataStream.
 *
 * <p>Reads lines of text from a socket source and upper-cases each line with a
 * method-reference lambda ({@code String::toUpperCase}); the same logic could be
 * written as an anonymous {@code MapFunction<String, String>} or as an explicit
 * lambda {@code line -> line.toUpperCase()}.
 *
 * <p>Usage: {@code MapDemo1 [host] [port]} — defaults to {@code localhost:8888}.
 * Start a source first, e.g. {@code nc -lk 8888}.
 */
public class MapDemo1 {

    public static void main(String[] args) throws Exception {

        // Optional CLI overrides; with no args this behaves exactly as before.
        String host = args.length > 0 ? args[0] : "localhost";
        int port = args.length > 1 ? Integer.parseInt(args[1]) : 8888;

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Source: one String element per line received on the socket, e.g. "spark hadoop".
        DataStreamSource<String> lines = env.socketTextStream(host, port);

        // map is a one-to-one transformation; setParallelism(2) applies to the map
        // operator only, not to the (parallelism-1) socket source.
        SingleOutputStreamOperator<String> upperStream =
                lines.map(String::toUpperCase).setParallelism(2);

        // Sink: print each upper-cased line to stdout.
        upperStream.print();

        // Lazily-built pipeline only runs once execute() is called; the name
        // shows up in the Flink web UI.
        env.execute("MapDemo1");
    }
}
