package cn._51doit.flink.day02.transformations;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.operators.StreamMap;

/**
 * Instead of calling the map() method directly, use the low-level transform()
 * method to implement the same functionality as map().
 */
public class MapDemo2 {

    /**
     * Reads text lines from a socket, upper-cases each line using the low-level
     * {@code transform} API (which {@code map} delegates to internally), and
     * prints the results.
     *
     * @param args optional overrides: {@code args[0]} = hostname (default
     *             {@code "localhost"}), {@code args[1]} = port (default {@code 8888})
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Generalized: hostname and port may be supplied on the command line;
        // the defaults preserve the original hard-coded behavior.
        String hostname = args.length > 0 ? args[0] : "localhost";
        int port = args.length > 1 ? Integer.parseInt(args[1]) : 8888;

        // Example input line: "spark hadoop"
        DataStreamSource<String> lines = env.socketTextStream(hostname, port);

        // The map() method is implemented on top of transform(); here we call
        // transform() directly to achieve the same effect.
        //SingleOutputStreamOperator<String> upperStream = lines.map(String::toUpperCase).setParallelism(2);
        MapFunction<String, String> mapFunction = new MapFunction<String, String>() {
            @Override
            public String map(String s) throws Exception {
                return s.toUpperCase();
            }
        };

        // "MyMap" is the operator name shown in the Flink web UI; StreamMap wraps
        // the MapFunction as a one-input stream operator, and TypeInformation
        // declares the operator's output type.
        SingleOutputStreamOperator<String> upperStream =
                lines.transform("MyMap", TypeInformation.of(String.class), new StreamMap<>(mapFunction));

        upperStream.print();

        env.execute();
    }
}
