package com.example.split;

import com.example.bean.WaterSensor;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

/**
 * Created with IntelliJ IDEA.
 * ClassName: WordCountStream
 * Package: com.example.wordcount
 * Description:
 * User: fzykd
 *
 * @Author: LQH
 * Date: 2023-07-17
 * Time: 10:52
 */


// Side-output (侧输出流) demo: routing a stream into a main output plus tagged side outputs.
public class SideOutputDemo {

    public static void main(String[] args) throws Exception {

        // Recipe for side outputs:
        // 1. Use process() — the lowest-level, most flexible operator.
        // 2. Define an OutputTag per side stream (with explicit TypeInformation,
        //    since OutputTag is generic and the type would otherwise be erased).
        // 3. Emit to a side stream via ctx.output(tag, value).
        // 4. Retrieve each side stream from the main stream with getSideOutput(tag).

        OutputTag<WaterSensor> tag1 = new OutputTag<WaterSensor>("s1", Types.POJO(WaterSensor.class));
        OutputTag<WaterSensor> tag2 = new OutputTag<WaterSensor>("s2", Types.POJO(WaterSensor.class));

        // 1. Create the execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.setParallelism(1);

        // 2. Read raw lines from a socket and parse them into WaterSensor POJOs.
        //    Expected line format: "<id>,<timestamp>,<vc>" (no validation — malformed
        //    input will fail the job; acceptable for a demo).
        final SingleOutputStreamOperator<WaterSensor> sensorStream = env.socketTextStream("hadoop102", 7777)
                .map(new MapFunction<String, WaterSensor>() {
                    @Override
                    public WaterSensor map(String s) throws Exception {

                        String[] split = s.split(",");

                        // parseLong/parseInt avoid needless boxing; the POJO stores primitives anyway.
                        return new WaterSensor(split[0], Long.parseLong(split[1]), Integer.parseInt(split[2]));
                    }
                });

        // 3. Split the stream by sensor id. When no built-in operator fits the
        //    business logic, drop down to ProcessFunction. The second generic
        //    parameter (WaterSensor) is the MAIN output's type; side-output types
        //    are carried by their OutputTags instead.
        SingleOutputStreamOperator<WaterSensor> process = sensorStream.process(new ProcessFunction<WaterSensor, WaterSensor>() {
            @Override
            public void processElement(WaterSensor value, Context ctx, Collector<WaterSensor> out) throws Exception {
                // value: one input element; ctx: access to side outputs/timers;
                // out: collector for the main downstream output.
                String id = value.getId();

                if ("s1".equals(id)) {
                    // Route s1 records to the first side output:
                    // ctx.output(tag, value) — tag identifies the side stream,
                    // value is the element placed on it.
                    ctx.output(tag1, value);
                } else if ("s2".equals(id)) {
                    // Route s2 records to the second side output.
                    ctx.output(tag2, value);
                } else {
                    // Everything else stays on the main stream.
                    out.collect(value);
                }
            }
        });

        // 4. Print the main stream, then pull each side stream off the main
        //    operator by its tag.
        process.print("主流");
        process.getSideOutput(tag1).print("s1-");
        process.getSideOutput(tag2).printToErr("s2-"); // stderr shows in red in the IDE console

        env.execute();
    }
}
