package com.atguigu.day07;

import com.atguigu.utils.ClickEvent;
import com.atguigu.utils.ClickSource;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoFlatMapFunction;
import org.apache.flink.util.Collector;

public class Example1 {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Stream of simulated click events.
        DataStreamSource<ClickEvent> clickStream = env.addSource(new ClickSource());
        // Query strings typed into a socket on hadoop102:9999.
        DataStreamSource<String> queryStream = env.socketTextStream("hadoop102", 9999);

        clickStream
                .keyBy(event -> event.url)
                // The query stream's parallelism is forced to 1 before
                // broadcasting so that the query strings are broadcast to
                // every downstream subtask in arrival order.
                .connect(queryStream.setParallelism(1).broadcast())
                // CoFlatMapFunction<IN1, IN2, OUT>: IN1 = clicks, IN2 = queries.
                .flatMap(new CoFlatMapFunction<ClickEvent, String, ClickEvent>() {
                    // Latest query string received from the socket stream.
                    private String query = "";

                    // Called for each element of the first (click) stream:
                    // forward the click only when its username matches the
                    // current query string.
                    @Override
                    public void flatMap1(ClickEvent event, Collector<ClickEvent> out) {
                        if (event.username.equals(query)) {
                            out.collect(event);
                        }
                    }

                    // Called for each element of the second (query) stream:
                    // remember the newest query string for later matching.
                    @Override
                    public void flatMap2(String newQuery, Collector<ClickEvent> out) {
                        query = newQuery;
                    }
                })
                .print();

        env.execute();
    }
}
