package day02;

import day01.RichHdfsSourceFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.ConnectedStreams;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.CoProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class UnionOprationTest {
    /**
     * Demonstrates Flink's {@code union} operator by merging an HDFS-backed stream
     * with a Kafka stream into one {@code DataStream<String>} and performing an
     * in-memory lookup join: HDFS records ("key\tvalue" lines) populate a map,
     * and Kafka records (bare keys) are resolved against it.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        env.setParallelism(1);

        // HDFS stream: dimension data, one "key\tvalue" pair per line.
        DataStreamSource<String> ds = env.addSource(new RichHdfsSourceFunction("/user/yeniu/data/country_data1"));

        // Kafka stream: bare keys to be enriched against the HDFS data.
        Properties properties = new Properties();
        // The first broker was unreachable, so connect to a single reachable one.
        // properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "s1.hadoop:9092,s3.hadoop:9092");
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "s1.hadoop:9092");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group_33"); // consumer group id
        // Re-scan the topic for newly added partitions every 5 seconds.
        properties.setProperty("flink.partition-discovery.interval-millis", "5000");
        FlinkKafkaConsumer010<String> kafkaSource = new FlinkKafkaConsumer010<>("topic_33", new SimpleStringSchema(), properties);
        kafkaSource.setStartFromLatest();
        DataStreamSource<String> ds2 = env.addSource(kafkaSource);

        // union() merges streams of the SAME element type into one DataStream;
        // contrast with connect(), which keeps two element types and is handled
        // by a CoProcessFunction(<first type>, <second type>, <output type>).
        ds.union(ds2).process(new ProcessFunction<String, String>() {
            // In-memory join table populated from the HDFS side.
            // NOTE(review): this map is plain operator memory, not checkpointed
            // Flink state — it is lost on restart. Fine for a demo only.
            private final Map<String, String> map = new HashMap<>();

            @Override
            public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
                if (value.contains("\t")) {
                    // HDFS record: "key\tvalue" — store it in the lookup table.
                    System.out.println("hdfs--->" + value);
                    // limit=2 keeps a trailing empty value and tabs inside the value;
                    // the length check skips malformed lines instead of throwing
                    // ArrayIndexOutOfBoundsException (e.g. "key\t" splits to 1 token
                    // with the default split, which drops trailing empty strings).
                    String[] arr = value.split("\t", 2);
                    if (arr.length == 2) {
                        map.put(arr[0], arr[1]);
                    }
                } else {
                    // Kafka record: a bare key — emit its mapped value, or the
                    // original fallback marker when the key is not (yet) loaded.
                    System.out.println("kafka--->" + value);
                    out.collect(map.getOrDefault(value, "unknow"));
                }
            }
        }).print();

        env.execute();
    }
}
