package day04;

import day01.RichHdfsSourceFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoProcessFunction;
import org.apache.flink.streaming.api.functions.co.KeyedCoProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class ConnectWithMap {
    /**
     * Flink job that connects an HDFS-backed dictionary stream with a Kafka event
     * stream and enriches each Kafka record via a lookup in the latest dictionary map.
     *
     * <p>Stream 1 (HDFS) emits a {@code Map<String, String>} lookup table; stream 2
     * (Kafka topic {@code topic_33}) emits keys. Each Kafka key is resolved to its
     * mapped value, or {@code "unknown"} when absent.
     *
     * <p>NOTE(review): without {@code env.setParallelism(1)} or Flink broadcast state,
     * each parallel subtask of the {@code CoProcessFunction} keeps its own copy of the
     * map, and only the subtask(s) that actually receive the HDFS element hold a
     * non-null dictionary — the original Chinese comments acknowledge this. Confirm the
     * intended parallelism, or migrate to the broadcast-state pattern.
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

        // HDFS stream: emits the whole lookup table as a single Map element.
        // (RichHdfsSourceFunctionWithMap is a project-local source; presumably it
        // reads the file into a Map — TODO confirm against its definition.)
        DataStreamSource<Map<String, String>> ds = env.addSource(new RichHdfsSourceFunctionWithMap("/user/yeniu/data/country_data1"));
        //env.setParallelism(1);

        // Kafka stream configuration.
        Properties properties = new Properties();
        // Original note: could not reach the first broker, so only s1 is configured.
        // (The commented-out line below also contained a full-width comma "，", which
        // would have made the broker list invalid anyway.)
        // properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"s1.hadoop:9092，s3.hadoop:9092");
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "s1.hadoop:9092");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group_33"); // consumer group
        // Periodically re-discover topic partitions (every 5 s).
        properties.setProperty("flink.partition-discovery.interval-millis", "5000");
        FlinkKafkaConsumer010<String> kafkaSource = new FlinkKafkaConsumer010<>("topic_33", new SimpleStringSchema(), properties);
        kafkaSource.setStartFromLatest();
        DataStreamSource<String> ds2 = env.addSource(kafkaSource);

        // Connect the dictionary stream with the event stream. After connect(), the
        // single HDFS map element is delivered to only one downstream parallel slot;
        // other slots would see a null map (see class-level note).
        ds.connect(ds2).process(new CoProcessFunction<Map<String, String>, String, String>() {
            // Latest dictionary received on THIS subtask; null until it arrives.
            Map<String, String> map = null;

            @Override
            public void processElement1(Map<String, String> value, Context ctx, Collector<String> out) throws Exception {
                // Replace the dictionary wholesale with the newest snapshot.
                map = value;
            }

            @Override
            public void processElement2(String value, Context ctx, Collector<String> out) throws Exception {
                // Guard against the dictionary not having arrived yet (or never
                // arriving on this subtask): fall back to the default instead of
                // throwing a NullPointerException.
                String addr = (map == null) ? null : map.get(value);
                if (addr == null) {
                    addr = "unknown"; // fixed typo: was "unknow"
                }
                out.collect(addr);
            }
        }).print();

        env.execute();
    }
}
