package com.atguigu.chapter5.source;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;


import org.apache.flink.streaming.util.serialization.JSONKeyValueDeserializationSchema;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.security.Provider;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;


/**
 * @ClassName: Source
 * @Description: Demonstrates the main ways to create a Flink DataStream source.
 * @Author: kele
 * @Date: 2021/4/1 13:37
 *
 *
 * 1. Read data from HDFS
 * 2. Convert a Java collection into a DataStream
 * 3. Consume records from a Kafka topic
 *
 **/

//Smoke tests for Flink sources: HDFS files, collections, and Kafka
public class Source {

    /** Shared execution environment, rebuilt before every test. */
    private StreamExecutionEnvironment exec;

    /**
     * Builds a local execution environment before each test.
     * Each test method only assembles a job graph; {@link #after()} submits it.
     */
    @Before
    public void before(){

        Configuration conf = new Configuration();

        // Pin the REST (web UI) port so the local mini-cluster is reachable
        // at a known address instead of a random free port.
        conf.setInteger("rest.port",20000);

        exec = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // Parallelism 1 keeps the printed output in a single, ordered stream.
        exec.setParallelism(1);

    }

    /**
     * Submits and runs the job graph assembled by the test method.
     *
     * @throws Exception if job submission or execution fails
     */
    @After
    public void after() throws Exception {

        exec.execute();

    }



    /**
     * Reads text data from HDFS; each line of the file(s) under
     * {@code /input} becomes one String element of the stream.
     */
    @Test
    public void HdfsSource(){

        DataStreamSource<String> ds = exec.readTextFile("hdfs://hadoop162:8020/input");

        ds.print();

    }

    /**
     * Reads data from a Java collection (variant 1: {@code fromCollection}
     * with a prebuilt List) and prints only the even numbers.
     */
    @Test
    public void ListSource1(){

        List<Integer> list= Arrays.asList(1,2,3,4,5,6);

        DataStreamSource<Integer> ds = exec.fromCollection(list);

        // Keep only the even elements.
        SingleOutputStreamOperator<Integer> filter = ds.filter(x -> x % 2 == 0);

        filter.print();

    }

    /**
     * Reads data from a collection (variant 2: {@code fromElements} varargs)
     * and prints only the even numbers.
     */
    @Test
    public void ListSource2() {

        DataStreamSource<Integer> ds = exec.fromElements(1, 2, 3, 4, 5, 6, 7);

        SingleOutputStreamOperator<Integer> filter = ds.filter(x -> x % 2 == 0);

        filter.print();

    }


    /**
     * Reads records from a Kafka topic via {@code FlinkKafkaConsumer},
     * deserializes each record as JSON, and prints the {@code "name"} field.
     */
    @Test
    public void kafkaSource(){

        Properties prop = new Properties();
        prop.setProperty("bootstrap.servers", "hadoop162:9092,hadoop163:9092,hadoop164:9092");
        prop.setProperty("group.id", "kafkasource");
        prop.setProperty("auto.offset.reset", "latest");

        // includeMetadata=false: each record deserializes to a JSON ObjectNode
        // containing only "key" and "value" (no partition/offset metadata).
        DataStreamSource<ObjectNode> ds = exec.addSource(new FlinkKafkaConsumer<>("senion", new JSONKeyValueDeserializationSchema(false), prop));

        // findValue searches the node tree for the first field named "name".
        ds.map(x->x.findValue("name")).print();

    }

}
