package com.atguigu.flink.chapter10.join;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Example Flink streaming job that consumes string records from the Kafka topic
 * {@code t13} (broker {@code hadoop102:9092}) and prints each record to stdout.
 *
 * <p>Consumption starts from the latest offsets, so only records produced after
 * the job starts are seen. The local web UI is exposed on port 2000.
 */
public class ConsumeUpertKafkaWithStream {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 2000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);

        // BUG FIX: the original code built the KafkaSource and discarded the result,
        // so the topology had no source and env.execute() failed with
        // "No operators defined in streaming topology".
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("hadoop102:9092")
                .setGroupId("a")
                .setTopics("t13")
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new DeserializationSchema<String>() {
                    // Deserialization: decode the record value as UTF-8 text.
                    // A null payload (e.g. a Kafka tombstone) is passed through as null.
                    @Override
                    public String deserialize(byte[] message) throws IOException {
                        if (message != null) {
                            return new String(message, StandardCharsets.UTF_8);
                        }
                        return null;
                    }

                    // Unbounded stream: never signal end-of-stream.
                    @Override
                    public boolean isEndOfStream(String s) {
                        return false;
                    }

                    @Override
                    public TypeInformation<String> getProducedType() {
                        return Types.STRING;
                    }
                })
                .build();

        // Attach the source to the environment; no event-time semantics are needed
        // for a simple print job, so use noWatermarks().
        env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source-t13")
                .print();

        try {
            env.execute();
        } catch (Exception e) {
            // Surface job failures instead of swallowing them with printStackTrace();
            // the original cause is preserved for diagnosis.
            throw new RuntimeException("Flink job execution failed", e);
        }
    }
}



//public class ConsumeUpertKafkaWithStream {
//    public static void main(String[] args) {
//        Configuration conf = new Configuration();
//        conf.setInteger("rest.port",2000);
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
//        env.setParallelism(1);
//
//        KafkaSource.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setGroupId("a")
//                .setTopics("t13")
//                .setStartingOffsets(OffsetsInitializer.latest())
//                .setValueOnlyDeserializer(new DeserializationSchema<String>() {
//                    //反序列化
//                    @Override
//                    public String deserialize(byte[] message) throws IOException {
//                        if (message != null){
//                            return new String(message, StandardCharsets.UTF_8);
//                        }
//                        return null;
//                    }
//
//                    //是否结束流
//                    @Override
//                    public boolean isEndOfStream(String s) {
//                        return false;
//                    }
//
//                    @Override
//                    public TypeInformation<String> getProducedType() {
//                        return Types.STRING;
//                    }
//                })
//                .build();
//
//        try {
//            env.execute();
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//
//    }
//}









