package cn.doitedu.api;

import beans.Employee;
import com.alibaba.fastjson.JSON;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

import java.io.IOException;

/**
 * @Author: 深似海
 * @Site: <a href="http://www.51doit.com">多易教育</a>
 * @QQ: 657270652
 * @Date: 2024/2/21
 * @Desc: 学大数据，上多易教育
 * <p>
 * kafka常用命令：
 * [root@doitedu kafka_2.12-2.3.1]# bin/kafka-topics.sh --bootstrap-server doitedu:9092 --create --topic doit46 --partitions 2 --replication-factor 1
 * [root@doitedu kafka_2.12-2.3.1]# bin/kafka-topics.sh --bootstrap-server doitedu:9092 --list
 * [root@doitedu kafka_2.12-2.3.1]# bin/kafka-console-producer.sh --broker-list doitedu:9092 --topic doit46
 * [root@doitedu kafka_2.12-2.3.1]# bin/kafka-console-consumer.sh --bootstrap-server doitedu:9092 --from-beginning --topic doit46
 **/
public class _04_KafkaSource {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //env.setParallelism(1);

        // Build a KafkaSource that consumes string records from topic "doit46".
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("doitedu:9092")
                .setTopics("doit46")
                .setGroupId("g1")
                .setClientIdPrefix("doit")
                // Prefer the consumer group's last committed offsets; if none
                // have been committed yet, fall back to the LATEST reset strategy.
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // Attach the source to the environment. No event-time watermarks are
        // needed here. Each record is expected to be a JSON string such as:
        //   {"name":"zs","age":18,"gender":"male","salary":18000}
        DataStreamSource<String> records = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source");

        // Parse each JSON record into an Employee bean, then keep, per gender,
        // the employee with the highest salary seen so far.
        // (An anonymous MapFunction is used instead of a lambda so Flink can
        // extract the Employee output type without an explicit returns() hint.)
        records.map(new MapFunction<String, Employee>() {
                    @Override
                    public Employee map(String line) throws Exception {
                        return JSON.parseObject(line, Employee.class);
                    }
                })
                .keyBy(Employee::getGender)
                .maxBy("salary")
                .print();

        env.execute();

    }
}
