package com.flink.readsource;

import com.flink.entity.User;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

/**
 * Demonstrates the different ways of reading data into a Flink DataStream:
 * file, socket, collection, elements, Kafka, and user-defined sources.
 *
 * @author yanzhengwu
 * @create 2022-07-20 13:41
 */
public class ReadDataSourceDemo {


    public static void main(String[] args) throws Exception {

        // Obtain the streaming execution environment for this job.
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism 1 keeps element order deterministic for local testing —
        // effectively single-threaded execution.
        environment.setParallelism(1);

        // Register each demo source on the job graph.
        readFile(environment);
        readCollection(environment);
        readElements(environment);
        userDefinedDataSource(environment);

        // Submit the assembled job for execution.
        environment.execute();
    }

    /**
     * 读取文件流
     *
     * @param env
     */
    /**
     * Reads a text file as a bounded stream and prints each line.
     *
     * @param env the execution environment to register the source on
     */
    static void readFile(StreamExecutionEnvironment env) {
        DataStreamSource<String> lines = env.readTextFile("input/input.txt");
        lines.print("fileStream-->");
    }

    /**
     * 读取监听端口
     *
     * @param env
     */
    /**
     * Reads an unbounded stream of text lines from a listening socket and prints them.
     *
     * @param env the execution environment to register the source on
     */
    static void readSocket(StreamExecutionEnvironment env) {
        final String host = "192.168.43.8";
        final int port = 8989;
        env.socketTextStream(host, port).print("socketStream-->");
    }

    /**
     * 读取集合数据 TODO 本地测试环境 推荐此用法
     *
     * @param env
     */
    /**
     * Builds bounded streams from in-memory Java collections and prints them.
     * Recommended approach for local testing.
     *
     * @param env the execution environment to register the sources on
     */
    static void readCollection(StreamExecutionEnvironment env) {
        // Source backed by a plain Integer list.
        List<Integer> numbers = new ArrayList<>(3);
        numbers.add(12);
        numbers.add(13);
        numbers.add(14);
        env.fromCollection(numbers).print("collection-->");

        // Source backed by a list of User POJOs.
        List<User> users = new ArrayList<>();
        users.add(new User("用户1", "/etc", LocalDate.of(2022, 6, 1), 12, 2000L));
        users.add(new User("用户2", "/opt", LocalDate.of(2022, 6, 2), 20, 555L));
        users.add(new User("用户3", "/flink", LocalDate.of(2022, 6, 3), 30, 555L));
        env.fromCollection(users).print("userList-->");
    }

    /**
     * 读取元素(对象)数据 TODO 本地测试环境 推荐此用法
     *
     * @param env
     */
    /**
     * Builds a bounded stream directly from element (object) literals and prints it.
     * Recommended approach for local testing.
     *
     * @param env the execution environment to register the source on
     */
    static void readElements(StreamExecutionEnvironment env) {
        // Declared as DataStreamSource<User> rather than <Object>: every element
        // is a User, so keeping the concrete type preserves type safety for any
        // downstream transformations.
        DataStreamSource<User> elementsStream = env.fromElements(
                new User("用户2", "家电", LocalDate.of(2022, 6, 2), 12, 10000L),
                new User("用户1", "零食", LocalDate.of(2022, 1, 1), 11, 10000L),
                new User("用户2", "洗发水", LocalDate.of(2022, 6, 2), 13, 10000L),
                new User("用户3", "椅子", LocalDate.of(2022, 6, 3), 14, 10000L),
                new User("用户3", "手机", LocalDate.of(2022, 6, 3), 15, 10000L),
                new User("用户1", "电脑", LocalDate.of(2022, 6, 2), 13, 10000L),
                new User("用户1", "手机", LocalDate.of(2022, 6, 16), 16, 10000L),
                new User("用户2", "洗发水", LocalDate.of(2022, 6, 2), 13, 60000L)
        );

        elementsStream.print("element-->");
    }

    /**
     * 读取kafka数据流
     *
     * @param env
     */
    /**
     * Reads an unbounded stream from a Kafka topic and prints each record.
     *
     * @param env the execution environment to register the source on
     */
    static void readKafka(StreamExecutionEnvironment env) {
        KafkaSource<String> source = KafkaSource.<String>builder()
                // bootstrap.servers entries must be host:port; a bare IP is
                // rejected by the Kafka client at startup. 9092 is the broker
                // default — confirm against the actual cluster configuration.
                .setBootstrapServers("192.168.46.8:9092")
                .setTopics("input-topic")
                .setGroupId("my-group")
                // Start from the earliest available offset on first run.
                .setStartingOffsets(OffsetsInitializer.earliest())
                // Deserialize only the record value as a UTF-8 string.
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        DataStreamSource<String> kafkaSource = env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source");
        kafkaSource.print("kafka->");
    }

    /**
     * 自定义数据源 TODO 写一个自旋模拟一个无界流
     *
     * @param env
     */
    /**
     * Registers the custom spin-loop source that simulates an unbounded stream
     * and prints its output.
     *
     * @param env the execution environment to register the source on
     */
    static void userDefinedDataSource(StreamExecutionEnvironment env) {
        // ParallelSourceFunction allows parallelism > 1, unlike plain SourceFunction.
        env.addSource(new UserDefinedParallelSource())
                .setParallelism(4)
                .print("defined->");
    }

    /**
     * 使用忙循环模拟一个无界流
     * 两个方法 run负责执行；cancel负责停止 记得给一个状态位
     * TODO 单线程SourceFunction 并行度只能是1 不能设置多个并行度 否则报错
     */
    /**
     * Simulates an unbounded stream with a sleep-paced loop.
     * {@code run} emits elements; {@code cancel} flips the stop flag.
     * NOTE: a plain {@link SourceFunction} is non-parallel — its parallelism
     * must stay 1, otherwise Flink throws at job submission.
     */
    public static class UserDefinedSource implements SourceFunction<Integer> {
        // volatile is required: cancel() is invoked from a different thread
        // than run(), and without it the emitting loop may never observe the
        // flag change (matches UserDefinedParallelSource below).
        volatile boolean isRunning = true;

        final Random random = new Random();

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
            while (isRunning) {
                ctx.collect(random.nextInt());
                // Throttle to one element per second so the demo stays readable.
                Thread.sleep(1000L);
            }
        }

        @Override
        public void cancel() {
            isRunning = false;
        }
    }

    /**
     * 使用忙循环模拟一个无界流
     * 两个方法 run负责执行；cancel负责停止 记得给一个状态位
     * TODO 多线程ParallelSourceFunction 并行度可以设置多个
     */
    /**
     * Parallel variant of the spin-loop source simulating an unbounded stream.
     * {@code run} emits elements; {@code cancel} flips the stop flag.
     * As a {@link ParallelSourceFunction}, it may run with parallelism > 1.
     */
    static class UserDefinedParallelSource implements ParallelSourceFunction<Integer> {
        // Stop flag written by cancel() from another thread; volatile makes
        // the write visible to the emitting loop.
        volatile boolean isRunning = true;

        Random random = new Random();

        @Override
        public void run(SourceContext<Integer> sourceContext) throws Exception {
            while (isRunning) {
                // Emit one random number, then pause a second to avoid flooding.
                sourceContext.collect(random.nextInt());
                Thread.sleep(1000L);
            }
        }

        @Override
        public void cancel() {
            // run() observes the new value on its next loop check.
            isRunning = false;
        }
    }
}
