package com.learn.datasource;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.NumberSequenceIterator;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * @create: 2023-04-17 22:34
 * @author: Mr.Du
 * --------------
 * @notes: Demonstrates creating DataStreams from in-memory data:
 *         fromElements, fromCollection, fromParallelCollection, fromSequence.
 **/
public class BatchFromCollection {
    public static void main(String[] args) throws Exception {
        // 1) Obtain the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2.1) env.fromElements(): one record per argument.
        DataStreamSource<String> ds1 = env.fromElements("spark", "flink", "spark", "hadoop");

        List<Tuple2<String, Long>> tuple2List = new ArrayList<>();
        tuple2List.add(Tuple2.of("hadoop", 1L));
        tuple2List.add(Tuple2.of("spark", 2L));
        tuple2List.add(Tuple2.of("flink", 3L));
        // NOTE: passing the whole list as a single fromElements() argument
        // produces a stream with exactly ONE record (the List itself), not
        // one record per tuple — use fromCollection(tuple2List) for that.
        DataStream<List<Tuple2<String, Long>>> ds2 = env.fromElements(tuple2List);

        // 2.2) env.fromCollection(): one record per collection element.
        DataStream<String> ds3 = env.fromCollection(Arrays.asList("spark", "flink", "hadoop"));
        // fromParallelCollection() can read a SplittableIterator with
        // parallelism > 1 — handy for generating test data in streaming jobs.
        DataStream<Long> ds4 = env.fromParallelCollection(new NumberSequenceIterator(0L, 10L), TypeInformation.of(Long.TYPE)).setParallelism(3);


        // 2.3) Sequence sources. generateSequence() is deprecated since
        // Flink 1.12; fromSequence() is its drop-in replacement and emits
        // the same values (here: 1..10 inclusive).
        DataStream<Long> ds5 = env.fromSequence(1, 10);

        DataStream<Long> ds6 = env.fromSequence(1, 10);

        // 3) Print every stream to stdout.
        ds1.print();
        ds2.print();
        ds3.print();
        ds4.print();
        ds5.print();
        ds6.print();

        // 4) Submit the job. Required for DataStream programs — without
        // execute() the pipeline is only defined, never run.
        env.execute();
    }
}
