package com.explame.d02_Source_数据源;

import com.explame.POJO.Work;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.junit.jupiter.api.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class source {

    // 1. Reading a data source from a local Java collection.
    @Test
    public void source01() throws Exception {
        // Build the streaming environment and pin the job parallelism to 1.
        // NOTE: setParallelism sets the job's parallelism, not the maximum
        // (that would be setMaxParallelism).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Sample records; Arrays.asList gives a fixed-size list view over the varargs.
        List<Work> works = Arrays.asList(
                new Work(1,"q",45.0),
                new Work(2,"w",46.0),
                new Work(3,"e",47.0)
        );

        // fromCollection(): turn the local collection into a bounded source,
        // then print each element to stdout.
        env.fromCollection(works).print();

        // Submit and run the job.
        env.execute();
    }

    // 2. Reading a data source from a text file.
    @Test
    public void source02() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // readTextFile(): emit the file's lines as a stream of Strings and print them.
        env.readTextFile("input").print();

        env.execute();
    }

    // 3. Reading a data source from HDFS.
    @Test
    public void source03() throws Exception {
        // TODO(review): not implemented yet — original left this body empty.
    }
}
