package com.shujia.flink.source;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.time.Duration;

public class Demo2FileSource {
    public static void main(String[] args) throws Exception {

        /*
         * Unified stream/batch processing:
         * 1. The same operator code can run in both streaming and batch mode.
         * 2. The same file source can be read as a bounded or an unbounded stream.
         */

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Select the runtime mode explicitly (STREAMING here; BATCH also works).
        env.setRuntimeMode(RuntimeExecutionMode.STREAMING);

        // 1. Legacy API -- always produces a bounded stream.
        DataStream<String> legacyLines = env.readTextFile("flink/data/students.csv");

        //legacyLines.print();

        // 2. New FileSource API -- supports both bounded and unbounded reads.
        FileSource<String> studentSource = FileSource
                .forRecordStreamFormat(
                        // Read the file line by line with an explicit charset.
                        new TextLineInputFormat("UTF-8"),
                        // Path of the directory (or file) to read.
                        new Path("flink/data/stu")
                )
                // Periodically scan the directory for new files,
                // which turns this source into an unbounded stream.
                .monitorContinuously(Duration.ofSeconds(5))
                .build();

        // Attach the source; no event-time watermarks are needed for this job.
        DataStream<String> studentLines =
                env.fromSource(studentSource, WatermarkStrategy.noWatermarks(), "fileSource");

        // 3. Count occurrences per key: map each CSV line to (column index 4, 1).
        // Type info is given explicitly because lambdas erase Tuple2's generics.
        DataStream<Tuple2<String, Integer>> pairs = studentLines
                .map(line -> Tuple2.of(line.split(",")[4], 1), Types.TUPLE(Types.STRING, Types.INT));

        // Group the pairs by the key stored in tuple field 0.
        KeyedStream<Tuple2<String, Integer>, String> grouped = pairs.keyBy(pair -> pair.f0);

        // Maintain a running sum over tuple field 1 (the count).
        DataStream<Tuple2<String, Integer>> counts = grouped.sum(1);

        counts.print();

        env.execute();
    }
}
