package com.alison.datastream.workcount;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @Author alison
 * @Date 2024/4/8 13:38
 * @Version 1.0
 * @Description
 */
public class S1_StreamWC {

    /*

input:
he world
he flink
he java
he go

output (every word is counted, running totals per key):
(he,1)
(world,1)
(he,2)
(flink,1)
(he,3)
(java,1)
(he,4)
(go,1)

     */
    /**
     * Streaming word count: reads lines from a text file, splits each line
     * into whitespace-separated words, and prints a running count per word.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to execute
     */
    public static void main(String[] args) throws Exception {
        // Create the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Default parallelism = number of logical CPU cores on this machine;
        // set to 1 for single-threaded, deterministic-order output.
        env.setParallelism(1);
        // Read the input file line by line.
        String inputPath = "D:/workspace/lab/learnbigdata/learnflink/flink-datastream/src/main/resources/dataset/words.txt";
        DataStreamSource<String> inputDataStream = env.readTextFile(inputPath);
        // Emit (word, 1) for EVERY word on the line. The previous version
        // collected only split(...)[0] — the first token — so all other words
        // on a line were silently dropped, defeating the word count.
        DataStream<Tuple2<String, Long>> flatMapDataStream = inputDataStream
                .flatMap((String line, Collector<Tuple2<String, Long>> out) -> {
                    for (String word : line.split("\\s+")) {
                        if (!word.isEmpty()) { // guard against leading whitespace / blank lines
                            out.collect(Tuple2.of(word, 1L));
                        }
                    }
                })
                // Java lambdas lose generic type info to erasure; declare the
                // tuple's element types explicitly so Flink can serialize it.
                .returns(Types.TUPLE(Types.STRING, Types.LONG));
        // Key by the word (field f0) and keep a running sum of the count (field f1).
        // keyBy(int) on tuples is deprecated; use a type-safe key-selector lambda.
        DataStream<Tuple2<String, Long>> resultStream = flatMapDataStream
                .keyBy(t -> t.f0)
                .sum(1);
        resultStream.print();

        // Nothing runs until the job graph is submitted for execution.
        env.execute();
    }
}
