package com.alison.dataset;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.FlatMapOperator;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;


/**
 * @Author alison
 * @Date 2024/4/8 13:14
 * @Version 1.0
 * @Description Flink batch (DataSet API) word count: reads a text file, splits
 * each line on whitespace, and prints the per-word counts as (word, count).
 */
public class E1_BatchWC {

    /*
input:

he world
he flink
he java
he go

output:

(go,1)
(flink,1)
(world,1)
(java,1)
(he,4)
     */

    /**
     * Runs the batch word-count job: read lines, tokenize, group by word, sum counts, print.
     *
     * @param args optional; args[0] overrides the input file path
     * @throws Exception if the Flink job fails (readTextFile / print may throw)
     */
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // Allow the input path to be supplied as the first program argument;
        // fall back to the original hard-coded development path for compatibility.
        String inputPath = args.length > 0
                ? args[0]
                : "D:/workspace/lab/learnbigdata/learnflink/flink-datastream/src/main/resources/dataset/words.txt";
        DataSource<String> dataSource = env.readTextFile(inputPath);
        // Emit a (word, 1) pair per token. The explicit returns(...) is required
        // because Java erases the lambda's Tuple2 generic type at compile time.
        FlatMapOperator<String, Tuple2<String, Long>> flatMap = dataSource.flatMap((String line, Collector<Tuple2<String, Long>> out) -> {
            for (String word : splitWords(line)) {
                // split("\\s+") yields an empty leading token for blank lines or
                // lines starting with whitespace — skip it so we never count "".
                if (!word.isEmpty()) {
                    out.collect(Tuple2.of(word, 1L));
                }
            }
        }).returns(Types.TUPLE(Types.STRING, Types.LONG));
        DataSet<Tuple2<String, Long>> dataSet = flatMap
                // group by the word in tuple field 0
                // sum the counts in tuple field 1
                .groupBy(0).sum(1);
        dataSet.print();
    }

    /**
     * Splits a line into tokens on runs of whitespace.
     *
     * @param line the input line (never modified)
     * @return the whitespace-separated tokens; may contain one empty leading
     *         token when the line is blank or starts with whitespace
     */
    static String[] splitWords(String line) {
        return line.split("\\s+");
    }
}
