package com.atguigu.flink.chapter02;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.net.Socket;

/*
Steps for word-count with Flink:
    1. Obtain a stream execution environment.
    2. Read data from a source through the environment, yielding a stream.
    3. Apply transformations to the stream.
    4. Emit the result.
    5. Start the stream execution environment.
 */
public class WcUnBounded {
    // Test by starting a socket server on the cluster, e.g.: nc -lk 8888 on hadoop102.
    public static void main(String[] args) throws Exception {
        // 1. Obtain a stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Single parallelism so the printed counts arrive in a deterministic order.
        env.setParallelism(1);

        // 2. Read data from the source: an unbounded text stream from a socket.
        DataStreamSource<String> socketStream = env.socketTextStream("hadoop102", 8888);

        // 3. Transform the stream.
        // 3.1 Split each line into individual words.
        // Anonymous classes (rather than lambdas) are used throughout so Flink can
        // recover the generic Tuple2 type information despite Java type erasure.
        SingleOutputStreamOperator<String> wordStream = socketStream.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public void flatMap(String line, Collector<String> out) throws Exception {
                for (String word : line.split(" ")) {
                    out.collect(word);
                }
            }
        });

        // 3.2 Pair each word with an initial count of 1.
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordOneStream = wordStream.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String word) throws Exception {
                return Tuple2.of(word, 1);
            }
        });

        // 3.3 Group by the word itself (field f0 of the tuple).
        KeyedStream<Tuple2<String, Integer>, String> keyedStream = wordOneStream.keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> t) throws Exception {
                return t.f0;
            }
        });

        // 3.4 Aggregate per key: sum(1) sums tuple position 1 (the count).
        SingleOutputStreamOperator<Tuple2<String, Integer>> result = keyedStream.sum(1);

        // 4. Emit the running counts to stdout.
        result.print();

        // 5. Start the job; execute() blocks until the (unbounded) job terminates.
        env.execute();
    }
}


//public class WcUnBounded {
//    public static void main(String[] args) throws Exception {
//        //1、获取一个流的执行环境
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//        env.setParallelism(1);
//        //2、通过流的执行环境：从source中读取数据，得到一个流
//        DataStreamSource<String> source = env.socketTextStream("hadoop102", 8888);
//
//        //3、对流做各种处理
//        //3.1 对每行数据进行切割
//        SingleOutputStreamOperator<String> oneStream = source.flatMap(new FlatMapFunction<String, String>() {
//            @Override
//            public void flatMap(String line,
//                                Collector<String> out) throws Exception {
//                String[] words = line.split(" ");
//                for (String word : words) {
//                    out.collect(word);
//                }
//            }
//        });
//
//        //3.2 给每个单词配置1
//        SingleOutputStreamOperator<Tuple2<String, Long>> wordStream = oneStream.map(new MapFunction<String, Tuple2<String, Long>>() {
//            @Override
//            public Tuple2<String, Long> map(String word) throws Exception {
//                return new Tuple2<>(word, 1L);
//            }
//        });
//
//        //3.3 按照单词分组
//        KeyedStream<Tuple2<String, Long>, String> keyedStream = wordStream.keyBy(new KeySelector<Tuple2<String, Long>, String>() {
//            @Override
//            public String getKey(Tuple2<String, Long> t) throws Exception {
//                return t.f0;
//            }
//        });
//
//        //3.4 分组聚合：前面是元组，1表示元组位置为1 的元素进行聚合
//        SingleOutputStreamOperator<Tuple2<String, Long>> result = keyedStream.sum(1);
//
//        //4、输出结果
//        result.print();
//
//        //5、启动流的执行环境
//        env.execute();
//    }
//}


//public class WcUnBounded {
//    public static void main(String[] args) throws Exception {
//        //1、获取一个流的执行环境
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//        env.setParallelism(1);
//        //2、通过流的执行环境：从source中读取数据，得到一个流
//        DataStreamSource<String> streamSource = env.socketTextStream("hadoop102", 8888);
//
//        //3、对流做各种操作
//        //3.1 对每行数据进行切割
//        SingleOutputStreamOperator<String> oneStream = streamSource.flatMap(new FlatMapFunction<String, String>() {
//            @Override
//            public void flatMap(String line,
//                                Collector<String> out) throws Exception {
//                String[] words = line.split(" ");
//                for (String word : words) {
//                    out.collect(word);
//                }
//            }
//        });
//
//        //3.2 给每个单词配置1
//        SingleOutputStreamOperator<Tuple2<String, Long>> wordStream = oneStream.map(new MapFunction<String, Tuple2<String, Long>>() {
//            @Override
//            public Tuple2<String, Long> map(String word) throws Exception {
//                return new Tuple2<>(word, 1L);
//            }
//        });
//
//        //3.3 按照单词分组
//        KeyedStream<Tuple2<String, Long>, String> keyedStream = wordStream.keyBy(new KeySelector<Tuple2<String, Long>, String>() {
//            @Override
//            public String getKey(Tuple2<String, Long> t) throws Exception {
//                return t.f0;
//            }
//        });
//
//        //3.4 分组聚合：前面是元组，1代表元组位置为1 的元素进行聚合
//        SingleOutputStreamOperator<Tuple2<String, Long>> result = keyedStream.sum(1);
//
//        //4、输出结果
//        result.print();
//
//        //5、启动流的执行环境
//        env.execute();
//    }
//}