package com.atguigu.flink.chapter01_wordcount;

import com.atguigu.flink.pojo.WordCount;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Created by Smexy on 2022/10/19
 *
 *      报错:
 *
 *          Caused by: org.apache.flink.api.common.functions.InvalidTypesException:  方法的返回值类型有关，认为类型非法
 *                  The generic type parameters of 'Collector' are missing.
 *                  In many cases lambda methods don't provide enough information for automatic type extraction
 *                  when Java generics are involved.
 *                  An easy workaround is to use an (anonymous) class instead that implements the
 *                  'org.apache.flink.api.common.functions.FlatMapFunction' interface.
 *                  Otherwise the type has to be specified explicitly using type information.
 *
 *      结论：  如果是匿名内部类实现某个接口，无需担心泛型丢失。
 *              如果是lambda表达式，某些情况下，泛型会丢失的。
 *                 Collector<WordCount>:  Collector类型可以知道，但是里面的泛型 WordCount在编译时会自动擦除。
 *                      运行时，不知道类型，报错。
 *
 *              解决方案： 明确地告诉其中的泛型的类型。
 *                          在转换之后，用 returns(...)  明确声明其中的泛型。
 */
public class Demo5_LamdaPOJO
{
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Defaults to one subtask per available CPU core; uncomment to force a single task.
        //env.setParallelism(1);

        // Read an unbounded stream of text lines from a socket source.
        DataStreamSource<String> socketStream = env.socketTextStream("hadoop103", 8888);

        // Tokenize each line and emit one WordCount(word, 1) per token.
        // The lambda's Collector<WordCount> generic is erased at compile time,
        // so the output type must be declared explicitly via returns(...),
        // otherwise Flink throws InvalidTypesException at job-graph build time.
        SingleOutputStreamOperator<WordCount> wordCounts = socketStream
            .flatMap((FlatMapFunction<String, WordCount>) (line, collector) -> {
                for (String token : line.split(" ")) {
                    collector.collect(new WordCount(token, 1));
                }
            })
            .returns(WordCount.class);

        // keyBy groups the stream by the word field (analogous to SQL GROUP BY),
        // then sum aggregates the "count" POJO field per key and prints the result.
        wordCounts
            .keyBy(WordCount::getWord)
            .sum("count")
            .print();

        // Submitting the job starts computation; a socket source keeps it running forever.
        env.execute();
    }
}
