package com.atguigu.flink.wordcount;

import com.atguigu.flink.pojo.WordCount;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Created by Smexy on 2022/11/18
 *
 *  在Tuple不适合的场景，需要把数据封装为POJO。
 *  POJO (Plain Old Java Object): 传统的Java对象，
 *  和Bean大部分情况概念是相似的。作为POJO，要求可以被Flink的序列化框架
 *  进行序列化(Serialize)或反序列化(Deserialize)。
 */
public class Demo5_POJOWordCount
{
    /**
     * Entry point: reads lines from a socket, splits them into words wrapped in
     * {@code WordCount} POJOs, groups by word, and prints a running count.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {

        // Obtain the streaming execution environment.
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();

        // Single parallel task globally, so all output appears in one ordered stream.
        environment.setParallelism(1);

        // Unbounded source: one record per line received on the socket.
        DataStreamSource<String> lines = environment.socketTextStream("hadoop103", 8888);

        // Tokenize each line into WordCount POJOs, key by the word, and sum the counts.
        // Anonymous classes (rather than lambdas) are used so Flink can infer the
        // POJO output type without explicit type-information hints.
        lines
          .flatMap(new FlatMapFunction<String, WordCount>()
          {
              @Override
              public void flatMap(String line, Collector<WordCount> collector) throws Exception {
                  for (String token : line.split(" ")) {
                      collector.collect(new WordCount(token, 1));
                  }
              }
          })
          .keyBy(new KeySelector<WordCount, String>()
          {
              // The returned word becomes the grouping key for the stream.
              @Override
              public String getKey(WordCount wordCount) throws Exception {
                  return wordCount.getWord();
              }
          })
          // For a POJO, sum() takes the field NAME; for a Tuple it would take a position index.
          .sum("count")
          .print();

        // Submit the job to the execution environment.
        environment.execute();
    }
}
