package com.example;

import java.util.Locale;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * WordCount Example.
 *
 * <p>A basic Flink streaming program demonstrating how to:
 * <ol>
 *   <li>read a text file,
 *   <li>split each line into words,
 *   <li>count word occurrences,
 *   <li>write the results out.
 * </ol>
 */
public class WordCount {

    /**
     * Entry point: builds and executes the streaming word-count pipeline.
     *
     * @param args optional overrides: {@code args[0]} = input file path,
     *             {@code args[1]} = output path
     * @throws Exception if the Flink job fails to execute
     */
    public static void main(String[] args) throws Exception {
        // Obtain the stream execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Default input/output paths; overridable via command-line arguments.
        String inputPath = "data/input.txt";
        String outputPath = "data/output";
        if (args.length > 0) {
            inputPath = args[0];
        }
        if (args.length > 1) {
            outputPath = args[1];
        }

        // Read the input file. readTextFile is deprecated in newer Flink
        // versions (FileSource is the replacement) but kept here to preserve
        // the example's existing API surface.
        @SuppressWarnings("deprecation")
        DataStream<String> text = env.readTextFile(inputPath);

        // Pipeline: tokenize each line into (word, 1) pairs, key by word,
        // sum the counts, then format each pair as "word count".
        DataStream<String> wordCounts = text
                .flatMap(new Tokenizer())
                .keyBy(tuple -> tuple.f0)
                .sum(1)
                .map(new MapFunction<Tuple2<String, Integer>, String>() {
                    @Override
                    public String map(Tuple2<String, Integer> value) throws Exception {
                        return value.f0 + " " + value.f1;
                    }
                });

        // Write the formatted results to the output path.
        sinkOutput(wordCounts, outputPath);

        // The pipeline above is only a lazily-built plan; execute() actually
        // submits and runs the job.
        env.execute("WordCount Example");
    }

    /**
     * Writes the formatted result stream as text to {@code outputPath}.
     *
     * @param data       stream of already-formatted "word count" lines
     * @param outputPath destination path for the text sink
     */
    @SuppressWarnings("deprecation")
    private static void sinkOutput(DataStream<String> data, String outputPath) {
        data.writeAsText(outputPath);
    }

    /**
     * Splits a line of text into words.
     *
     * <p>Input: one line of text. Output: a {@code (word, 1)} {@link Tuple2}
     * for every non-empty lowercase token in the line.
     */
    public static class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
            // Lowercase with Locale.ROOT so case mapping is deterministic
            // regardless of the JVM's default locale (avoids e.g. the
            // Turkish dotless-i problem), then split on non-word runs.
            String[] tokens = value.toLowerCase(Locale.ROOT).split("\\W+");

            for (String token : tokens) {
                // split() yields an empty leading token when the line starts
                // with a non-word character — skip empties.
                if (!token.isEmpty()) {
                    out.collect(new Tuple2<>(token, 1));
                }
            }
        }
    }
}
