package org.apache.spark.examples.sql.streaming;

import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.*;

import scala.Tuple2;

import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;

/**
 * Counts words received from a socket, grouped into sliding event-time windows.
 * Since windowing is similar to grouping, the windowed aggregation is expressed
 * with {@code groupBy()} combined with {@code window()}.
 *
 * <p>Usage: start a text source first (e.g. {@code nc -lk 9999}), then run this
 * class; windowed counts are printed to the console as data arrives.
 */
public class JavaStructuredNetworkWordCountWindowed {
    public static void main(String[] args) throws Exception {
        // Workaround for missing winutils.exe; only relevant on Windows.
        if (System.getProperty("os.name", "").toLowerCase().startsWith("windows")) {
            System.setProperty("hadoop.home.dir", "C:/hadoop-2.6.0");
        }

        SparkSession spark = SparkSession
                .builder()
                .appName("JavaStructuredNetworkWordCountWindowed")
                .getOrCreate();

        // Streaming DataFrame of (value: string, timestamp: timestamp) rows;
        // includeTimestamp attaches each line's arrival time as event time.
        Dataset<Row> lines = spark
                .readStream()
                .format("socket")
                .option("host", "localhost")
                .option("port", "9999")
                .option("includeTimestamp", true)
                .load();

        // Split each line into words, carrying the line's timestamp onto every word.
        Dataset<Row> words = lines
                .as(Encoders.tuple(Encoders.STRING(), Encoders.TIMESTAMP()))
                .flatMap((FlatMapFunction<Tuple2<String, Timestamp>, Tuple2<String, Timestamp>>)
                                t -> {
                                    List<Tuple2<String, Timestamp>> result = new ArrayList<>();
                                    for (String word : t._1.split(" ")) {
                                        result.add(new Tuple2<>(word, t._2));
                                    }
                                    return result.iterator();
                                },
                        Encoders.tuple(Encoders.STRING(), Encoders.TIMESTAMP())
                ).toDF("word", "timestamp");

        // Count words in 10-minute windows sliding every 5 minutes. The
        // 10-minute watermark lets Spark discard state for windows that are
        // more than 10 minutes behind the latest seen event time.
        Dataset<Row> windowedCounts = words
                .withWatermark("timestamp", "10 minutes")
                .groupBy(
                        functions.window(words.col("timestamp"), "10 minutes", "5 minutes"),
                        words.col("word")
                ).count();

        // Streaming deduplication, for reference only. NOTE(review): `lines`
        // from the socket source has no "guid"/"eventTime" columns, so
        // executing these as written would throw an AnalysisException:
        //
        //   streamingDf.dropDuplicates("guid");                       // without watermark
        //   streamingDf.withWatermark("eventTime", "10 seconds")
        //              .dropDuplicates("guid", "eventTime");          // with watermark

        // The original code never started a query, so no computation ever ran.
        // "update" mode emits only rows changed since the last trigger and is
        // the mode that cooperates with the watermark for state cleanup.
        windowedCounts.writeStream()
                .outputMode("update")
                .format("console")
                .option("truncate", "false")
                .start()
                .awaitTermination();
    }
}
