package org.example.com.atguigu.day06;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.*;
import org.apache.spark.streaming.Seconds;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.ArrayList;
import java.util.Iterator;

/**
 * Spark Streaming word count over a sliding window, with a skeleton showing
 * how to persist each window's result to an external store (MySQL/HBase/Redis/ES).
 *
 * <p>Reads lines from a socket, splits them into words, counts each word over a
 * 15-second window sliding every 5 seconds, then walks the result RDDs
 * partition-by-partition so a single connection can be reused per partition.
 */
public class Save {
    public static void main(String[] args) throws InterruptedException {
        // One micro-batch every 5 seconds.
        JavaStreamingContext ssc = new JavaStreamingContext(
                new SparkConf().setMaster("local[*]").setAppName("test"), Seconds.apply(5));
        // Show only errors so each batch's console output is easy to read.
        ssc.sparkContext().setLogLevel("error");
        JavaReceiverInputDStream<String> ds = ssc.socketTextStream("hadoop102", 9999);

        // Split each line into (word, 1) pairs. Blank tokens produced by
        // consecutive spaces (or an empty line) are skipped so the empty
        // string is never counted as a word.
        JavaPairDStream<String, Integer> ds2 = ds.flatMapToPair(
                (PairFlatMapFunction<String, String, Integer>) s -> {
                    ArrayList<Tuple2<String, Integer>> pairs = new ArrayList<>();
                    for (String word : s.split(" ")) {
                        if (!word.isEmpty()) {
                            pairs.add(new Tuple2<>(word, 1));
                        }
                    }
                    return pairs.iterator();
                });

        // NOTE: window length and slide interval must both be integer
        // multiples of the batch interval.
        // reduceByKeyAndWindow == window + reduceByKey in a single call.
        JavaPairDStream<String, Integer> ds3 = ds2.reduceByKeyAndWindow(
                (Function2<Integer, Integer, Integer>) Integer::sum,
                Seconds.apply(15), Seconds.apply(5));

        // Persisting a DStream to MySQL/HBase/Redis/ES etc. is always done via
        // foreachRDD + foreachPartition: one connection per partition rather
        // than per record, created on the executor (JDBC connections are not
        // serializable, so they must not be created on the driver).
        ds3.foreachRDD(rdd -> rdd.foreachPartition(partition -> {
            if (!partition.hasNext()) {
                return; // empty partition: don't open a connection for nothing
            }
            // JDBC skeleton — use try-with-resources and a PreparedStatement:
            // try (Connection conn = DriverManager.getConnection(url, user, pwd);
            //      PreparedStatement ps = conn.prepareStatement(
            //              "INSERT INTO word_count(word, cnt) VALUES (?, ?)")) {
            //     while (partition.hasNext()) {
            //         Tuple2<String, Integer> t = partition.next();
            //         ps.setString(1, t._1);
            //         ps.setInt(2, t._2);
            //         ps.addBatch();
            //     }
            //     ps.executeBatch();
            // }
        }));

        // Start the streaming computation.
        ssc.start();

        // Block the main thread until the context is stopped.
        ssc.awaitTermination();
    }
}
