package com.atguigu.bigdata.spark.streaming;

import com.atguigu.bigdata.spark.util.JDBCUtil;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;
import scala.Tuple3;
import scala.Tuple4;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * Requirement 1: ad-click blacklist.
 *
 * Reads ad-click events from Kafka ("ts area city user ad", space separated),
 * filters out clicks from blacklisted users, counts clicks per (day, user, ad)
 * for each 3-second batch, and persists the counts to MySQL. A user whose daily
 * click count on a single ad reaches {@link #BLACKLIST_THRESHOLD} is inserted
 * into the {@code black_list} table.
 */
public class SparkStreaming11_Req1_BlackList_JAVA {

    /** A user whose per-day clicks on one ad reach this count is blacklisted. */
    private static final int BLACKLIST_THRESHOLD = 30;

    public static void main(String[] args) throws InterruptedException {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(3));

        // Kafka consumer configuration.
        Map<String, Object> kafkaPara = new HashMap<>();
        kafkaPara.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        kafkaPara.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaPara.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaPara.put(ConsumerConfig.GROUP_ID_CONFIG, "atguigu");
        String topic = "test111";

        JavaInputDStream<ConsumerRecord<String, String>> kafkaDataDS = KafkaUtils.createDirectStream(
                jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.Subscribe(Collections.singleton(topic), kafkaPara));

        // Parse each Kafka record value into an AdClickData bean.
        JavaDStream<AdClickData> adClickData = kafkaDataDS.map(new Function<ConsumerRecord<String, String>, AdClickData>() {
            @Override
            public AdClickData call(ConsumerRecord<String, String> record) throws Exception {
                // NOTE(review): assumes every record has exactly 5 space-separated fields;
                // a malformed record throws ArrayIndexOutOfBoundsException — confirm producer format.
                String[] fields = record.value().split(" ");
                return new AdClickData(fields[0], fields[1], fields[2], fields[3], fields[4]);
            }
        });
        adClickData.print();

        // TODO Periodically refresh the blacklist from MySQL. transform() runs this
        // body on the driver once per batch, which is what makes the refresh periodic.
        JavaDStream<Tuple4<String, String, String, Integer>> adClickDataDS = adClickData.transform(
                new Function<JavaRDD<AdClickData>, JavaRDD<Tuple4<String, String, String, Integer>>>() {
            @Override
            public JavaRDD<Tuple4<String, String, String, Integer>> call(JavaRDD<AdClickData> rdd) throws Exception {
                // Load the current blacklist; try-with-resources guarantees the JDBC
                // resources are released even if the query throws.
                List<String> blackList = new ArrayList<>();
                try (Connection conn = JDBCUtil.getConnection();
                     PreparedStatement pstat = conn.prepareStatement("select userid from black_list");
                     ResultSet rs = pstat.executeQuery()) {
                    while (rs.next()) {
                        blackList.add(rs.getString(1));
                    }
                }

                // TODO Drop clicks whose user is already blacklisted.
                JavaRDD<AdClickData> filterRDD = rdd.filter(new Function<AdClickData, Boolean>() {
                    @Override
                    public Boolean call(AdClickData data) throws Exception {
                        return !blackList.contains(data.getUser());
                    }
                });

                // TODO Count the remaining clicks per (day, user, ad) for this batch.
                JavaPairRDD<Tuple3<String, String, String>, Integer> pairRdd =
                        filterRDD.mapToPair(new PairFunction<AdClickData, Tuple3<String, String, String>, Integer>() {
                    @Override
                    public Tuple2<Tuple3<String, String, String>, Integer> call(AdClickData data) throws Exception {
                        // SimpleDateFormat is not thread-safe, so one instance per call.
                        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
                        String day = sdf.format(new java.util.Date(Long.parseLong(data.getTs())));
                        return new Tuple2<>(new Tuple3<>(day, data.getUser(), data.getAd()), 1);
                    }
                });

                JavaPairRDD<Tuple3<String, String, String>, Integer> reduced =
                        pairRdd.reduceByKey(new Function2<Integer, Integer, Integer>() {
                    @Override
                    public Integer call(Integer v1, Integer v2) throws Exception {
                        return v1 + v2;
                    }
                });

                // Flatten (key, count) into (day, user, ad, count) for downstream use.
                return reduced.map(new Function<Tuple2<Tuple3<String, String, String>, Integer>, Tuple4<String, String, String, Integer>>() {
                    @Override
                    public Tuple4<String, String, String, Integer> call(Tuple2<Tuple3<String, String, String>, Integer> kv) throws Exception {
                        Tuple3<String, String, String> key = kv._1;
                        return new Tuple4<>(key._1(), key._2(), key._3(), kv._2);
                    }
                });
            }
        });

        // TODO If a user's click count reaches the threshold, blacklist them;
        // otherwise accumulate today's count in user_ad_count and re-check.
        //
        // BUG FIX: the original code put the JDBC writes inside rdd.map(...) but never
        // triggered an action on the mapped RDD, so the lazy transformation never ran
        // and nothing was ever written to MySQL. foreachPartition is an action; it also
        // creates one connection per partition instead of one per record (connection
        // objects are not serializable, so they must be created on the executor side).
        adClickDataDS.foreachRDD(new VoidFunction<JavaRDD<Tuple4<String, String, String, Integer>>>() {
            @Override
            public void call(JavaRDD<Tuple4<String, String, String, Integer>> rdd) throws Exception {
                rdd.foreachPartition(new VoidFunction<Iterator<Tuple4<String, String, String, Integer>>>() {
                    @Override
                    public void call(Iterator<Tuple4<String, String, String, Integer>> iter) throws Exception {
                        try (Connection conn = JDBCUtil.getConnection()) {
                            while (iter.hasNext()) {
                                Tuple4<String, String, String, Integer> t = iter.next();
                                String day = t._1();
                                String user = t._2();
                                String ad = t._3();
                                int count = t._4();
                                if (count >= BLACKLIST_THRESHOLD) {
                                    addToBlackList(conn, user);
                                } else {
                                    accumulateAndCheck(conn, day, user, ad, count);
                                }
                            }
                        }
                    }
                });
            }
        });

        // 1. Start the streaming context.
        jssc.start();
        // 2. Block until the streaming context is terminated.
        jssc.awaitTermination();
    }

    /** Inserts the user into black_list; the upsert makes repeated inserts idempotent. */
    private static void addToBlackList(Connection conn, String user) throws SQLException {
        try (PreparedStatement pstat = conn.prepareStatement(
                "insert into black_list (userid) values (?) on DUPLICATE KEY UPDATE userid = ?")) {
            pstat.setString(1, user);
            pstat.setString(2, user);
            pstat.executeUpdate();
        }
    }

    /**
     * Adds {@code count} to the user's (day, ad) total in user_ad_count — inserting the
     * row if it does not exist — then blacklists the user if the accumulated total has
     * reached {@link #BLACKLIST_THRESHOLD}.
     */
    private static void accumulateAndCheck(Connection conn, String day, String user, String ad, int count)
            throws SQLException {
        // Does a row for this (day, user, ad) already exist?
        boolean exists;
        try (PreparedStatement pstat = conn.prepareStatement(
                "select * from user_ad_count where dt = ? and userid = ? and adid = ?")) {
            pstat.setString(1, day);
            pstat.setString(2, user);
            pstat.setString(3, ad);
            try (ResultSet rs = pstat.executeQuery()) {
                exists = rs.next();
            }
        }
        if (exists) {
            // Accumulate this batch's count into the existing row.
            try (PreparedStatement pstat = conn.prepareStatement(
                    "update user_ad_count set count = count + ? where dt = ? and userid = ? and adid = ?")) {
                pstat.setInt(1, count);
                pstat.setString(2, day);
                pstat.setString(3, user);
                pstat.setString(4, ad);
                pstat.executeUpdate();
            }
            // The cross-batch total may now have crossed the threshold — re-check it.
            try (PreparedStatement pstat = conn.prepareStatement(
                    "select * from user_ad_count where dt = ? and userid = ? and adid = ? and count >= ?")) {
                pstat.setString(1, day);
                pstat.setString(2, user);
                pstat.setString(3, ad);
                pstat.setInt(4, BLACKLIST_THRESHOLD);
                try (ResultSet rs = pstat.executeQuery()) {
                    if (rs.next()) {
                        addToBlackList(conn, user);
                    }
                }
            }
        } else {
            // First sighting of this (day, user, ad): insert a fresh row.
            try (PreparedStatement pstat = conn.prepareStatement(
                    "insert into user_ad_count ( dt, userid, adid, count ) values ( ?, ?, ?, ? )")) {
                pstat.setString(1, day);
                pstat.setString(2, user);
                pstat.setString(3, ad);
                pstat.setInt(4, count);
                pstat.executeUpdate();
            }
        }
    }
}
