package sparkdemo;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;

import java.util.*;

/**
 * ./spark-submit --class  sparkdemo.RealTimeOrderStatsApp  --master spark://192.40.10.130:7077   /root/myspark/jars/spark-product-1.0-SNAPSHOT.jar
 */
public class RealTimeOrderStatsApp {

    /** Kafka broker(s) used both for consuming source orders and for producing stats. */
    private static final String BOOTSTRAP_SERVERS_CONFIG = "192.40.10.187:6667";

    public static void main(String[] args) {
        runRealTimeOrderStats();
    }

    /**
     * Builds and runs the streaming job: reads tab-separated order records from the
     * Kafka {@code orders} topic in 2-second micro-batches, computes per-batch
     * validity/category counts, prints each result stream, and publishes every
     * result set to its own Kafka topic.
     */
    public static void runRealTimeOrderStats() {
        // Spark Streaming configuration. local[*] is for development;
        // spark-submit --master (see class header) overrides it in a cluster.
        SparkConf sparkConf = new SparkConf()
                .setAppName("RealTimeOrderStats")
                .setMaster("local[*]");

        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(2));
        // (removed) an unused SparkSession was previously created here with no readers.

        // Kafka consumer configuration.
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
        kafkaParams.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        kafkaParams.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, "niit");
        kafkaParams.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // NOTE(review): auto-commit is disabled but offsets are never committed manually,
        // so records may be re-processed after a restart — confirm this is intentional.
        kafkaParams.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        // Kafka topic(s) subscribed to.
        Collection<String> topics = Collections.singletonList("orders");

        JavaDStream<String> stream = KafkaUtils.createDirectStream(
                jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams)
        ).map(record -> record.value());

        // Parse raw lines into Orders, dropping malformed records instead of
        // letting a parse error fail the whole batch.
        JavaDStream<Order> orderDStream = stream
                .map(RealTimeOrderStatsApp::parseOrder)
                .filter(Objects::nonNull);

        // 1) Total count of Valid vs Invalid orders across the batch.
        JavaPairDStream<String, Integer> globalValidInvalidCounts = orderDStream
                .mapToPair(order -> new Tuple2<>(order.getIsValid(), 1))
                .reduceByKey(Integer::sum);
        globalValidInvalidCounts.print();

        // 2) Valid/Invalid counts per product.
        JavaPairDStream<Tuple2<String, String>, Integer> productValidInvalidCounts = orderDStream
                .mapToPair(order -> new Tuple2<>(
                        new Tuple2<>(order.getProductName(), order.getIsValid()), 1))
                .reduceByKey(Integer::sum);
        productValidInvalidCounts.print();

        // 3) Order count per category.
        JavaPairDStream<String, Integer> categoryCounts = orderDStream
                .mapToPair(order -> new Tuple2<>(order.getCategory(), 1))
                .reduceByKey(Integer::sum);
        categoryCounts.print();

        // 4) Valid/Invalid counts per category.
        JavaPairDStream<Tuple2<String, String>, Integer> categoryValidInvalidCounts = orderDStream
                .mapToPair(order -> new Tuple2<>(
                        new Tuple2<>(order.getCategory(), order.getIsValid()), 1))
                .reduceByKey(Integer::sum);
        categoryValidInvalidCounts.print();

        // 5) Valid/Invalid counts per (category, product) pair.
        JavaPairDStream<Tuple2<Tuple2<String, String>, String>, Integer> categoryProductValidInvalidCounts =
                orderDStream
                        .mapToPair(order -> new Tuple2<>(
                                new Tuple2<>(
                                        new Tuple2<>(order.getCategory(), order.getProductName()),
                                        order.getIsValid()),
                                1))
                        .reduceByKey(Integer::sum);
        categoryProductValidInvalidCounts.print();

        // Push each result stream to its own Kafka topic. One shared helper replaces
        // five previously duplicated foreachRDD/foreachPartition/producer blocks.
        publishCounts(globalValidInvalidCounts, "order_valid_invalid_count",
                t -> t._1() + "\t" + t._2());
        publishCounts(productValidInvalidCounts, "product_order_stats",
                t -> t._1()._1() + "\t" + t._1()._2() + "\t" + t._2());
        publishCounts(categoryCounts, "category_order_counts",
                t -> t._1() + "\t" + t._2());
        publishCounts(categoryValidInvalidCounts, "category_valid_invalid_counts",
                t -> t._1()._1() + "\t" + t._1()._2() + "\t" + t._2());
        publishCounts(categoryProductValidInvalidCounts, "category_product_order_stats",
                t -> t._1()._1()._1() + "\t" + t._1()._1()._2() + "\t"
                        + t._1()._2() + "\t" + t._2());

        // Start receiving and processing data; block until terminated.
        jssc.start();
        try {
            jssc.awaitTermination();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status
            System.out.println("Interrupted. Shutting down the streaming context.");
            jssc.stop();
        }
    }

    /**
     * Parses one tab-separated record of the form
     * {@code orderType \t orderName \t orderQuantity \t orderDate \t isValid}.
     *
     * @param line the raw Kafka message value
     * @return the parsed Order, or {@code null} for records with the wrong field
     *         count or a non-numeric quantity (callers filter out nulls)
     */
    private static Order parseOrder(String line) {
        String[] fields = line.split("\t");
        if (fields.length != 5) {
            return null;
        }
        try {
            return new Order(fields[0], fields[1], Integer.parseInt(fields[2]), fields[3], fields[4]);
        } catch (NumberFormatException e) {
            // Previously uncaught: a single bad quantity would fail the whole batch.
            return null;
        }
    }

    /**
     * Publishes every (key, count) pair of each micro-batch RDD to the given Kafka
     * topic, one tab-separated message per pair (no message key). A producer is
     * created and closed per partition per batch, which keeps the code simple at
     * the cost of connection churn; pool the producer if throughput matters.
     *
     * @param counts    per-batch aggregated counts to publish
     * @param topic     destination Kafka topic
     * @param formatter serializable function turning a (key, count) tuple into the message body
     * @param <K>       key type of the aggregated stream
     */
    private static <K> void publishCounts(JavaPairDStream<K, Integer> counts,
                                          String topic,
                                          Function<Tuple2<K, Integer>, String> formatter) {
        counts.foreachRDD(rdd -> rdd.foreachPartition(partition -> {
            KafkaProducer<String, String> producer = createKafkaProducer();
            try {
                while (partition.hasNext()) {
                    producer.send(new ProducerRecord<>(topic, null, formatter.call(partition.next())));
                }
            } finally {
                producer.close(); // flushes buffered records before closing
            }
        }));
    }

    /**
     * Creates a String/String Kafka producer pointed at {@link #BOOTSTRAP_SERVERS_CONFIG}.
     * Callers are responsible for closing it.
     */
    private static KafkaProducer<String, String> createKafkaProducer() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        return new KafkaProducer<>(props);
    }
}