package app;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.HasOffsetRanges;
import org.apache.spark.streaming.kafka010.OffsetRange;
import redis.clients.jedis.Jedis;
import util.MyKafkaUtils;
import util.MyOffsetsUtils;
import util.MyRedisUtils;

import java.util.*;

/**
 * Business (DB change-log) data consumption and splitting.
 *
 * 1. Set up the streaming environment
 *
 * 2. Read committed offsets from Redis
 *
 * 3. Consume data from Kafka
 *
 * 4. Capture the end offsets of the consumed batch
 *
 * 5. Process the data
 *     5.1 Convert the record structure (string -> JSON)
 *     5.2 Split the stream
 *         fact data      => Kafka
 *         dimension data => Redis
 * 6. Flush the Kafka producer buffer
 *
 * 7. Commit the offsets
 *
 *
 */
public class OdsBaseDbApp {
    // Redis set holding the names of fact tables (their rows are forwarded to Kafka)
    final static String redisFactKeys= "FACT:TABLES";
    // Redis set holding the names of dimension tables (their rows are cached in Redis)
    final static String redisDimKeys = "DIM:TABLES";

    /**
     * Entry point: consumes CDC change events from the ODS Kafka topic,
     * splits fact rows to per-table DWD Kafka topics and dimension rows
     * to Redis, then commits offsets back to Redis.
     *
     * @param args unused
     * @throws InterruptedException if the streaming context is interrupted
     */
    public static void main(String[] args) throws InterruptedException {
        SparkConf conf = new SparkConf().setMaster("local[4]").setAppName("ods_base_db_app");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));
        final String topicName = "ODS_BASE_DB_1018";  // topic name configured in the data generator
        final String groupId = "ODS_BASE_LOG_GROUP_1018";

        // 2. Read previously committed offsets from Redis
        Map<TopicPartition, Long> offsets = MyOffsetsUtils.readOffset(topicName, groupId);

        // 3. Consume from Kafka, resuming from the stored offsets when available
        JavaInputDStream<ConsumerRecord<Object, Object>> kafkaDStream;
        if (offsets != null && !offsets.isEmpty()) {
            kafkaDStream = MyKafkaUtils.getKafkaDStream(jssc, topicName, groupId, offsets);
        } else {
            kafkaDStream = MyKafkaUtils.getKafkaDStream(jssc, topicName, groupId);
        }

        // 4. Capture the end offsets of each batch without touching the data.
        //    transform() runs on the driver once per batch, so collecting into
        //    this driver-side list is safe; it is drained in foreachRDD below.
        final List<OffsetRange> offsetRangeList = new ArrayList<OffsetRange>();
        JavaDStream<ConsumerRecord<Object, Object>> offsetRangesDStream = kafkaDStream.transform(rdd -> {
            OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
            Collections.addAll(offsetRangeList, offsetRanges);
            return rdd;
        });

        // 5.1 Convert each record value into a JSON object
        JavaDStream<JSONObject> jsonObjDStream = offsetRangesDStream.map(
                record -> JSON.parseObject(String.valueOf(record.value())));

        jsonObjDStream.foreachRDD(rdd -> {
            // How do we keep the table lists dynamic?
            // They are maintained in Redis and re-read by this job every batch:
            //   type:   set
            //   keys:   FACT:TABLES / DIM:TABLES
            //   value:  set of table names
            //   write:  sadd ; read: smembers ; expiry: none
            // This section runs on the driver; the lists are broadcast so the
            // executors always see the current batch's snapshot.
            Jedis jedis = MyRedisUtils.getJedisFromPool();
            Set<String> factTables = jedis.smembers(redisFactKeys);
            Broadcast<Set<String>> factTablesBC = jssc.sparkContext().broadcast(factTables);
            Set<String> dimTables = jedis.smembers(redisDimKeys);
            Broadcast<Set<String>> dimTablesBC = jssc.sparkContext().broadcast(dimTables);
            jedis.close();

            rdd.foreachPartition((VoidFunction<Iterator<JSONObject>>) jsonObjectIterator -> {
                Jedis partitionJedis = MyRedisUtils.getJedisFromPool();
                try {
                    while (jsonObjectIterator.hasNext()) {
                        JSONObject jsonObj = jsonObjectIterator.next();
                        // Classify the operation; null means "not interesting"
                        // (e.g. DDL or bootstrap bookkeeping) and is filtered out.
                        String opValue = mapOperType(jsonObj.getString("type"));
                        if (opValue == null) {
                            continue;
                        }
                        String tableName = jsonObj.getString("table");

                        if (factTablesBC.value().contains(tableName)) {
                            // Fact data => Kafka, one topic per table and operation,
                            // e.g. DWD_ORDER_INFO_I / DWD_ORDER_INFO_U / DWD_ORDER_INFO_D
                            String data = jsonObj.getString("data");
                            String dwdTopicName = "DWD_" + tableName.toUpperCase() + "_" + opValue + "_1018";
                            MyKafkaUtils.send(dwdTopicName, data);
                        }

                        if (dimTablesBC.value().contains(tableName)) {
                            // Dimension data => Redis.
                            // Storage choice: one row per key as a JSON string
                            // (a whole-table hash would concentrate load and grow
                            // unboundedly; per-row strings spread access evenly).
                            //   key:    DIM:<TABLE>:<id>
                            //   value:  full row as a JSON string
                            //   write:  set ; read: get ; expiry: none
                            JSONObject dataObj = jsonObj.getJSONObject("data");
                            String id = dataObj.getString("id");
                            String redisKey = "DIM:" + tableName.toUpperCase() + ":" + id;
                            partitionJedis.set(redisKey, dataObj.toJSONString());
                        }
                    }
                } finally {
                    // Return the connection to the pool even if a record fails.
                    partitionJedis.close();
                }
                // 6. Flush the Kafka producer buffer so nothing is lost before
                //    the offsets are committed below.
                MyKafkaUtils.flush();
            });

            // 7. Commit offsets only after the whole batch has been processed
            MyOffsetsUtils.saveOffset(topicName, groupId, offsetRangeList);
            offsetRangeList.clear();
        });

        jssc.start();
        jssc.awaitTermination();
    }

    /**
     * Maps a CDC change-event type (Maxwell-style, judging by the
     * "bootstrap-insert" marker — confirm against the producer) to a
     * single-letter operation code.
     *
     * Fixes the original chain, which tested "insert" twice: inserts were
     * mislabeled "U" and updates were dropped entirely.
     *
     * @param operType raw "type" field of the change event; may be null
     * @return "I" for (bootstrap-)inserts, "U" for updates, "D" for deletes,
     *         or null for any other or missing type
     */
    private static String mapOperType(String operType) {
        if (operType == null) {
            return null;
        }
        if (operType.contains("bootstrap-insert") || operType.contains("insert")) {
            return "I";
        }
        if (operType.contains("update")) {
            return "U";
        }
        if (operType.contains("delete")) {
            return "D";
        }
        return null;
    }
}
