package com.bw.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.bw.been.TableProcess;
import com.bw.func.TableProcessFunc;
import com.bw.util.MyDeserializationSchemaFunction;
import com.bw.util.MyKafkaUtil;
import com.bw.util.SinkHBaseUtil;

import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.nio.charset.StandardCharsets;

import javax.annotation.Nullable;

/**
 * Flink streaming job that routes the ODS-level CDC change-log stream
 * (Kafka topic {@code ods_base_db}) into two destinations, driven by a
 * broadcast configuration table ({@code table_process}) read from MySQL
 * via CDC:
 * <ul>
 *   <li>fact records stay on the main stream and are written to Kafka,
 *       one dynamic topic per record (taken from its {@code sinkTable}
 *       field);</li>
 *   <li>dimension records are emitted to the {@code hbase_db} side
 *       output (HBase sink currently disabled — see note below).</li>
 * </ul>
 */
public class BaseDB {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Read the raw change-log stream from Kafka.
        String groupId = "BaseDBDwd";
        String topic = "ods_base_db";

        FlinkKafkaConsumer<String> kafkaConsumer = MyKafkaUtil.getKafkaSource(groupId, topic);
        DataStreamSource<String> source = env.addSource(kafkaConsumer);

        // Parse each record as JSON, dropping records that fail to parse (dirty data).
        SingleOutputStreamOperator<JSONObject> process = source.process(new ProcessFunction<String, JSONObject>() {
            @Override
            public void processElement(String s,
                                       ProcessFunction<String, JSONObject>.Context context,
                                       Collector<JSONObject> collector) throws Exception {
                try {
                    JSONObject jsonObject = JSON.parseObject(s);
                    collector.collect(jsonObject);
                } catch (Exception e) {
                    // Best-effort filter: keep the job alive on bad input, but log the
                    // offending payload so dirty records can be inspected rather than
                    // vanishing without a trace.
                    System.out.println("有脏数据: " + s);
                }
            }
        });

        // Keep only change-log entries that actually carry a "data" payload
        // (e.g. deletes produced by some CDC formats may have none).
        SingleOutputStreamOperator<JSONObject> filter = process.filter(new FilterFunction<JSONObject>() {
            @Override
            public boolean filter(JSONObject jsonObject) throws Exception {
                String data = jsonObject.getString("data");
                return data != null && !data.isEmpty();
            }
        });

        // Read the routing configuration table from MySQL via CDC.
        SourceFunction<String> sourceFunction = MySQLSource.<String>builder()
                .hostname("hadoop101")
                .port(3306)
                .databaseList("gmall-2022-realtime")
                .tableList("gmall-2022-realtime.table_process")
                .username("root")
                .password("123456")
                .deserializer(new MyDeserializationSchemaFunction()) // converts SourceRecord to a JSON string
//                .startupOptions(StartupOptions.earliest()) // enable for a full initial snapshot of the config table
                .build();
        DataStreamSource<String> mysqlDS = env.addSource(sourceFunction);

        // State descriptor for the broadcast config: key = source table name,
        // value = its routing rule (TableProcess).
        MapStateDescriptor<String, TableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("table_process", String.class, TableProcess.class);
        // Broadcast the configuration so every parallel task shares the same routing rules.
        BroadcastStream<String> broadcastStream = mysqlDS.broadcast(mapStateDescriptor);
        // Connect the main data stream with the broadcast configuration stream.
        BroadcastConnectedStream<JSONObject, String> connect = filter.connect(broadcastStream);

        // Side-output tag: dimension records routed to HBase; fact records stay
        // on the main stream and go to Kafka.
        OutputTag<JSONObject> hbaseOutput = new OutputTag<JSONObject>("hbase_db") {};
        // Split the stream according to the broadcast routing rules.
        SingleOutputStreamOperator<JSONObject> process1 =
                connect.process(new TableProcessFunc(hbaseOutput, mapStateDescriptor));

//        process1.print("kafka>>>>>>");
//        process1.getSideOutput(hbaseOutput).print("hbase>>>>>>");

        // Dynamic-topic Kafka sink: each record is written to the topic named
        // in its "sinkTable" field.
        FlinkKafkaProducer<JSONObject> kafkaSInkBySchema = MyKafkaUtil.getKafkaSInkBySchema(new KafkaSerializationSchema<JSONObject>() {
            @Override
            public void open(SerializationSchema.InitializationContext context) throws Exception {
                System.out.println("序列化>>>>");
            }

            @Override
            public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObject, @Nullable Long aLong) {
                // Explicit UTF-8: a bare getBytes() uses the platform default charset,
                // which can corrupt non-ASCII payloads on mis-configured hosts.
                return new ProducerRecord<>(
                        jsonObject.getString("sinkTable"),
                        jsonObject.toJSONString().getBytes(StandardCharsets.UTF_8)
                );
            }
        });

        process1.addSink(kafkaSInkBySchema);
        // NOTE(review): SinkHBaseUtil is imported but the HBase side output is never
        // consumed while the line below stays commented out — confirm whether the
        // dimension sink should be enabled, otherwise dimension data is silently dropped.
//        process1.getSideOutput(hbaseOutput).addSink(new SinkHBaseUtil());

        env.execute();
    }
}
