package com.pw.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.pw.gmall.realtime.app.BaseApp;
import com.pw.gmall.realtime.common.Constant;
import com.pw.gmall.realtime.entities.TableProcess;
import com.pw.gmall.realtime.utils.FlinkSinkUtils;
import com.pw.gmall.realtime.utils.KafkaUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.List;

/**
 * DWD-layer dynamic-splitting app: routes ODS DB change records to Kafka (fact) or HBase (dim).
 *
 * @author linux_future
 * @since 2022/3/16
 **/
@Slf4j
public class DwdDbApp extends BaseApp {

    public static void main(String[] args) {
        // port 2002, parallelism 1, job name / consumer group, source topic
        new DwdDbApp().init(2002, 1, "DwdDbApp",
                "DwdDbApp3", Constant.TOPIC_ODS_DB);
    }

    /**
     * Pipeline entry point.
     *
     * Data flow upstream of this job (Maxwell must be running):
     *   db.jar -> MySQL -> Maxwell -> Kafka -> this stream.
     *
     * Steps:
     *   1. ETL: keep only well-formed insert/update records.
     *   2. Read the table_process config table via MySQL CDC.
     *   3. Connect data stream with the broadcast config stream (dynamic routing).
     *   4. Drop columns not listed in the config's sink_columns.
     *   5. Split into a Kafka-bound (fact) and an HBase-bound (dimension) stream.
     *   6. Write each side out.
     *
     * @param env    Flink execution environment (used to build the CDC table env)
     * @param stream raw Maxwell JSON strings from the ODS DB topic
     */
    @Override
    protected void handler(StreamExecutionEnvironment env, DataStreamSource<String> stream) {
        // 1. ETL: filter down to insert/update records of the target database
        SingleOutputStreamOperator<JSONObject> streamEtl = etlStream(stream);
        //streamEtl.print("etl:");
        // 2. Read the routing configuration (table_process) as a changelog stream
        SingleOutputStreamOperator<TableProcess> tableStream = readProcessTable(env);
        //tableStream.print();
        // 3. Connect data stream and config stream for dynamic routing
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> broadcastStream = broadcastStream(streamEtl, tableStream);
        broadcastStream.print("数据分流：");
        // 4. Strip columns that the config does not ask us to sink
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterStream = filterStream(broadcastStream);
        //filterStream.print();
        // 5. Split by sink_type into (kafka, hbase) side outputs
        Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> splitStream = splitStream(filterStream);
        splitStream.f0.print("kafka");
        splitStream.f1.print("hbase");
        // 6. Write each branch to its sink
        writeSinkKafka(splitStream.f0);
        writeSinkHbase(splitStream.f1);
    }

    /**
     * Writes dimension-table records to HBase through Phoenix.
     *
     * Design notes (why a custom sink instead of the Flink JDBC sink):
     *
     *  1. Phoenix tables are not created automatically, so we must create them.
     *     a) Create them manually up front:
     *        pro: simple, no DDL in code; con: inflexible.
     *     b) Create them from code when the first record of a dimension table
     *        arrives (run a CREATE TABLE IF NOT EXISTS):
     *        pro: adapts automatically to config changes; con: more complex code.
     *     We use option (b).
     *
     *  2. Writing to Phoenix needs TWO SQL statements per table (DDL + upsert),
     *     but the built-in JDBC sink supports only a single statement, so a
     *     custom JDBC-based sink is used instead.
     *
     * Records are keyed by sink_table so each table's DDL/upserts are handled
     * consistently within one subtask.
     *
     * @param stream (row data, routing config) pairs bound for HBase
     */
    private void writeSinkHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        log.info("writeSinkHbase=========================");
        // sample element:
        // ({"birthday":"2000-03-18",...,"id":44}, TableProcess(source_table=user_info, ..., sink_type=hbase, sink_table=dim_user_info, ...))
        stream.keyBy(t -> t.f1.getSink_table()).addSink(FlinkSinkUtils.getPhoenixSink());
    }

    /**
     * Writes fact-table records to Kafka; the target topic comes from the
     * TableProcess config carried with each record.
     *
     * @param stream (row data, routing config) pairs bound for Kafka
     */
    private void writeSinkKafka(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        log.info("writeSinkKafka======================================= ");
        // sample element:
        // ({"callback_time":"2022-03-18 11:46:43",...,"order_id":26451}, TableProcess(source_table=refund_payment, ..., sink_type=kafka, sink_table=dwd_refund_payment, ...))
        stream.addSink(KafkaUtils.getKafkaSink());
    }

    /**
     * Splits the routed stream into a Kafka branch and an HBase branch using
     * side outputs, based on the config's sink_type.
     *
     * Records whose sink_type is neither kafka nor hbase are dropped (nothing
     * is emitted to the main output).
     *
     * @param stream (row data, routing config) pairs
     * @return Tuple2 of (kafka-bound stream, hbase-bound stream)
     */
    private Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> splitStream(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        log.info("splitStream==========================");
        // Anonymous subclasses so the generic type survives erasure
        OutputTag<Tuple2<JSONObject, TableProcess>> kafkaOutPutTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("kafkaOutput") {
        };
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseOutPutTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbaseOutput") {
        };
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> mainStream = stream.process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public void processElement(Tuple2<JSONObject, TableProcess> value, ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>.Context ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                TableProcess tp = value.f1;
                if (Constant.SINK_TYPE_KAFKA.equals(tp.getSink_type())) {
                    ctx.output(kafkaOutPutTag, value);
                } else if (Constant.SINK_TYPE_HBASE.equals(tp.getSink_type())) {
                    ctx.output(hbaseOutPutTag, value);
                }
            }
        });
        DataStream<Tuple2<JSONObject, TableProcess>> kafkaStream = mainStream.getSideOutput(kafkaOutPutTag);
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = mainStream.getSideOutput(hbaseOutPutTag);
        return Tuple2.of(kafkaStream, hbaseStream);
    }

    /**
     * Removes from each row every field that is not listed in the config's
     * sink_columns (comma-separated whitelist). Mutates the JSONObject in
     * place and passes the tuple through.
     *
     * @param stream (row data, routing config) pairs
     * @return the same tuples with non-whitelisted columns removed
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterStream(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterStream = stream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> value) throws Exception {
                JSONObject data = value.f0;
                // All column names that should be sunk, per the config row
                List<String> columns = Arrays.asList(value.f1.getSink_columns().split(","));
                // Keep a key only if it appears in the whitelist; removing the
                // key from the map's keySet view removes it from the JSONObject
                data.keySet().removeIf(key -> !columns.contains(key));
                return value;
            }
        });
        return filterStream;
    }

    /**
     * Connects the data stream with the broadcast config stream.
     *
     * The config stream is broadcast into a MapState keyed by
     * "source_table:operate_type"; each data record looks up its config with
     * "table:type" from the Maxwell envelope. Records without a matching
     * config row are dropped (they are not meant to be sunk).
     *
     * NOTE(review): data arriving before the config broadcast is populated is
     * silently dropped — acceptable here because the CDC source snapshots the
     * config table on startup, but worth confirming for strict completeness.
     *
     * @param streamEtl   cleaned Maxwell records (full envelope)
     * @param tableStream routing configuration rows
     * @return (payload "data" object, matching config) pairs
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> broadcastStream(SingleOutputStreamOperator<JSONObject> streamEtl, SingleOutputStreamOperator<TableProcess> tableStream) {

        MapStateDescriptor<String, TableProcess> tpStateDes = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        // 1. Broadcast the config stream
        BroadcastStream<TableProcess> tableProcessBroadcast = tableStream.broadcast(tpStateDes);
        // 2. Connect the data stream with the broadcast config
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream = streamEtl.connect(tableProcessBroadcast).process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public void processElement(JSONObject value, ReadOnlyContext ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                // Look up the config by "table:type" in the broadcast state
                ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDes);
                String key = value.getString("table") + ":" + value.getString("type");
                log.debug("data-stream key: {}", key);
                TableProcess tp = tpState.get(key);
                // Some tables have no config row: their data is not sunk, so tp is null
                if (tp != null) {
                    // The envelope's metadata is already reflected in tp, so
                    // only the "data" payload needs to travel downstream
                    out.collect(Tuple2.of(value.getJSONObject("data"), tp));
                }
            }

            @Override
            public void processBroadcastElement(TableProcess value, Context ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                // Store/refresh the config row under "source_table:operate_type"
                BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDes);
                String key = value.getSource_table() + ":" + value.getOperate_type();
                log.debug("config-stream key: {}", key);
                tpState.put(key, value);
            }
        });
        return stream;
    }

    /**
     * Reads the routing configuration table (gmall2022_realtime.table_process)
     * as a changelog stream via the Flink MySQL CDC connector.
     *
     * snapshot.mode=initial reads the full table first, then follows inserts
     * and updates. Retract messages (f0 == false) are filtered out so only
     * the latest value of each config row is forwarded.
     *
     * @param env Flink execution environment to attach the table env to
     * @return stream of TableProcess config rows
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        tEnv.executeSql("CREATE TABLE `table_process`( " +
                "   `source_table`  string, " +
                "   `operate_type`  string, " +
                "   `sink_type`  string, " +
                "   `sink_table`  string, " +
                "   `sink_columns` string, " +
                "   `sink_pk`  string, " +
                "   `sink_extend`  string, " +
                "   PRIMARY KEY (`source_table`,`operate_type`)  NOT ENFORCED" +
                ")with(" +
                "   'connector' = 'mysql-cdc', " +
                "   'hostname' = 'hadoop162', " +
                "   'port' = '3306', " +
                "   'username' = 'root', " +
                "   'password' = 'aaaaaa'," +
                "   'database-name' = 'gmall2022_realtime', " +
                "   'table-name' = 'table_process'," +
                "   'debezium.snapshot.mode' = 'initial' " +  // full snapshot, then incremental changes
                ")"
        );

        Table table_process = tEnv.from("table_process");

        SingleOutputStreamOperator<TableProcess> tableStream = tEnv.toRetractStream(table_process, TableProcess.class)
                .filter(t -> t.f0).map(t -> t.f1);  // keep add-messages only
        return tableStream;
    }

    /**
     * ETL step: parses raw Maxwell JSON and keeps only usable change records.
     *
     * Kept records must:
     *  - have a "table" name and a non-trivial "data" object
     *    (length() > 3 rejects an empty "{}" payload),
     *  - come from database "gmall2022",
     *  - be of type insert or update.
     *
     * The "bootstrap-" prefix Maxwell puts on bootstrap rows (e.g.
     * "bootstrap-insert") is stripped so bootstrap data is treated like
     * regular inserts.
     *
     * @param stream raw Maxwell JSON strings
     * @return parsed and filtered change records
     */
    private SingleOutputStreamOperator<JSONObject> etlStream(DataStreamSource<String> stream) {
        log.info("......etl start.........................");
        SingleOutputStreamOperator<JSONObject> etlResult = stream.map(obj -> JSON.parseObject(obj.replaceAll("bootstrap-", ""))).filter(obj ->
                obj.getString("table") != null && obj.getJSONObject("data") != null
                        && obj.getString("data").length() > 3
                        && "gmall2022".equals(obj.getString("database"))
                        && ("insert".equals(obj.getString("type")) || "update".equals(obj.getString("type")))
        );
        return etlResult;
    }
}
