package com.practice.gmall.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.practice.gmall.realtime.app.BaseApp;
import com.practice.gmall.realtime.bean.TableProcess;
import com.practice.gmall.realtime.common.Constant;
import com.practice.gmall.realtime.util.FlinkSinkUtil;
import com.practice.gmall.realtime.util.JdbcUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;

/**
 * @author Ethan
 * @description Consumes dimension-layer data from Kafka and writes it to Phoenix DIM tables.
 * todo Write a summary on how to decouple this code and how to design it for better reuse.
 * @time 2023-02-08
 */
@Slf4j
public class DimApp extends BaseApp {
    public static void main(String[] args) {
        // Bootstrap the DIM job: local web-UI port, job name, parallelism,
        // Kafka brokers and the ODS topic to consume.
        final int webUiPort = 3333;
        final int parallelism = 2;
        new DimApp().init(webUiPort, "DimApp", parallelism, Constant.KAFKA_BROKERS, "ods_db");
    }

    /**
     * Wires the DIM pipeline: clean the ODS stream, read the Phoenix sink
     * configuration via CDC, pre-create the target tables, join data with
     * config through broadcast state, strip unconfigured columns, and sink.
     *
     * @param env    the shared streaming environment
     * @param stream raw Maxwell JSON strings consumed from the ods_db topic
     */
    @Override
    protected void handle(StreamExecutionEnvironment env, DataStreamSource<String> stream) {
        // 1. keep only well-formed, dim-relevant records
        DataStream<String> cleaned = elt(stream);

        // 2. configuration rows from MySQL via Flink CDC
        SingleOutputStreamOperator<TableProcess> configStream = readTableProcess(env);

        // 2.2 make sure every configured sink table exists in Phoenix
        configStream = creatTableInPhoenix(configStream);

        // 3. broadcast the config stream and join it with the data stream
        SingleOutputStreamOperator<Tuple2<TableProcess, JSONObject>> joined = connect(cleaned, configStream);

        // 3.1 drop data fields that are not listed in the sink configuration
        SingleOutputStreamOperator<Tuple2<TableProcess, JSONObject>> trimmed = deleteColumns(joined);

        // 4. persist the dimension records into Phoenix
        writeToPhoenix(trimmed);
    }

    /**
     * Keeps, in each data record, only the columns configured in
     * {@code TableProcess.sinkColumns} plus the synthetic {@code operate_type}
     * field that {@code connect()} injects; every other field is removed.
     *
     * @param ds joined stream of (config, data) pairs
     * @return the same pairs with the data object trimmed in place
     */
    private SingleOutputStreamOperator<Tuple2<TableProcess, JSONObject>> deleteColumns(SingleOutputStreamOperator<Tuple2<TableProcess, JSONObject>> ds) {
        return ds.map((MapFunction<Tuple2<TableProcess, JSONObject>, Tuple2<TableProcess, JSONObject>>) value -> {
            TableProcess tp = value.f0;
            JSONObject data = value.f1;

            // BUG FIX: the previous code looked at only the FIRST configured column
            // and removed keys that were already absent — a complete no-op. The
            // intent (see the original commented-out line) is to drop every data
            // field NOT listed in sink_columns, keeping "operate_type" which is
            // added upstream and consumed by the Phoenix sink.
            List<String> columns = Arrays.asList(tp.getSinkColumns().split(","));
            data.keySet().removeIf(key -> !columns.contains(key) && !"operate_type".equals(key));

            return value;
        }).returns(new TypeHint<Tuple2<TableProcess, JSONObject>>() {});
    }

    /**
     * Sinks each (config, data) pair into Phoenix using the custom sink from
     * FlinkSinkUtil; the sink presumably derives the target table per record
     * from {@code TableProcess.sinkTable} — confirm in FlinkSinkUtil.
     */
    private void writeToPhoenix(SingleOutputStreamOperator<Tuple2<TableProcess, JSONObject>> ds) {
        ds.addSink(FlinkSinkUtil.getPhoenixSink());


        // todo Why can't the JDBC connector write to multiple tables at the same time?
        //      (kept below: an abandoned hand-rolled JDBC alternative to the custom sink)
        /*// use jdbc api to write data to phoenix
        // 1. get connection
        Connection conn = JdbcUtil.getPhoenixConn(Constant.PHOENIX_JDBC_DRIVER, Constant.PHOENIX_DB_URL, null, null);
        // 2. format sql statement
        dataWithConfig.process(new ProcessFunction<Tuple2<TableProcess, JSONObject>, Void>() {

            private StringBuffer sql;

            @Override
            public void processElement(Tuple2<TableProcess, JSONObject> tuple2,
                                       Context context,
                                       Collector<Void> collector) throws Exception {
                String skTable = tuple2.f0.getSinkTable();
                String skColumns = tuple2.f0.getSinkColumns();
                Set<String> keySet = tuple2.f1.keySet();
                Iterator<String> it = keySet.iterator();
                String skValues = Arrays.stream(skColumns.split(",")).map(e -> tuple2.f1.getString(e)).collect(Collectors.joining(","));

                sql = new StringBuffer();
                sql.append("UPSERT INTO ")
                        .append(skTable)
                        .append("(" + skColumns + ") ")
                        .append("VALUES")
                        .append("(" + skValues + ")");

                System.out.println("DimApp.processElement " + sql.toString());
                PreparedStatement statement = conn.prepareStatement(sql.toString());
                statement.execute();
                conn.commit();
            }
        });

        // 3. execute*/
    }

    /**
     * Broadcasts the low-volume MySQL config stream to every subtask of the
     * data stream and joins them: a data record is emitted (paired with its
     * config) only when a matching TableProcess entry exists in broadcast state.
     *
     * @param stream cleaned Maxwell JSON records
     * @param tps    TableProcess configuration records from CDC
     * @return joined (config, data) pairs for configured dimension tables
     */
    private SingleOutputStreamOperator<Tuple2<TableProcess, JSONObject>> connect(DataStream<String> stream, SingleOutputStreamOperator<TableProcess> tps) {
        // State key: "<sink_table>:<source_type>"; value: the TableProcess row.
        MapStateDescriptor<String, TableProcess> stateDes = new MapStateDescriptor<>("config", String.class, TableProcess.class);

        // 1. broadcast the config stream so every parallel subtask sees it
        BroadcastStream<TableProcess> broadcastStream = tps.broadcast(stateDes);

        // 2. connect and process the two streams
        return stream.connect(broadcastStream)
                .process(new BroadcastProcessFunction<String, TableProcess, Tuple2<TableProcess, JSONObject>>() {

                    @Override
                    public void processElement(String obj,
                                               ReadOnlyContext readOnlyContext,
                                               Collector<Tuple2<TableProcess, JSONObject>> out) throws Exception {

                        ReadOnlyBroadcastState<String, TableProcess> broadcastState = readOnlyContext.getBroadcastState(stateDes);

                        JSONObject jsonObject = JSON.parseObject(obj);
                        String table = jsonObject.getString("table");
                        JSONObject entry = JSON.parseObject(jsonObject.getString("data"));

                        // Carry the Maxwell operation type along with the payload so
                        // deleteColumns() can keep it and the sink can react to it.
                        entry.put("operate_type",jsonObject.getString("type"));

                        // Must mirror getKey(): DIM sink tables are named
                        // "dim_<source table>" and their config rows use type "ALL".
                        String key = "dim_" + table + ":ALL";

                        // FIX: single state lookup + null check, instead of the original
                        // get() followed by a redundant contains() on the same key.
                        TableProcess tp = broadcastState.get(key);
                        if (tp != null) {
                            out.collect(Tuple2.of(tp, entry));
                        }
                    }

                    @Override
                    public void processBroadcastElement(TableProcess tp,
                                                        Context context,
                                                        Collector<Tuple2<TableProcess, JSONObject>> out) throws Exception {
                        // FIX: use a local variable — the original cached the broadcast
                        // state in an instance field, which is unnecessary and error-prone.
                        BroadcastState<String, TableProcess> state = context.getBroadcastState(stateDes);
                        String key = getKey(tp);
                        if ("d".equals(tp.getOpType())) {
                            // a deleted config row means this table must stop being synced
                            state.remove(key);
                        } else {
                            state.put(key, tp);
                        }
                    }

                    private String getKey(TableProcess tableProcess) {
                        // The table name alone is ambiguous once several source types
                        // exist, hence the composite "<table>:<type>" key.
                        return tableProcess.getSinkTable() + ":" + tableProcess.getSourceType();
                    }
                });
    }

    /**
     * For every TableProcess config record, issues a
     * {@code CREATE TABLE IF NOT EXISTS} against Phoenix so the sink table
     * exists before any data is written, then forwards the record downstream.
     *
     * @param tps TableProcess configuration stream
     * @return the same stream, after table creation side effects
     */
    private SingleOutputStreamOperator<TableProcess> creatTableInPhoenix(SingleOutputStreamOperator<TableProcess> tps) {
        return tps.process(new ProcessFunction<TableProcess, TableProcess>() {

            // One long-lived Phoenix connection per parallel subtask.
            private Connection conn;

            @Override
            public void open(Configuration parameters)  {
                conn = JdbcUtil.getJdbcConn(Constant.PHOENIX_JDBC_DRIVER, Constant.PHOENIX_DB_URL, null, null);
            }

            @Override
            public void close() throws Exception {
                if (conn != null) {   // open() may have failed before assigning conn
                    conn.close();
                }
            }

            @Override
            public void processElement(TableProcess tp, ProcessFunction<TableProcess, TableProcess>.Context context, Collector<TableProcess> out) throws Exception {
                // Target shape:
                // CREATE TABLE IF NOT EXISTS t("id" VARCHAR, ...
                //    CONSTRAINT pk PRIMARY KEY(id)) <sink_extend options>

                StringBuilder sql = new StringBuilder();
                // turn "a,b,c" into "a VARCHAR,b VARCHAR,c VARCHAR"
                String columns = tp.getSinkColumns().replaceAll("[^,]+", "$0 VARCHAR");
                String pkey = tp.getSinkPk() == null ? "id" : tp.getSinkPk();
                String skExt = tp.getSinkExtend() == null ? "" : tp.getSinkExtend();

                sql.append("CREATE TABLE IF NOT EXISTS ")
                        .append(tp.getSinkTable())
                        .append("(")
                        .append(columns).append(" CONSTRAINT pk PRIMARY KEY(").append(pkey).append(")")
                        .append(")")
                        .append(skExt);

                // BUG FIX: the original called statement.execute(sql) on a
                // PreparedStatement — the JDBC spec forbids passing SQL to an
                // already-prepared statement (it throws SQLException) — and never
                // closed the statement (resource leak). Use the no-arg execute()
                // inside try-with-resources instead.
                try (PreparedStatement statement = conn.prepareStatement(sql.toString())) {
                    statement.execute();
                }
                out.collect(tp);
            }
        });
    }

    /**
     * Reads the dim configuration table from MySQL with Flink CDC and turns each
     * Debezium change event into a TableProcess bean tagged with its op type.
     *
     * Debezium event anatomy (for reference):
     *   insert:                 op=c, before=null, after=data
     *   update ordinary column: op=u, before=old,  after=new
     *   update primary key:     emitted as d followed by c
     *   delete:                 op=d, before=data, after=null
     *
     * @param env the streaming environment to attach the CDC source to
     * @return a stream of TableProcess records with opType set
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {

        // the test MySQL instance has no SSL configured
        Properties jdbcProps = new Properties();
        jdbcProps.setProperty("useSSL", "false");

        MySqlSource<String> configSource = MySqlSource.<String>builder()
                .hostname(Constant.MYSQL_HOST)
                .port(Constant.PORT)
                .databaseList(Constant.CONFIG_DATABASE) // set captured database
                .tableList(Constant.CONFIG_DATABASE + "." + Constant.MYSQL_CONFIG_TABLE) // set captured table
                .username(Constant.MYSQL_ROOT_USER)
                .password(Constant.MYSQL_ROOT_PASSWORD)
                .jdbcProperties(jdbcProps)
                .startupOptions(StartupOptions.initial()) // full snapshot first, then binlog
                .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
                .build();

        return env.fromSource(configSource, WatermarkStrategy.noWatermarks(), "MySQL configuration Source")
                .map((MapFunction<String, JSONObject>) JSON::parseObject)
                .process(new ProcessFunction<JSONObject, TableProcess>() {
                    @Override
                    public void processElement(JSONObject event, ProcessFunction<JSONObject, TableProcess>.Context context, Collector<TableProcess> out)  {
                        String op = event.getString("op");
                        boolean isDelete = "d".equals(op);
                        boolean isUpsert = "c".equals(op) || "u".equals(op) || "r".equals(op);
                        if (!isDelete && !isUpsert) {
                            return; // ignore any other operation kinds
                        }
                        // deletes carry the payload in "before"; c/u/r carry it in "after"
                        String payloadField = isDelete ? "before" : "after";
                        TableProcess tp = JSON.parseObject(event.getString(payloadField), TableProcess.class);
                        tp.setOpType(op);
                        out.collect(tp);
                    }
                });
    }


    /**
     * First-pass ETL: keeps only well-formed Maxwell JSON rows from the
     * "gmall2022" database that carry a real payload, then normalizes
     * "bootstrap-insert" types to plain "insert".
     *
     * @param stream raw strings from the ods_db Kafka topic
     * @return the filtered and normalized stream
     */
    private DataStream<String> elt(DataStreamSource<String> stream) {
        return stream
                .filter((FilterFunction<String>) str -> {
                    try {
                        JSONObject obj = JSON.parseObject(str);
                        if (!"gmall2022".equals(obj.getString("database"))) {
                            return false;
                        }
                        if (obj.getString("table") == null) {
                            return false;
                        }
                        // Why is there no bootstrap-update? maxwell-bootstrap replays a
                        // full sync by re-querying the current rows, so it only ever
                        // emits bootstrap-insert events.
                        String type = obj.getString("type");
                        boolean wantedType = "insert".equals(type)
                                || "update".equals(type)
                                || "bootstrap-insert".equals(type);
                        // "data" must be a non-empty JSON object ("{}" is 2 chars)
                        String data = obj.getString("data");
                        return wantedType && data != null && data.length() > 2;
                    } catch (Exception e) {
                        log.warn("请检查这条数据是否为JSON格式：" + str);
                        return false;
                    }
                })
                .map(str ->str.replaceAll("bootstrap-", ""));
    }

}
