package com.atguigu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseApp;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.util.MyKafkaUtil;
import com.atguigu.realtime.util.MyRedisUtil;
import org.apache.flink.api.common.state.*;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import redis.clients.jedis.Jedis;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/2/4 17:16
 */
public class DWDDbApp extends BaseApp {
    /** Side-output tag carrying dimension-table records bound for HBase (written via Phoenix). */
    final static OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbaseTag") {};
    
    public static void main(String[] args) {
        // parallelism 2, app id "DWDDbApp", source Kafka topic "ods_db"
        new DWDDbApp().init(2, "DWDDbApp", "ods_db");
    }
    
    @Override
    public void run(StreamExecutionEnvironment env,
                    DataStreamSource<String> sourceStream) {
        // 1. ETL the raw ods_db stream into parsed, non-empty JSON records
        final SingleOutputStreamOperator<JSONObject> etlStream = etl(sourceStream);
        // 2. Read the table_process config table as a changelog stream via flink-sql-cdc
        final SingleOutputStreamOperator<TableProcess> processTableStream = readProcessTable(env);
        
        // 3. Dynamic split driven by the config table:
        //    fact rows stay on the main stream, dimension rows go to the hbaseTag side output
        final SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> sink2KafkaStream = dynamicSplit(etlStream, processTableStream);
        final DataStream<Tuple2<JSONObject, TableProcess>> sink2HbaseStream = sink2KafkaStream.getSideOutput(hbaseTag);
        
        // 4. Sink both branches
        sink2Kafka(sink2KafkaStream);
        sink2Hbase(sink2HbaseStream);
    }
    
    /**
     * Writes dimension-table records to HBase through the Phoenix JDBC driver.
     * <p>
     * The stream is keyed by target table name so the per-key {@code tableCreated}
     * state tracks "has this Phoenix table been created yet" exactly once per table.
     *
     * @param sink2HbaseStream dimension-table record stream (json payload + its config row)
     */
    private void sink2Hbase(DataStream<Tuple2<JSONObject, TableProcess>> sink2HbaseStream) {
        String phoenixUrl = "jdbc:phoenix:hadoop162,hadoop163,hadoop164:2181";
        sink2HbaseStream
            .keyBy(f -> f.f1.getSinkTable()) // group by destination table
            .addSink(new RichSinkFunction<Tuple2<JSONObject, TableProcess>>() {
                private Jedis client;
                private Connection conn;
                // Per-key flag: non-null once the Phoenix table has been created successfully.
                private ValueState<Boolean> tableCreated;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    // Connection to Phoenix (auto-commit is off by default; we commit explicitly)
                    conn = DriverManager.getConnection(phoenixUrl);
                    // Keyed state marking whether the destination table already exists
                    tableCreated = getRuntimeContext()
                        .getState(new ValueStateDescriptor<Boolean>("tableCreated", Boolean.class));
                    
                    // Redis connection, used to invalidate the dim cache on updates
                    client = MyRedisUtil.getClient();
                }
                
                @Override
                public void invoke(Tuple2<JSONObject, TableProcess> value, Context context) throws Exception {
                    // 1. Create the destination table on first use of this key
                    checkTable(value);
                    // 2. Upsert the row into Phoenix
                    write2Hbase(value);
                    // 3. Invalidate the Redis dim cache (needed by the fact-join-dim app)
                    delCache(value);
                }
                
                /**
                 * Drops the cached Redis copy of a dimension row when the row is updated,
                 * so the next dim lookup re-reads the fresh value from HBase.
                 */
                private void delCache(Tuple2<JSONObject, TableProcess> value) {
                    final JSONObject obj = value.f0.getJSONObject("data");
                    final TableProcess tp = value.f1;
                    if ("update".equalsIgnoreCase(tp.getOperateType())) {
                        // Key layout must match the one used by the dim-cache reader
                        client.del("dim_" + tp.getSinkTable() + "_" + obj.getString("id"));
                    }
                }
                
                /**
                 * Upserts one record into Phoenix using a parameterized statement,
                 * e.g. {@code upsert into user(id,name) values(?,?)}.
                 * <p>
                 * Placeholders (rather than inlined quoted literals) avoid SQL syntax
                 * breakage/injection on values containing quotes, and write a real SQL
                 * NULL instead of the literal string 'null' when a column is absent.
                 *
                 * @param value record payload plus its routing config
                 * @throws SQLException if the statement cannot be prepared
                 */
                private void write2Hbase(Tuple2<JSONObject, TableProcess> value) throws SQLException {
                    final JSONObject dataJson = value.f0.getJSONObject("data");
                    final TableProcess tp = value.f1;
                    final String[] columns = tp.getSinkColumns().split(",");
                    
                    // 2.1 Build the parameterized upsert statement
                    final StringBuilder upsertSql = new StringBuilder();
                    upsertSql
                        .append("upsert into ")
                        .append(tp.getSinkTable())
                        .append("(")
                        .append(tp.getSinkColumns())
                        .append(") values (");
                    for (int i = 0; i < columns.length; i++) {
                        upsertSql.append(i == 0 ? "?" : ",?");
                    }
                    upsertSql.append(")");
                    
                    // 2.2 Bind values and execute; try-with-resources guarantees the
                    //     statement is closed even when execute() throws
                    try (PreparedStatement ps = conn.prepareStatement(upsertSql.toString())) {
                        for (int i = 0; i < columns.length; i++) {
                            ps.setString(i + 1, dataJson.getString(columns[i]));
                        }
                        ps.execute();
                        conn.commit();
                    } catch (SQLException e) {
                        // Best-effort as before (a single bad row must not kill the job),
                        // but log the failing statement for diagnosis
                        System.err.println("Failed to upsert into Phoenix: " + upsertSql);
                        e.printStackTrace();
                    }
                }
                
                /**
                 * Creates the destination table in Phoenix the first time a record for
                 * this key arrives, e.g.
                 * {@code create table if not exists user(id varchar, name varchar,
                 * constraint pk primary key(id)) SALT_BUCKETS = 3}.
                 * <p>
                 * The created-flag is updated only AFTER the DDL succeeds, so a transient
                 * failure is retried on the next record instead of being lost forever.
                 * Table/column names come from the trusted table_process config table;
                 * identifiers cannot be bound as JDBC parameters.
                 *
                 * @param value record whose config row describes the destination table
                 * @throws Exception on state access or DDL failure
                 */
                private void checkTable(Tuple2<JSONObject, TableProcess> value) throws Exception {
                    if (tableCreated.value() == null) {
                        final TableProcess tp = value.f1;
                        // First record for this table: create it in Phoenix
                        final StringBuilder createSql = new StringBuilder();
                        createSql
                            .append("create table if not exists ")
                            .append(tp.getSinkTable())
                            .append("(");
                        for (String column : tp.getSinkColumns().split(",")) {
                            createSql.append(column).append(" varchar,");
                        }
                        createSql
                            .append("constraint pk primary key(")
                            .append(tp.getSinkPk() == null ? "id" : tp.getSinkPk())
                            .append(")");
                        createSql.append(")");
                        if (tp.getSinkExtend() != null) {
                            // separating space so the extend clause is not glued to ')'
                            createSql.append(" ").append(tp.getSinkExtend());
                        }
                        
                        try (PreparedStatement ps = conn.prepareStatement(createSql.toString())) {
                            ps.execute();
                            conn.commit();
                        }
                        // Mark as created only after the DDL succeeded
                        tableCreated.update(true);
                    }
                }
                
                @Override
                public void close() throws Exception {
                    // Release both external connections opened in open()
                    if (conn != null) {
                        conn.close();
                    }
                    if (client != null) {
                        client.close();
                    }
                }
            });
        
    }
    
    /**
     * Writes fact-table records to Kafka.
     *
     * @param sink2KafkaStream fact-table record stream (json payload + its config row)
     */
    private void sink2Kafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> sink2KafkaStream) {
        sink2KafkaStream.addSink(MyKafkaUtil.getKafkaSink());
    }
    
    /**
     * Dynamically splits the data stream using the broadcast config table.
     * <p>
     * Each data record is matched against the broadcast state by the key
     * {@code sourceTable_operateType}; matched records are trimmed to the
     * configured sink columns and routed either to the main stream (Kafka,
     * fact tables) or to the {@link #hbaseTag} side output (HBase, dim tables).
     * Records with no matching config row are dropped.
     *
     * @param dbStream           parsed ods_db records
     * @param processTableStream table_process config changelog
     * @return the main (Kafka-bound) stream; HBase records are on the side output
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dynamicSplit
    (SingleOutputStreamOperator<JSONObject> dbStream,
     SingleOutputStreamOperator<TableProcess> processTableStream) {
        // 1. Turn the config stream into broadcast state
        // 1.1 State descriptor; key layout: sourceTable_operateType
        final MapStateDescriptor<String, TableProcess> tableProcessStateDescriptor = new MapStateDescriptor<>(
            "tableProcessState",
            String.class,
            TableProcess.class);
        // 1.2 Broadcast the config stream
        final BroadcastStream<TableProcess> tableProcessBroadcastStream = processTableStream
            .broadcast(tableProcessStateDescriptor);
        
        // 2. Connect the data stream with the broadcast config stream
        final SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> sinkToKafkaStream = dbStream
            .connect(tableProcessBroadcastStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                
                // Keep only the configured sink columns in the data payload
                private void filterColumns(JSONObject dataJsonObj, String sinkColumns) {
                    final List<String> columns = Arrays.asList(sinkColumns.split(","));
                    dataJsonObj
                        .entrySet()
                        .removeIf(entry -> !columns.contains(entry.getKey()));
                }
                
                @Override
                public void processElement(JSONObject jsonObj,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Route one data record using the (read-only) broadcast config
                    final ReadOnlyBroadcastState<String, TableProcess> broadcastState = ctx
                        .getBroadcastState(tableProcessStateDescriptor);
                    // 1. Extract table name, operation type and payload
                    String tableName = jsonObj.getString("table");
                    String operateType = jsonObj.getString("type");
                    JSONObject dataJsonObj = jsonObj.getJSONObject("data");
                    
                    // 1.1 Maxwell bootstrap emits type "bootstrap-insert"; normalize it
                    //     to "insert" so downstream handling is uniform
                    if ("bootstrap-insert".equals(operateType)) {
                        operateType = "insert";
                        jsonObj.put("type", operateType);
                    }
                    // 2. Look up the routing config for this table+operation
                    final String key = tableName + "_" + operateType;
                    if (broadcastState.contains(key)) {
                        final TableProcess tableProcess = broadcastState.get(key);
                        // 2.1 Not every column is persisted: drop the unconfigured ones
                        filterColumns(dataJsonObj, tableProcess.getSinkColumns());
                        // 2.2 Split: fact rows -> main stream, dim rows -> side output
                        final String sinkType = tableProcess.getSinkType();
                        if (TableProcess.SINK_TYPE_KAFKA.equalsIgnoreCase(sinkType)) {
                            out.collect(Tuple2.of(jsonObj, tableProcess));
                        } else if (TableProcess.SINK_TYPE_HBASE.equalsIgnoreCase(sinkType)) {
                            ctx.output(hbaseTag, Tuple2.of(jsonObj, tableProcess));
                        }
                    }
                }
                
                @Override
                public void processBroadcastElement(TableProcess tableProcess,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Store/refresh the config row in broadcast state (a map under the hood),
                    // keyed by sourceTable_operateType
                    final BroadcastState<String, TableProcess> broadcastState = ctx
                        .getBroadcastState(tableProcessStateDescriptor);
                    broadcastState.put(tableProcess.getSourceTable() + "_" + tableProcess.getOperateType(), tableProcess);
                }
            });
        
        return sinkToKafkaStream;
        
    }
    
    /**
     * Reads the table_process config table from MySQL as a changelog stream
     * using the flink-sql mysql-cdc connector (snapshot + incremental changes).
     *
     * @param env the stream environment to attach the table environment to
     * @return stream of {@link TableProcess} rows (retractions filtered out)
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        final StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        tenv
            .executeSql("CREATE TABLE `table_process`( " +
                            "   `source_table`  string, " +
                            "   `operate_type`  string, " +
                            "   `sink_type`  string, " +
                            "   `sink_table`  string, " +
                            "   `sink_columns` string, " +
                            "   `sink_pk`  string, " +
                            "   `sink_extend`  string, " +
                            "   PRIMARY KEY (`source_table`,`operate_type`)  NOT ENFORCED" +
                            ")with(" +
                            "   'connector' = 'mysql-cdc', " +
                            "   'hostname' = 'hadoop162', " +
                            "   'port' = '3306', " +
                            "   'username' = 'root', " +
                            "   'password' = 'aaaaaa', " +
                            "   'database-name' = 'gmall2021_realtime', " +
                            "   'table-name' = 'table_process'," +
                            "   'debezium.snapshot.mode' = 'initial' " +  // full snapshot first, then inserts/updates
                            ")");
        // Aliases must match the TableProcess bean property names
        final Table table = tenv.sqlQuery("select " +
                                              "  source_table sourceTable, " +
                                              "  sink_type sinkType, " +
                                              "  operate_type operateType, " +
                                              "  sink_table sinkTable, " +
                                              "  sink_columns sinkColumns, " +
                                              "  sink_pk sinkPk, " +
                                              "  sink_extend sinkExtend " +
                                              "from table_process ");
        // toRetractStream emits (isAdd, row); keep only the add records
        return tenv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0)
            .map(t -> t.f1);
    }
    
    /**
     * ETL for the raw ods_db records: parse JSON and drop records without a
     * table name or with an empty/trivial data payload.
     *
     * @param sourceStream raw JSON strings from the ods_db topic
     * @return parsed, filtered JSON records
     */
    public SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> sourceStream) {
        return sourceStream
            .map(JSON::parseObject)
            .filter(value -> value.getString("table") != null
                && value.getJSONObject("data") != null
                // length > 3 rejects trivial payloads such as "{}" or "null"
                && value.getString("data").length() > 3);
    }
}
