package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseApp;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.util.MyKafkaUtil;
import com.atguigu.gmall.realtime.util.MyRedisUtil;
import org.apache.flink.api.common.state.*;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import redis.clients.jedis.Jedis;

import java.io.IOException;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/3/15 15:37
 */
public class DWDDbApp extends BaseApp implements Serializable {
    /**
     * Side-output tag: records whose config row says sink_type=hbase are routed here;
     * the main output of the split operator carries the Kafka-bound records.
     */
    final OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbaseTag") {};
    
    public static void main(String[] args) {
        // args: web-ui port, parallelism, checkpoint path/job name, Kafka group id, source topic
        new DWDDbApp().init(10001, 2, "DWDDbApp", "DWDDbApp", "ods_db");
    }
    
    /**
     * Job topology:
     *  1. read the dynamic routing config table (MySQL CDC) as a stream
     *  2. parse + ETL the raw ods_db change-log stream
     *  3. broadcast-join data with config and split: main output -> Kafka, side output -> HBase
     *  4. attach both sinks
     */
    @Override
    public void run(StreamExecutionEnvironment env,
                    DataStreamSource<String> sourceStream) {
        // 1. Stream of config-table rows (one TableProcess per row, updated via CDC)
        final SingleOutputStreamOperator<TableProcess> tableProcessStream = readProcessTable(env);
        // 2. Parse ods_db records and drop malformed/empty ones
        final SingleOutputStreamOperator<JSONObject> etlStream = etl(sourceStream);
        // 3. Dynamic split driven by the broadcast config
        final SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> sink2KafkaStream =
            dynamicSplit(etlStream, tableProcessStream);
        final DataStream<Tuple2<JSONObject, TableProcess>> sink2HBaseStream =
            sink2KafkaStream.getSideOutput(hbaseTag);
        
        // 4. Sinks: dimension data to HBase (via Phoenix), fact data to Kafka
        sink2Kafka(sink2KafkaStream);
        sink2Hbase(sink2HBaseStream);
    }
    
    /**
     * Writes dimension records to HBase through Phoenix JDBC.
     * Tables are created lazily (CREATE TABLE IF NOT EXISTS) the first time a
     * sink table's key is seen; the Redis cache entry for an updated row is
     * invalidated so downstream dim lookups don't read stale data.
     */
    private void sink2Hbase(DataStream<Tuple2<JSONObject, TableProcess>> sink2HBaseStream) {
        String phoenixUrl = "jdbc:phoenix:hadoop162,hadoop163,hadoop164:2181";
        
        sink2HBaseStream
            // Key by sink table so each table's rows land in one subtask and the
            // per-key "table already created" state works.
            .keyBy(t -> t.f1.getSinkTable())
            .addSink(new RichSinkFunction<Tuple2<JSONObject, TableProcess>>() {
                
                private Jedis redisClient;
                // Per-key flag: non-null once the CREATE TABLE for this sink table has run.
                private ValueState<Boolean> createTableState;
                private Connection conn;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    // Phoenix JDBC connection (auto-commit is off; we commit per statement)
                    conn = DriverManager.getConnection(phoenixUrl);
                    
                    createTableState = getRuntimeContext()
                        .getState(new ValueStateDescriptor<Boolean>("createTableState", Boolean.class));
                    
                    redisClient = MyRedisUtil.getRedisClient();
                }
                
                @Override
                public void invoke(Tuple2<JSONObject, TableProcess> value,
                                   Context context) throws Exception {
                    // 1. Create the Phoenix table on first use of this sink table
                    checkTable(value);
                    // 2. Upsert the row into HBase
                    write2HBase(value);
                    // 3. Invalidate the Redis dim cache on updates
                    delCache(value);
                }
                
                /**
                 * Drops the cached dim row from Redis when the change is an update,
                 * so the next lookup re-reads the fresh value from HBase.
                 */
                private void delCache(Tuple2<JSONObject, TableProcess> value) {
                    if ("update".equalsIgnoreCase(value.f1.getOperateType())) {
                        final JSONObject obj = value.f0;
                        final TableProcess tp = value.f1;
                        // Cache key convention: SINK_TABLE:ID, upper-cased to match Phoenix naming
                        String key = tp.getSinkTable() + ":" + obj.getString("id");
                        redisClient.del(key.toUpperCase());
                    }
                }
                
                /**
                 * Executes: upsert into table(col1,col2,...) values(?,?,...).
                 * Uses bound parameters instead of concatenated literals so values
                 * containing quotes cannot break the statement (or inject SQL).
                 */
                private void write2HBase(Tuple2<JSONObject, TableProcess> value) throws SQLException {
                    final JSONObject data = value.f0;
                    final TableProcess tp = value.f1;
                    final String[] columns = tp.getSinkColumns().split(",");
                    
                    final StringBuilder sql = new StringBuilder();
                    sql
                        .append("upsert into ")
                        .append(tp.getSinkTable())
                        .append("(")
                        .append(tp.getSinkColumns())
                        .append(")values(");
                    for (int i = 0; i < columns.length; i++) {
                        sql.append(i == 0 ? "?" : ",?");
                    }
                    sql.append(")");
                    
                    try (PreparedStatement ps = conn.prepareStatement(sql.toString())) {
                        for (int i = 0; i < columns.length; i++) {
                            ps.setString(i + 1, data.getString(columns[i]));
                        }
                        ps.execute();
                        conn.commit();
                    }
                }
                
                /**
                 * Creates the Phoenix table for this key's sink table if it has not
                 * been created by this subtask yet. Generated DDL shape:
                 *   create table if not exists t(c1 varchar, ..., constraint pk primary key(id)) SALT_BUCKETS=...
                 */
                private void checkTable(Tuple2<JSONObject, TableProcess> value) throws SQLException, IOException {
                    if (createTableState.value() == null) {
                        final TableProcess tp = value.f1;
                        final StringBuilder createSql = new StringBuilder();
                        createSql
                            .append("create table if not exists ")
                            .append(tp.getSinkTable())
                            .append("(");
                        
                        // Every sink column is stored as varchar
                        for (String c : tp.getSinkColumns().split(",")) {
                            createSql.append(c).append(" varchar,");
                        }
                        // Primary key: configured pk column, falling back to "id"
                        createSql
                            .append("constraint pk primary key(")
                            .append(tp.getSinkPk() == null ? "id" : tp.getSinkPk())
                            .append(")");
                        
                        createSql.append(")");
                        // Optional table options from config, e.g. SALT_BUCKETS
                        createSql.append(tp.getSinkExtend() == null ? "" : tp.getSinkExtend());
                        System.out.println(createSql);
                        
                        try (PreparedStatement ps = conn.prepareStatement(createSql.toString())) {
                            ps.execute();
                            conn.commit();
                        }
                        createTableState.update(true);
                    }
                }
                
                @Override
                public void close() throws Exception {
                    // Release both external resources; each was opened in open()
                    if (conn != null) {
                        conn.close();
                    }
                    if (redisClient != null) {
                        redisClient.close();
                    }
                }
            });
    }
    
    /** Writes the main (fact) stream to Kafka; the topic is taken from each record's TableProcess. */
    private void sink2Kafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> sink2KafkaStream) {
        sink2KafkaStream.addSink(MyKafkaUtil.getFlinkKafkaSink());
    }
    
    /**
     * Connects the data stream with the broadcast config stream and routes each
     * record according to its config entry (keyed by "sourceTable_operateType"):
     * kafka-bound records go to the main output, hbase-bound records to {@link #hbaseTag}.
     * Records with no matching config entry are dropped.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dynamicSplit(SingleOutputStreamOperator<JSONObject> etlStream,
                                                                                      SingleOutputStreamOperator<TableProcess> tableProcessStream) {
        // Broadcast state key: "table_operateType"
        final MapStateDescriptor<String, TableProcess> tableProcessMapStateDescriptor =
            new MapStateDescriptor<>("tableProcessState", String.class, TableProcess.class);
        final BroadcastStream<TableProcess> tableProcessBroadcastStream = tableProcessStream
            .broadcast(tableProcessMapStateDescriptor);
        
        return etlStream
            .connect(tableProcessBroadcastStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    final ReadOnlyBroadcastState<String, TableProcess> broadcastState = ctx
                        .getBroadcastState(tableProcessMapStateDescriptor);
                    final String tableName = value.getString("table");
                    final String type = value.getString("type");
                    if (type == null) {
                        // Malformed record: no operation type, nothing to route
                        return;
                    }
                    // Maxwell bootstrap emits "bootstrap-insert"; normalize to "insert"
                    final String operateType = type.replace("bootstrap-", "");
                    
                    // Look up the routing config for this table+operation
                    final TableProcess tableProcess = broadcastState.get(tableName + "_" + operateType);
                    final JSONObject data = value.getJSONObject("data");
                    if (tableProcess != null) { // only configured tables are forwarded
                        // Keep only the columns the sink declares
                        filterColumns(data, tableProcess.getSinkColumns());
                        final String sinkType = tableProcess.getSinkType();
                        if (TableProcess.SINK_TYPE_KAFKA.equalsIgnoreCase(sinkType)) {
                            out.collect(Tuple2.of(data, tableProcess));
                        } else if (TableProcess.SINK_TYPE_HBASE.equalsIgnoreCase(sinkType)) {
                            ctx.output(hbaseTag, Tuple2.of(data, tableProcess));
                        }
                    }
                }
                
                /** Removes every entry of {@code data} whose key is not listed in sinkColumns. */
                private void filterColumns(JSONObject data,
                                           String sinkColumns) {
                    final List<String> columns = Arrays.asList(sinkColumns.split(","));
                    data.entrySet().removeIf(entry -> !columns.contains(entry.getKey()));
                }
                
                @Override
                public void processBroadcastElement(TableProcess value,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Store/refresh this config row in broadcast state
                    final BroadcastState<String, TableProcess> broadcastState = ctx
                        .getBroadcastState(tableProcessMapStateDescriptor);
                    broadcastState.put(value.getSourceTable() + "_" + value.getOperateType(), value);
                }
            });
    }
    
    /**
     * Parses raw ods_db JSON strings and keeps only records that name a table
     * and carry a non-trivial "data" payload (serialized length > 10 filters
     * out empty objects like {} or near-empty noise).
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> sourceStream) {
        return sourceStream
            .map(JSON::parseObject)
            .filter(obj -> obj.getString("table") != null
                && obj.getJSONObject("data") != null
                && obj.getString("data").length() > 10
            );
    }
    
    /**
     * Reads the routing config table from MySQL via flink-cdc as a changelog
     * stream and converts it to a stream of {@link TableProcess} beans
     * (retractions are filtered out, keeping only add messages).
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        final StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        
        tenv.executeSql("CREATE TABLE `table_process`( " +
                            "   `source_table`  string, " +
                            "   `operate_type`  string, " +
                            "   `sink_type`  string, " +
                            "   `sink_table`  string, " +
                            "   `sink_columns` string, " +
                            "   `sink_pk`  string, " +
                            "   `sink_extend`  string, " +
                            "   PRIMARY KEY (`source_table`,`operate_type`)  NOT ENFORCED" +
                            ")with(" +
                            "   'connector' = 'mysql-cdc', " +
                            "   'hostname' = 'hadoop162', " +
                            "   'port' = '3306', " +
                            "   'username' = 'root', " +
                            "   'password' = 'aaaaaa', " +
                            "   'database-name' = 'gmall2021_realtime', " +
                            "   'table-name' = 'table_process'," +
                            // 'initial' = full snapshot first, then binlog increments/updates
                            "   'debezium.snapshot.mode' = 'initial' " +
                            ")"
        );
        
        // Aliases match the TableProcess bean's field names for POJO conversion
        final Table table = tenv.sqlQuery("select " +
                                              "  source_table sourceTable, " +
                                              "  sink_type sinkType, " +
                                              "  operate_type operateType, " +
                                              "  sink_table sinkTable, " +
                                              "  sink_columns sinkColumns, " +
                                              "  sink_pk sinkPk, " +
                                              "  sink_extend sinkExtend " +
                                              "from table_process "
        );
        
        return tenv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0)   // keep add messages, drop retractions
            .map(t -> t.f1);
    }
    
}
