package com.atguigu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseApp;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.util.MyKafkaUtil;
import com.atguigu.realtime.util.MyRedisUtil;
import org.apache.flink.api.common.state.*;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import redis.clients.jedis.Jedis;

import java.io.IOException;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;

/**
 * DWD-layer dispatcher: consumes the ODS change-log topic ("ods_db"), joins each
 * record against a broadcast routing-configuration table (read from MySQL via
 * Flink CDC), and routes fact rows to Kafka and dimension rows to HBase
 * (written through the Phoenix JDBC driver).
 *
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/4/17 14:11
 */
public class DWDDbApp extends BaseApp implements Serializable {
    
    // Side-output tag for records whose configured sink type is HBase.
    // The anonymous subclass ({}) keeps the generic type reifiable for Flink.
    private final OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};
    
    public static void main(String[] args) {
        // Args presumably: port, parallelism, job/ck name, consumer group, source topic
        // — TODO confirm against BaseApp.init.
        new DWDDbApp().init(2001, 2, "DWDDbApp", "DWDDbApp", "ods_db");
    }
    
    @Override
    protected void run(StreamExecutionEnvironment env,
                       DataStreamSource<String> sourceStream) {
        // 1. Read the routing-configuration table (MySQL, via Flink SQL CDC).
        SingleOutputStreamOperator<TableProcess> tableProcessStream = readProcessTable(env);
        
        // 2. ETL the raw change-log stream (covers both fact and dimension tables).
        SingleOutputStreamOperator<JSONObject> eltDataStream = etlDataSteam(sourceStream);
        
        // 3. Dynamic split: Kafka-bound rows stay on the main stream,
        //    HBase-bound rows go to the side output.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = dynamicSplit(eltDataStream, tableProcessStream);
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        
        // 4. Sink each branch to its destination.
        // 4.1 fact rows -> Kafka
        sink2Kafka(kafkaStream);
        // 4.2 dimension rows -> HBase
        sink2Hbase(hbaseStream);
    }
    
    /**
     * Writes dimension rows to HBase through Phoenix.
     * Rows are keyed by sink table so one table's data lands in one subtask and
     * the "table already created" flag can live in per-key state.
     *
     * Phoenix notes:
     *   1. Declare every column as varchar for simplicity.
     *   2. Writes use "upsert into ...".
     */
    private void sink2Hbase(DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream) {
        hbaseStream
            .keyBy(t -> t.f1.getSinkTable()) // all rows of one table go to the same subtask
            .addSink(new RichSinkFunction<Tuple2<JSONObject, TableProcess>>() {
                
                // Per-key (per sink table) flag: non-null once the table has been created.
                private ValueState<Boolean> createTableState;
                // JDBC connections are not serializable, so the connection is opened
                // in open() on the task manager rather than kept in state.
                private Connection conn;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    // Open the Phoenix (HBase) JDBC connection.
                    String phoenixUrl = "jdbc:phoenix:hadoop162,hadoop163,hadoop164:2181";
                    // Load the driver explicitly; not every driver auto-registers from the URL.
                    Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
                    conn = DriverManager.getConnection(phoenixUrl);
                    
                    createTableState = getRuntimeContext()
                        .getState(new ValueStateDescriptor<>("createTableState", Boolean.class));
                }
                
                @Override
                public void invoke(Tuple2<JSONObject, TableProcess> ele,
                                   Context context) throws Exception {
                    // Called once per element:
                    // create the target table on first sight of its key,
                    checkTable(ele);
                    // upsert the row,
                    write2HBase(ele);
                    // and evict any stale Redis cache entry for updates
                    // (cache key layout: "<sinkTable>:<id>").
                    delCache(ele);
                }
                
                // If this record is an update, delete the cached copy from Redis
                // so readers do not see a stale dimension row.
                private void delCache(Tuple2<JSONObject, TableProcess> ele) {
                    JSONObject dimObj = ele.f0;
                    TableProcess tp = ele.f1;
                    
                    // Yoda comparison avoids an NPE if operate_type is missing.
                    if ("update".equals(tp.getOperateType())) {
                        String table = tp.getSinkTable();
                        String id = dimObj.getString("id");
                        // try-with-resources: the Jedis client is returned to the pool
                        // even if del() throws.
                        try (Jedis client = MyRedisUtil.getClient()) {
                            client.del(table + ":" + id);
                        }
                        System.out.println("删除缓存: " + table + "  id: " + id);
                    }
                }
                
                // Upserts one row:  upsert into <table>(c1,...,cn)values(?,...,?)
                private void write2HBase(Tuple2<JSONObject, TableProcess> ele) throws SQLException {
                    JSONObject data = ele.f0;
                    TableProcess tp = ele.f1;
                    
                    String[] columns = tp.getSinkColumns().split(",");
                    String[] placeholders = new String[columns.length];
                    Arrays.fill(placeholders, "?");
                    
                    String sql = "upsert into " + tp.getSinkTable()
                        + "(" + String.join(",", columns)
                        + ")values(" + String.join(",", placeholders) + ")";
                    System.out.println(sql);
                    
                    // try-with-resources closes the statement even when execute/commit throws.
                    try (PreparedStatement ps = conn.prepareStatement(sql)) {
                        for (int i = 0; i < columns.length; i++) {
                            // Every Phoenix column is varchar, so each value binds as a String.
                            ps.setString(i + 1, data.getString(columns[i]));
                        }
                        ps.execute();
                        conn.commit(); // Phoenix does not auto-commit by default
                    }
                }
                
                // Lazily creates the Phoenix table the first time this subtask sees
                // a given sink table, e.g.:
                //   create table if not exists dim_user(
                //     id varchar, name varchar, age varchar,
                //     constraint pk primary key(id)) SALT_BUCKETS = 3   -- salted table
                private void checkTable(Tuple2<JSONObject, TableProcess> ele) throws SQLException, IOException {
                    if (createTableState.value() == null) {
                        TableProcess tp = ele.f1;
                        StringBuilder sb = new StringBuilder("create table if not exists ");
                        sb
                            .append(tp.getSinkTable())
                            .append("(");
                        for (String c : tp.getSinkColumns().split(",")) {
                            sb
                                .append(c)
                                .append(" varchar, ");
                        }
                        String pk = tp.getSinkPk();
                        String extend = tp.getSinkExtend();
                        sb
                            .append("constraint pk primary key(")
                            .append(pk == null || pk.isEmpty() ? "id" : pk) // some tables have no pk; default to id
                            .append("))")
                            .append(extend == null || extend.isEmpty() ? "" : extend);
                        
                        System.out.println(sb.toString());
                        try (PreparedStatement ps = conn.prepareStatement(sb.toString())) {
                            ps.execute();
                            conn.commit();
                        }
                        
                        createTableState.update(true);
                    }
                }
                
                @Override
                public void close() throws Exception {
                    // Release the shared Phoenix connection on task shutdown.
                    if (conn != null) {
                        conn.close();
                    }
                }
            });
    }
    
    /**
     * Sinks the Kafka-bound branch; topic routing is handled inside
     * {@code MyKafkaUtil.getKafkaSink()}.
     */
    private void sink2Kafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream) {
        kafkaStream.addSink(MyKafkaUtil.getKafkaSink());
    }
    
    /**
     * Dynamic split: connects the data stream with the broadcast configuration
     * stream and routes each record according to its config entry.
     * Broadcast-state key layout: "<sourceTable>:<operateType>", e.g. "user_info:insert".
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dynamicSplit(SingleOutputStreamOperator<JSONObject> eltDataStream,
                                                                                      SingleOutputStreamOperator<TableProcess> tableProcessStream) {
        
        // 1. Broadcast the configuration stream.
        MapStateDescriptor<String, TableProcess> tableProcessDescriptor = new MapStateDescriptor<>("tableProcessState", String.class, TableProcess.class);
        BroadcastStream<TableProcess> tableProcessBS = tableProcessStream
            .broadcast(tableProcessDescriptor);
        // 2. Connect the data stream with the broadcast stream.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> result = eltDataStream
            .connect(tableProcessBS)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    
                    ReadOnlyBroadcastState<String, TableProcess> bs = ctx.getBroadcastState(tableProcessDescriptor);
                    
                    // Kafka-bound rows go to the main stream, HBase-bound rows to the side output.
                    String tableName = value.getString("table");
                    // Snapshot records arrive as "bootstrap-insert"; normalize to "insert".
                    String operatorType = value.getString("type").replaceAll("bootstrap-", "");
                    String key = tableName + ":" + operatorType;
                    
                    // 1. Look up this record's routing config; records with no config are dropped.
                    TableProcess tableProcess = bs.get(key);
                    if (tableProcess != null) {
                        
                        // The actual row payload.
                        JSONObject data = value.getJSONObject("data");
                        // Keep only the columns listed in sinkColumns.
                        filterColumns(data, tableProcess.getSinkColumns());
                        
                        String sinkType = tableProcess.getSinkType();
                        // 2. Route by sink type.
                        if (TableProcess.SINK_TYPE_KAFKA.equalsIgnoreCase(sinkType)) { // main stream
                            out.collect(Tuple2.of(data, tableProcess));
                        } else if (TableProcess.SINK_TYPE_HBASE.equalsIgnoreCase(sinkType)) { // side output
                            ctx.output(hbaseTag, Tuple2.of(data, tableProcess));
                        }
                    }
                }
                
                // Removes from data, in place, every field not listed in sinkColumns.
                private void filterColumns(JSONObject data, String sinkColumns) {
                    List<String> columns = Arrays.asList(sinkColumns.split(","));
                    // removeIf: true means delete the entry, false means keep it.
                    data.entrySet().removeIf(kv -> !columns.contains(kv.getKey()));
                }
                
                @Override
                public void processBroadcastElement(TableProcess value,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Store each configuration row in broadcast state under
                    // "<sourceTable>:<operateType>".
                    BroadcastState<String, TableProcess> bs = ctx.getBroadcastState(tableProcessDescriptor);
                    bs.put(value.getSourceTable() + ":" + value.getOperateType(), value);
                }
            });
        return result;
    }
    
    /**
     * ETL: parses each raw record as JSON and drops records without a database,
     * table, or meaningful data payload (length > 10 filters "data":{} stubs).
     */
    private SingleOutputStreamOperator<JSONObject> etlDataSteam(DataStreamSource<String> sourceStream) {
        return sourceStream
            .map(JSON::parseObject)
            .filter(obj -> obj.getString("database") != null
                && obj.getString("table") != null
                && obj.getString("data") != null
                && obj.getString("data").length() > 10);
    }
    
    /**
     * Reads the routing-configuration table from MySQL with the Flink SQL CDC
     * connector: the initial snapshot plus every subsequent insert/update, so
     * configuration changes take effect without restarting the job.
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        tenv
            .executeSql("CREATE TABLE table_process( " +
                            "   `source_table`  string, " +
                            "   `operate_type`  string, " +
                            "   `sink_type`  string, " +
                            "   `sink_table`  string, " +
                            "   `sink_columns` string, " +
                            "   `sink_pk`  string, " +
                            "   `sink_extend`  string, " +
                            "   PRIMARY KEY (`source_table`,`operate_type`)  NOT ENFORCED" +
                            ")with(" +
                            "   'connector' = 'mysql-cdc', " +  // reads snapshot + changes
                            "   'hostname' = 'hadoop162', " +
                            "   'port' = '3306', " +
                            "   'username' = 'root', " +
                            "   'password' = 'aaaaaa', " +
                            "   'database-name' = 'gmall2021_realtime', " +
                            "   'table-name' = 'table_process'," +
                            "   'debezium.snapshot.mode' = 'initial' " +  // full snapshot, then incremental
                            ")");
        
        // Alias columns to the camelCase property names of the TableProcess POJO.
        Table processTable = tenv.sqlQuery("select " +
                                               "  source_table sourceTable, " +
                                               "  sink_type sinkType, " +
                                               "  operate_type operateType, " +
                                               "  sink_table sinkTable, " +
                                               "  sink_columns sinkColumns, " +
                                               "  sink_pk sinkPk, " +
                                               "  sink_extend sinkExtend " +
                                               "from table_process");
        
        // Retract stream: keep only the "add" side (f0 == true) of each change.
        return tenv
            .toRetractStream(processTable, TableProcess.class)
            .filter(t -> t.f0)
            .map(t -> t.f1);
    }
}
/*
dwd层需要掌握的知识:
1. 消费kafka数据,设置checkpoint
2. 数据写入到Kafka
3. flink sql cdc
    cdc:
4. 动态分流
    广播流
    connect
    广播状态
5. 数据写入到 hbase(Phoenix)
 */
