package com.atguigu.realtime.app.dwd;

import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseApp;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.common.Constant;
import com.atguigu.realtime.common.ConstantTopic;
import com.atguigu.realtime.util.MyJDBCUtil;
import com.atguigu.realtime.util.MyJedisUtil;
import com.atguigu.realtime.util.MyKafkaUtil;
import org.apache.flink.api.common.state.*;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import redis.clients.jedis.Jedis;

import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/**
 * @ClassName: DWDdbApp
 * @Description:
 * @Author: kele
 * @Date: 2021/4/18 20:29
 *
 * 处理ods层业务数据
 * 1、读取配置表（存放哪个表应该去在哪个位置，事实表去kafka，维度表去hbase）
 * 2、通过广播流控制kafka中的表的数据去哪里
 *
 **/
public class DWDdbApp extends BaseApp {

    public static void main(String[] args) {

        // Entry point: consume the ods_db topic and dispatch each table's change records.
        DWDdbApp app = new DWDdbApp();
        app.init(20001, 2, "DWDdbApp", ConstantTopic.ODS_DB, "DWDdbApp");
    }

    @Override
    protected void run(StreamExecutionEnvironment env, DataStreamSource<String> dataStreamSource) {

        // 1. Load the routing config table from mysql through flink CDC.
        //    Each row says where a source table's data should go (kafka = fact, hbase = dimension).
        SingleOutputStreamOperator<TableProcess> configStream = readConfTable(env);

        // 2. Parse the raw business records into JSON and drop malformed ones.
        SingleOutputStreamOperator<JSONObject> cleanedStream = ETLBussinessData(dataStreamSource);

        // 3. Broadcast the config and split each record by its configured sink type.
        //    Result: ( kafka-bound (data, config) stream, hbase-bound (data, config) stream ).
        Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> routed =
                dynamicSplit(configStream, cleanedStream);

        // 4. Deliver each branch to its sink.
        send2Kafka(routed.f0);
        send2Hbase(routed.f1);
    }

    /**
     * Writes dimension-table records to HBase through Phoenix.
     * The stream is keyed by sink table so the per-key "table already created" state works,
     * then each record triggers: create-table-once, upsert row, invalidate redis cache on update.
     *
     * Fixes over the original: PreparedStatement and Jedis handles are now closed via
     * try-with-resources (they leaked when execute()/commit()/del() threw).
     *
     * @param stream hbase-bound records paired with their config row
     */
    private void send2Hbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {

        stream
            .keyBy(t -> t.f1.getSinkTable())
            .addSink(new RichSinkFunction<Tuple2<JSONObject, TableProcess>>() {
                // Keyed state flag: non-null once the Phoenix table for the current key exists.
                private ValueState<Boolean> isCreateState;
                // The JDBC connection is not serializable, so it cannot live in state;
                // it is opened per task in open() and closed in close().
                private Connection connection;

                /**
                 * Opens the Phoenix connection and initializes the keyed state.
                 * @param parameters flink configuration (unused)
                 */
                @Override
                public void open(Configuration parameters) throws Exception {

                    connection = MyJDBCUtil.getConnect(Constant.PHOENIX_URL, Constant.PHOENIX_DRIVER);

                    isCreateState = getRuntimeContext().getState(
                            new ValueStateDescriptor<>("isCreateState", Boolean.class));
                }

                // Called once per record.
                @Override
                public void invoke(Tuple2<JSONObject, TableProcess> element,
                                   Context context) throws Exception {

                    // Create the target table on first use of this sink table.
                    createTable(element);

                    // Upsert the row into Phoenix/HBase.
                    write2Hbase(element);

                    // If this record is an update, drop the stale copy cached in redis.
                    delRedisCache(element);
                }

                /**
                 * Deletes the cached row from redis when the record is an update,
                 * so later lookups re-read the fresh value.
                 * Cache key layout: sinkTable:id, stored upper-cased.
                 * @param element (data, config) pair
                 */
                private void delRedisCache(Tuple2<JSONObject, TableProcess> element) {

                    if ("update".equalsIgnoreCase(element.f1.getOperateType())) {

                        String key = element.f1.getSinkTable() + ":" + element.f0.getString("id");

                        // try-with-resources: the original leaked the Jedis handle if del() threw
                        try (Jedis redis = MyJedisUtil.getRedis()) {
                            redis.del(key.toUpperCase());
                            System.out.println("删除redis缓存：" + key);
                        }
                    }
                }

                /**
                 * Upserts one record into Phoenix:
                 *   upsert into sinkTable(col1,col2,...) values(?,?,...)
                 * @param element (data, config) pair
                 * @throws SQLException on statement failure
                 */
                private void write2Hbase(Tuple2<JSONObject, TableProcess> element) throws SQLException {

                    JSONObject data = element.f0;
                    TableProcess table = element.f1;
                    String[] columns = table.getSinkColumns().split(",");

                    String sql = "upsert into " + table.getSinkTable()
                            + "(" + String.join(",", columns)
                            + ") values("
                            + String.join(",", Collections.nCopies(columns.length, "?"))
                            + ")";

                    // try-with-resources: the original leaked the statement if execute()/commit() threw
                    try (PreparedStatement ps = connection.prepareStatement(sql)) {
                        for (int i = 0; i < columns.length; i++) {
                            // Fields missing from data yield null -> stored as SQL NULL.
                            ps.setString(i + 1, data.getString(columns[i]));
                        }
                        ps.execute();
                        connection.commit();
                    }
                }

                /**
                 * Creates the Phoenix table once per sink table (guarded by keyed state):
                 *   create table if not exists t(col1 varchar, ..., constraint pk primary key(PK)) EXTEND
                 * Falls back to "id" when no primary key is configured; sinkExtend (e.g.
                 * SALT_BUCKETS pre-splitting) is appended verbatim when present.
                 * @param element (data, config) pair
                 * @throws SQLException on DDL failure
                 */
                private void createTable(Tuple2<JSONObject, TableProcess> element) throws SQLException {

                    // Keyed state makes this run at most once per sink table per task.
                    if (isCreateState.value() == null) {

                        TableProcess conf = element.f1;

                        StringBuilder ctsql = new StringBuilder("create table if not exists ");

                        ctsql.append(conf.getSinkTable()).append("(");

                        for (String column : conf.getSinkColumns().split(",")) {
                            ctsql.append(column).append(" varchar, ");
                        }

                        ctsql.append("constraint pk primary key(");
                        ctsql.append(conf.getSinkPk() == null || conf.getSinkPk().isEmpty() ? "id" : conf.getSinkPk()).append("))");
                        ctsql.append(conf.getSinkExtend() == null ? "" : conf.getSinkExtend());

                        System.out.println(ctsql);

                        // try-with-resources: the original leaked the statement on failure
                        try (PreparedStatement ps = connection.prepareStatement(ctsql.toString())) {
                            ps.execute();
                            connection.commit();
                        }
                        isCreateState.update(true);
                    }
                }

                @Override
                public void close() throws Exception {
                    if (connection != null) {
                        connection.close();
                    }
                }

            });
    }

    /**
     * Routes fact-table records to kafka, keyed by their configured sink table.
     * NOTE(review): the target topic is presumably resolved per record inside
     * MyKafkaUtil.getKafkaSink() from the paired TableProcess — confirm in MyKafkaUtil.
     *
     * @param stream kafka-bound records paired with their config row
     */
    private void send2Kafka(DataStream<Tuple2<JSONObject, TableProcess>> stream) {

        stream.keyBy(element -> element.f1.getSinkTable())
              .addSink(MyKafkaUtil.getKafkaSink());
    }

    /**
     * Connects the data stream with the broadcast config stream and splits each
     * record by its configured sink type: kafka-bound records go to the main
     * stream, hbase-bound records to a side output.
     *
     * Fix over the original: records without a "type" field (upstream ETL only
     * filters null "table"/"data") used to throw an NPE in replaceAll(); they are
     * now dropped, since they cannot match any config entry anyway.
     *
     * @param tableConfig config rows read from mysql via CDC
     * @param dataStream  cleaned business records
     * @return (kafka-bound stream, hbase-bound stream); each element pairs the
     *         column-filtered data with its matching config row
     */
    private Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(SingleOutputStreamOperator<TableProcess> tableConfig,
                                                                                                                                            SingleOutputStreamOperator<JSONObject> dataStream) {

        // Side output carries the hbase-bound records.
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {
        };
        // Broadcast-state layout: key "sourceTable:operateType" -> config row.
        MapStateDescriptor<String, TableProcess> configTableDescription = new MapStateDescriptor<>("configTableDescription", String.class, TableProcess.class);

        // 1. Broadcast the config table to every parallel task.
        BroadcastStream<TableProcess> configStream = tableConfig.broadcast(configTableDescription);

        // 2. Join the data stream against the broadcast config by "table:type".
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> process = dataStream
                .connect(configStream)
                .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {

                    /**
                     * Handles one business record: look up its config entry and route it.
                     * @param value one business record (has "table", "type", "data")
                     * @param ctx   read-only access to broadcast state and side outputs
                     * @param out   main (kafka-bound) output
                     */
                    @Override
                    public void processElement(JSONObject value,
                                               ReadOnlyContext ctx,
                                               Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Config entries stored by processBroadcastElement.
                        ReadOnlyBroadcastState<String, TableProcess> bs = ctx.getBroadcastState(configTableDescription);

                        String tableName = value.getString("table");
                        String rawType = value.getString("type");
                        // FIX: a record without a "type" field cannot match any config
                        // entry; drop it instead of NPE-ing on replaceAll below.
                        if (rawType == null) {
                            return;
                        }
                        // maxwell full loads prefix the type: bootstrap-insert / bootstrap-update.
                        String tableType = rawType.replaceAll("bootstrap-", "");
                        String key = tableName + ":" + tableType;

                        TableProcess configTable = bs.get(key);

                        // Only records with a matching config entry are forwarded.
                        if (configTable != null) {

                            // Keep only the payload, restricted to the configured sink columns.
                            JSONObject data = value.getJSONObject("data");

                            filterColumn(data, configTable);

                            String sinkType = configTable.getSinkType();
                            // kafka -> main stream, hbase -> side output.
                            if (TableProcess.SINK_TYPE_KAFKA.equalsIgnoreCase(sinkType)) {
                                out.collect(Tuple2.of(data, configTable));
                            } else if (TableProcess.SINK_TYPE_HBASE.equalsIgnoreCase(sinkType)) {
                                ctx.output(hbaseTag, Tuple2.of(data, configTable));
                            }
                        }
                    }

                    // Drops every field of data not listed in the configured sink columns.
                    // removeIf avoids ConcurrentModificationException from removing inside a for-each.
                    private void filterColumn(JSONObject data, TableProcess configTable) {

                        List<String> keep = Arrays.asList(configTable.getSinkColumns().split(","));

                        data.entrySet().removeIf(kv -> !keep.contains(kv.getKey()));
                    }

                    /**
                     * Stores/refreshes one config row in broadcast state.
                     * @param value config row; keyed by sourceTable:operateType
                     * @param ctx   writable broadcast state access
                     * @param out   unused (config rows are not emitted)
                     */
                    @Override
                    public void processBroadcastElement(TableProcess value,
                                                        Context ctx,
                                                        Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {

                        BroadcastState<String, TableProcess> bs = ctx.getBroadcastState(configTableDescription);

                        String key = value.getSourceTable() + ":" + value.getOperateType();
                        bs.put(key, value);

                    }
                });

        DataStream<Tuple2<JSONObject, TableProcess>> hbase = process.getSideOutput(hbaseTag);
        return Tuple2.of(process, hbase);

    }

    /**
     * Cleans the raw ods_db records: parses each line into a JSONObject and
     * discards records with no "table" field or an empty/near-empty "data"
     * payload (serialized "data" must be longer than 3 characters).
     *
     * @param dataStream raw JSON lines from the ods_db topic
     * @return stream of parsed, non-trivial business records
     */
    private SingleOutputStreamOperator<JSONObject> ETLBussinessData(DataStreamSource<String> dataStream) {

        return dataStream
                .map(JSONObject::parseObject)
                .filter(record -> {
                    // Same checks and evaluation order as before: table first, then data.
                    if (record.getString("table") == null) {
                        return false;
                    }
                    JSONObject data = record.getJSONObject("data");
                    return data != null && data.toString().length() > 3;
                });
    }


    /**
     * 1、创建表的环境。
     * 2、通过mysql-cdc读取gmall2021_realtime表
     * 3、查询gmall2021_realtime表中的数据，并将其转化为流的形式返回
     */
    private SingleOutputStreamOperator<TableProcess> readConfTable(StreamExecutionEnvironment env) {

        //1、先创建table的环境
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        //连接数据库，通过mysql-cdc的方式读取数据，获取每个表的所处位置
        tenv.executeSql("create table `table_process`(\n" +
                "   `source_table` string,  \n" +
                "   `operate_type`  string, \n" +
                "   `sink_type`  string,    \n" +
                "   `sink_table`  string,   \n" +
                "   `sink_columns` string,  \n" +
                "   `sink_pk`  string,  \n" +
                "   `sink_extend`  string,  \n" +
                "   primary key (`source_table`,`operate_type`) not enforced)  \n" +
                "   with(   \n" +
                "   'connector' = 'mysql-cdc',  \n" +                //连接方式，选择mysql-cdc的方式连接
                "   'hostname' = 'hadoop162',   \n" +
                "   'port' = '3306',    \n" +
                "   'username' = 'root',    \n" +
                "   'password' = 'aaaaaa',  \n" +
                "   'database-name' = 'gmall2021_realtime', \n" +
                "   'table-name' = 'table_process', \n" +
                "   'debezium.snapshot.mode' = 'initial'    \n" +  //    读取mysql的全量,增量以及更新数据
                ")");

        //查询数据
        Table table = tenv.sqlQuery("select source_table sourceTable,\n" +
                "sink_type sinkType,\n" +
                "operate_type operateType,\n" +
                "sink_table sinkTable,\n" +
                "sink_columns sinkColumns,\n" +
                "sink_pk sinkPk,\n" +
                "sink_extend sinkExtend\n" +
                "from table_process");

        //将表的数据转化为流，表中的数据存在增，删，所以需要使用retract的流
        return tenv
                .toRetractStream(table, TableProcess.class)
               //结果是一个二元元组，第一个是boolean类型，添加数据true，删除数据是false
                .filter(t -> t.f0)
                //不需要元组的第一排数据
                .map(t -> t.f1);
    }
}
