package com.atguigu.gmall.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseApp;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinUtil;
import com.atguigu.gmall.realtime.util.JdbcUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;

/**
 * @Author lzc
 * @Date 2023/2/8 11:20
 */
@Slf4j
public class DimApp extends BaseApp {
    public static void main(String[] args) {
        // Port 2001, parallelism 2, job name "DimApp", consuming the ODS db topic.
        new DimApp().init(2001, 2, "DimApp", Constant.TOPIC_ODS_DB);
    }
    
    /**
     * Pipeline: clean the raw ODS stream, load the dim-table configuration via
     * CDC, pre-create the Phoenix tables, join data with config through a
     * broadcast connect, trim columns, and finally sink the rows to Phoenix.
     */
    @Override
    protected void handle(StreamExecutionEnvironment env,
                          DataStreamSource<String> stream) {
        // Step 1: clean the raw stream, dropping malformed records.
        SingleOutputStreamOperator<JSONObject> cleanedStream = etl(stream);
        // Step 2: read the table_process configuration.
        SingleOutputStreamOperator<TableProcess> configStream = readTableProcess(env);
        // Step 3: create the Phoenix tables (must happen before broadcasting).
        configStream = createPhoenixTable(configStream);
        // Step 4: connect the business data with the broadcast config.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dimStream =
            connect(cleanedStream, configStream);
        // Step 5: drop columns that are not part of the sink schema.
        dimStream = delNotNeedColumns(dimStream);
        // Step 6: write the dimension rows to Phoenix.
        writeToPhoenix(dimStream);
    }
    
    /**
     * Sinks each (row, config) pair to its Phoenix table with a custom sink.
     *
     * <p>Why a custom sink: there is no dedicated Phoenix connector, and the
     * generic JDBC sink can only target a single table, while each record here
     * may belong to a different dimension table.
     */
    private void writeToPhoenix(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream) {
        dataTpStream.addSink(FlinkSinUtil.getPhoenixSink());
    }
    
    /**
     * Keeps only the columns listed in the config's sinkColumns (plus the
     * bookkeeping key "operate_type") in each dimension row.
     * e.g. ({"a": 1, "b": 2}, tp with sinkColumns="a") => ({"a": 1}, tp)
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> delNotNeedColumns(
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream) {
        return dataTpStream
            .map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                    JSONObject data = t.f0;
                    // HashSet gives O(1) membership tests; List.contains inside
                    // removeIf was O(columns) for every key of every record.
                    Set<String> columns = new HashSet<>(Arrays.asList(t.f1.getSinkColumns().split(",")));
                    // Remove every key that is neither a sink column nor "operate_type".
                    data.keySet().removeIf(key -> !columns.contains(key) && !"operate_type".equals(key));
                    return t;
                }
            });
    }
    
    /**
     * Broadcasts the config stream and connects it with the business data:
     * every data record that has a matching entry in the broadcast state is
     * emitted together with its config as a Tuple2.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connect(
        SingleOutputStreamOperator<JSONObject> dataStream,
        SingleOutputStreamOperator<TableProcess> tpStream) {
        // 1. Turn the config stream into a broadcast stream.
        // key:   source_table:source_type, e.g. user_info:update
        // value: TableProcess
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        BroadcastStream<TableProcess> tpBcStream = tpStream.broadcast(tpStateDesc);
        // 2. Connect the data stream with the broadcast stream.
        BroadcastConnectedStream<JSONObject, TableProcess> dataTpStream = dataStream.connect(tpBcStream);
        
        
        return dataTpStream
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                // 4. Handle data-stream records: look up each record's config in the
                //    (read-only) broadcast state. Invoked once per data record.
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    ReadOnlyBroadcastState<String, TableProcess> state = ctx.getBroadcastState(tpStateDesc);
                    String table = value.getString("table");
                    // NOTE(review): the lookup type is hard-coded to "ALL", so only
                    // config rows whose source_type is "ALL" can ever match — confirm
                    // the table_process config follows that convention.
                    String type = "ALL";
                    String key = getStateKey(table, type);
                    
                    TableProcess tp = state.get(key);
                    if (tp != null) { // a config entry exists for this record
                        JSONObject data = value.getJSONObject("data");
                        // Carry the upstream change type along; consumed downstream.
                        data.put("operate_type", value.getString("type"));
                        out.collect(Tuple2.of(data, tp));
                    }
                }
                
                // 3. Handle broadcast (config) records: store them in the broadcast
                //    state. Invoked once per config record on every parallel subtask.
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Get the writable broadcast state.
                    BroadcastState<String, TableProcess> state = ctx.getBroadcastState(tpStateDesc);
                    // Key under which this config entry is stored.
                    String key = getStateKey(tp.getSourceTable(), tp.getSourceType());
                    
                    // op=d: the config row was deleted -> drop it from the state.
                    // Anything else is an insert/update -> upsert it.
                    if ("d".equals(tp.getOp_type())) {
                        state.remove(key);
                    } else {
                        state.put(key, tp);
                    }
                }
                
                // Builds the broadcast-state key: "<table>:<type>".
                private String getStateKey(String table, String type) {
                    return table + ":" + type;
                }
            });
        
        
    }
    
    /**
     * For every dim config record, creates (or drops and recreates) the
     * corresponding table in Phoenix, then passes the config downstream.
     *
     * <p>op semantics: "d" -> drop table, "u" -> drop then recreate,
     * anything else ("c"/"r") -> create.
     */
    private SingleOutputStreamOperator<TableProcess> createPhoenixTable(
        SingleOutputStreamOperator<TableProcess> tpStream) {
        return tpStream
            .filter(tp -> "dim".equals(tp.getSinkType()))  // keep only dimension-table configs
            .map(new RichMapFunction<TableProcess, TableProcess>() {
                private Connection conn;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    // One Phoenix JDBC connection per parallel task.
                    conn = JdbcUtil.getPhoenixConnection();
                }
                
                @Override
                public void close() throws Exception {
                    JdbcUtil.closeConnection(conn);
                }
                
                @Override
                public TableProcess map(TableProcess tp) throws Exception {
                    String op = tp.getOp_type();
                    if ("d".equals(op)) {
                        dropTable(tp);
                    } else if ("u".equals(op)) {
                        // Schema may have changed: rebuild the table.
                        dropTable(tp);
                        createTable(tp);
                    } else {  // "c" or "r"
                        createTable(tp);
                    }
                    return tp;
                }
                
                // Builds and executes, e.g.:
                // create table if not exists t(id varchar, age varchar,
                //   constraint pk primary key (id)) SALT_BUCKETS = 3
                private void createTable(TableProcess tp) throws SQLException {
                    StringBuilder sql = new StringBuilder();
                    sql
                        .append("create table if not exists ")
                        .append(tp.getSinkTable())
                        .append("(")
                        // "a,b,c" => "a varchar,b varchar,c varchar"
                        .append(tp.getSinkColumns().replaceAll("[^,]+", "$0 varchar"))
                        .append(", constraint pk primary key (")
                        .append(tp.getSinkPk() == null ? "id" : tp.getSinkPk())  // default pk: id
                        .append("))")
                        .append(tp.getSinkExtend() == null ? "" : tp.getSinkExtend());
                    
                    log.info("phoenix 建表语句: {}", sql);
                    // try-with-resources: the statement is closed even when execute() throws
                    // (the old explicit ps.close() leaked on failure).
                    try (PreparedStatement ps = conn.prepareStatement(sql.toString())) {
                        ps.execute();
                    }
                }
                
                private void dropTable(TableProcess tp) throws SQLException {
                    String sql = "drop table if exists " + tp.getSinkTable();
                    log.info("phoenix 删表语句: {}", sql);
                    try (PreparedStatement ps = conn.prepareStatement(sql)) {
                        ps.execute();
                    }
                }
            });
    }
    
    /**
     * Reads the gmall_config.table_process table with MySQL CDC: a full
     * snapshot on startup, then incremental changes from the binlog, parsed
     * into TableProcess beans (op recorded on each bean).
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        Properties props = new Properties();
        props.put("useSSL", "false");
        // NOTE(review): credentials are hard-coded; consider externalizing them.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
            .hostname("hadoop162")
            .port(3306)
            .databaseList("gmall_config")
            .tableList("gmall_config.table_process")
            .username("root")
            .password("aaaaaa")
            .jdbcProperties(props)
            // initial: snapshot everything first, then follow the binlog
            .startupOptions(StartupOptions.initial())
            // emit each SourceRecord as a JSON string
            .deserializer(new JsonDebeziumDeserializationSchema())
            .build();
        
        return env
            .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mysql-config-source")
            .map(new MapFunction<String, TableProcess>() {
                @Override
                public TableProcess map(String value) throws Exception {
                    JSONObject obj = JSON.parseObject(value);
                    String op = obj.getString("op");
                    // Deletes carry the row image in "before";
                    // creates/reads/updates carry it in "after".
                    TableProcess tp = "d".equals(op)
                        ? obj.getObject("before", TableProcess.class)
                        : obj.getObject("after", TableProcess.class);
                    tp.setOp_type(op);
                    return tp;
                }
            });
    }
    
    /**
     * Cleans the raw ODS stream: keeps only well-formed JSON records from the
     * gmall2022 database with a table name, a supported change type and a
     * non-empty data payload, then parses them into JSONObjects with
     * "bootstrap-insert" normalized to "insert".
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
            .filter(new FilterFunction<String>() {
                @Override
                public boolean filter(String value) throws Exception {
                    // Must be valid JSON with the expected envelope.
                    try {
                        JSONObject obj = JSON.parseObject(value);
                        String type = obj.getString("type");
                        String data = obj.getString("data");
                        return "gmall2022".equals(obj.getString("database"))
                            && null != obj.getString("table")
                            && ("insert".equals(type) || "update".equals(type) || "bootstrap-insert".equals(type) || "delete".equals(type))
                            && null != data
                            && data.length() > 2;  // "{}" => empty payload
                    } catch (Exception e) {
                        // Not valid JSON: drop it.
                        log.warn("你的 json 数据: " + value + "  不是正确的 json");
                        return false;
                    }
                }
            })
            .map(value -> {
                JSONObject obj = JSON.parseObject(value);
                // Normalize only the "type" field. The previous implementation ran
                // replaceAll("bootstrap-", "") over the whole raw string, which
                // would also corrupt any data value containing "bootstrap-".
                if ("bootstrap-insert".equals(obj.getString("type"))) {
                    obj.put("type", "insert");
                }
                return obj;
            });
    }
}
/*
 SALT_BUCKETS = 3
 盐表
 
 region server
    region
    region
 
 建一张表,一张默认只有一个 region
 
 region 的分裂:
    旧: 10G 当 region增长到 10g 会自动分裂
    新: min(10G, 2 * 128MB * n^3), n 为该表在本 region server 上的 region 个数
        (IncreasingToUpperBoundRegionSplitPolicy: 2 * flushSize * n^3 与上限取小)
 
 region 的迁移
    自动. 在后台某个时间进行迁移
    
生产中为了避免region 的分裂和迁移: 建预分区表




----
 目标: 从 kafka 的 ods_db 读取数据, 过滤出其中的维度数据, 然后写出到 phoenix 中
    动态分流:
        cdc 技术
        connect 和 广播状态
        
    自定义 sink:
        phoenix sink
        
-----------
cdc

    op:   r  读取的是快照(read)
             before: null
             after: 有值
          u  before: 有值
             after: 有值(需要的)
          
          更新主键:
          先
          d:
            before: 有值
            after: null
          再
          c:
            before: null
            after: 有值
         
         删除数据:
            d
            
         
         
      r: 读快照
      c: 新增
      d: 删除
      u: 更新(更新的非主键)
   
-----
c r: 建表
d: 删表
u: 先删表再建表

 */