package com.atguigu.gmall.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseApp;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.commont.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import com.atguigu.gmall.realtime.util.JdbcUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;


/**
 * @Author lzc
 * @Date 2023/4/19 14:19
 */
@Slf4j
public class DimApp extends BaseApp {
    public static void main(String[] args) {
        // port=20001, parallelism=2, app id (checkpoint path / consumer group), source topic
        new DimApp().init(20001,
                          2,
                          "DimApp",
                          Constant.TOPIC_ODS_DB
        );
        
    }
    
    /**
     * Assembles the dimension pipeline:
     * etl -> read config table (cdc) -> maintain phoenix tables ->
     * broadcast-connect data with config -> drop unneeded columns -> sink to phoenix.
     */
    @Override
    public void handle(StreamExecutionEnvironment env,
                       DataStreamSource<String> stream) {
        // 1. clean the raw ods data
        SingleOutputStreamOperator<JSONObject> etledStream = etl(stream);
        // 2. read the configuration table via flink cdc
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);
        // 3. create/drop the corresponding tables in phoenix
        tpStream = createPhoenixTable(tpStream);
        // 4. connect the data stream with the (broadcast) config stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream = connect(etledStream, tpStream);
        // 5. remove columns that are not part of the sink schema
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> resultStream = deleteNotNeedColumns(dataTpStream);
        // 6. write the rows into the dim tables
        writeToPhoenix(resultStream);
        
        
    }
    
    /**
     * Writes every (row, config) pair into its phoenix dim table.
     * <p>
     * Why a custom sink:
     * 1. there is no official phoenix sink;
     * 2. the jdbc sink can only write into ONE table, while this stream
     *    carries rows for many different dim tables;
     * 3. hence the hand-written sink from {@code FlinkSinkUtil}.
     */
    private void writeToPhoenix(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> resultStream) {
        resultStream.addSink(FlinkSinkUtil.getPhoenixSink());
    }
    
    /**
     * Keeps only the columns listed in the config's sinkColumns (plus the
     * synthetic "op_type" field added in {@link #connect}); every other key
     * is removed from the data object in place.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> deleteNotNeedColumns(
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream) {
        return dataTpStream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                JSONObject data = t.f0;
                List<String> columns = new ArrayList<>(Arrays.asList(t.f1.getSinkColumns().split(",")));
                // "op_type" is not in the configured schema but is needed
                // downstream to distinguish insert/update/delete
                columns.add("op_type");
                // keep a key only when it is one of the wanted columns
                data.keySet().removeIf(key -> !columns.contains(key));
                return t;
            }
        });
    }
    
    /**
     * Broadcasts the config stream and connects it with the data stream.
     * Each data record looks up its table's config in the broadcast state;
     * records without a matching config are dropped (they are not dim data).
     * Matching records are emitted as (data, config) tuples, with an extra
     * "op_type" field copied from the maxwell "type" field.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connect(
        SingleOutputStreamOperator<JSONObject> dataStream,
        SingleOutputStreamOperator<TableProcess> tpStream) {
        // 1. turn the config stream into a broadcast stream
        //    key:   source_table:ALL, e.g. user_info:ALL
        //    value: the TableProcess config object
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        BroadcastStream<TableProcess> tpBcStream = tpStream.broadcast(tpStateDesc);
        // 2. connect the data stream with the broadcast stream
        return dataStream
            .connect(tpBcStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                // 4. per data record: fetch the matching config from the
                //    broadcast state and emit (data, config) when present
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    String table = value.getString("table");
                    // NOTE(review): the data side always looks up with type "ALL";
                    // configs stored under any other source_type are never matched - confirm
                    String key = getKey(table, "ALL");
                    
                    ReadOnlyBroadcastState<String, TableProcess> state = ctx.getBroadcastState(tpStateDesc);
                    TableProcess tp = state.get(key);
                    // tp == null means this record does not belong to a dim table
                    if (tp != null) {
                        // forward only the "data" payload, tagged with the op type
                        JSONObject data = value.getJSONObject("data");
                        data.put("op_type", value.getString("type"));
                        out.collect(Tuple2.of(data, tp));
                    }
                }
                
                // 3. per config record (invoked once on every parallel instance):
                //    store the config into the broadcast state
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    log.debug("received config for source table {}", tp.getSourceTable());
                    
                    BroadcastState<String, TableProcess> state = ctx.getBroadcastState(tpStateDesc);
                    
                    String key = getKey(tp.getSourceTable(), tp.getSourceType());
                    // when a config row is deleted, the phoenix table is dropped as
                    // well, so the state entry must go too - otherwise later writes
                    // would target a table that no longer exists
                    if ("d".equals(tp.getOp())) {
                        state.remove(key);
                    } else {
                        // c and r add a new config, u updates an existing one
                        state.put(key, tp);
                    }
                    
                }
                
                private String getKey(String table, String type) {
                    return table + ":" + type;
                }
                
            });
        
        
    }
    
    /**
     * Maintains the phoenix dim tables according to the config stream:
     * r/c create the table, d drops it, u drops and re-creates it.
     * Config records are passed through unchanged.
     */
    private SingleOutputStreamOperator<TableProcess> createPhoenixTable(
        SingleOutputStreamOperator<TableProcess> tpStream) {
        return tpStream
            .process(new ProcessFunction<TableProcess, TableProcess>() {
                
                private Connection conn;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    // one phoenix connection per parallel instance
                    conn = JdbcUtil.getPhoenixConnection();
                }
                
                @Override
                public void close() throws Exception {
                    // release the connection when the task shuts down
                    JdbcUtil.closeConnection(conn);
                }
                
                @Override
                public void processElement(TableProcess tp,
                                           Context ctx,
                                           Collector<TableProcess> out) throws Exception {
                    String op = tp.getOp();
                    if ("r".equals(op) || "c".equals(op)) {
                        // new config: create the table
                        createTable(tp);
                    } else if ("d".equals(op)) {
                        // config removed: drop the table
                        dropTable(tp);
                    } else {
                        // config updated: re-create the table from scratch
                        dropTable(tp);
                        createTable(tp);
                    }
                    out.collect(tp);
                    
                }
                
                // create table if not exists t(id varchar, ..., constraint pk primary key(id)) SALT_BUCKETS = 4
                private void createTable(TableProcess tp) throws SQLException {
                    StringBuilder sql = new StringBuilder();
                    sql
                        .append("create table if not exists ")
                        .append(tp.getSinkTable())
                        .append("(")
                        // "id,tm_name" => "id varchar,tm_name varchar"
                        .append(tp.getSinkColumns().replaceAll("[^,]+", "$0 varchar"))
                        .append(", constraint pk primary key(")
                        .append(tp.getSinkPk() == null ? "id" : tp.getSinkPk())  // default pk column: id
                        .append("))")
                        .append(tp.getSinkExtend() == null ? "" : tp.getSinkExtend());
                    log.info("建表语句: " + sql);
                    executeDDLSQL(sql.toString());
                }
                
                
                private void dropTable(TableProcess tp) throws SQLException {
                    String sql = "drop table if exists " + tp.getSinkTable();
                    executeDDLSQL(sql);
                }
                
                // Identifiers come from the internal config table and are
                // concatenated into DDL because jdbc bind parameters cannot be
                // used for table/column names.
                private void executeDDLSQL(String sql) throws SQLException {
                    // try-with-resources: the statement is closed even if execute() throws
                    try (PreparedStatement ps = conn.prepareStatement(sql)) {
                        ps.execute();
                    }
                }
                
            });
    }
    
    /**
     * Reads the table_process config table with flink cdc: a full snapshot
     * first (op=r), then incremental binlog changes (c/u/d). Only rows whose
     * sink_type is "dim" are kept.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        Properties props = new Properties();
        props.setProperty("useSSL", "false");
        // NOTE(review): credentials are hard coded - should be moved to Constant/config
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
            .hostname(Constant.MYSQL_HOST)
            .port(3306)
            .databaseList("gmall2023_config") // captured database; use tableList ".*" for the whole db
            .tableList("gmall2023_config.table_process") // captured table: db.table
            .username("root")
            .password("aaaaaa")
            .jdbcProperties(props)
            .serverTimeZone("Asia/Shanghai")
            .startupOptions(StartupOptions.initial()) // snapshot everything first, then follow the binlog
            .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
            .build();
        
        return env
            .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mysql-cdc")
            .map(new MapFunction<String, TableProcess>() {
                @Override
                public TableProcess map(String value) throws Exception {
                    JSONObject obj = JSON.parseObject(value);
                    String op = obj.getString("op");
                    // d carries the old row in "before"; r/c/u carry the new row in "after"
                    TableProcess tp;
                    if ("d".equals(op)) {
                        tp = obj.getObject("before", TableProcess.class);
                    } else {
                        tp = obj.getObject("after", TableProcess.class);
                        
                    }
                    tp.setOp(op);
                    return tp;
                }
            })
            .filter(tp -> "dim".equals(tp.getSinkType()));  // keep only dim table configs
    }
    
    /**
     * Data cleaning: keeps only well-formed json records from the gmall2023
     * database whose type is insert/update/delete/bootstrap-insert and that
     * carry a non-null "data" payload. bootstrap-insert is normalized to
     * insert. Malformed records are logged and dropped.
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
            .process(new ProcessFunction<String, JSONObject>() {
                @Override
                public void processElement(String value,
                                           Context ctx,
                                           Collector<JSONObject> out) throws Exception {
                    try {
                        // parse once; filtering and normalization both work on the parsed object
                        JSONObject obj = JSON.parseObject(value);
                        String type = obj.getString("type");
                        // Fix: normalize ONLY the type field. The previous
                        // replaceAll("bootstrap-", "") over the whole json string
                        // would also corrupt data values containing "bootstrap-".
                        if ("bootstrap-insert".equals(type)) {
                            type = "insert";
                            obj.put("type", type);
                        }
                        if ("gmall2023".equals(obj.getString("database"))
                            && ("insert".equals(type) || "update".equals(type) || "delete".equals(type))
                            && null != obj.getJSONObject("data")) {
                            out.collect(obj);
                        }
                    } catch (Exception e) {
                        log.warn("数据格式有误,不是正确的 json 数据: " + value);
                    }
                }
            });
        
    }
}
/*
 SALT_BUCKETS = 4  创建盐表(hbase中的预分区表)
    分区的边界: 使用十六进制数据
       01
       02
       03
 
 RegionServer
    表
       Region
       
  默认情况, 一张表一个 Region
  
  region 会分裂
    0.96 之前:
        当 region 到 10G 的时候, 会一分为 2
    0.96 之后:
        min(2 * 128 * n^3 M, 10G) 分裂的依据
   
  region会迁移:
    同一张表的多个 region 会分布在不同的RegionServer 上
    
  生产环境下, 一般避免 region 的分裂和迁移
  
    预分区
    
 
 
 
------
cdc 读取配置信息:
    op:  r  (读取的快照)  before=null  after 有值
    op:  c (插入数据)    before=null  after 有值
    op:  u (更新数据)    before=有值  after 有值
    更新主键: 先 d 再 c
    op:  d(删除)       before=有值  after=null



 */