package com.atguigu.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseAppV1;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.common.Constant;
import com.atguigu.realtime.util.FlinkSinkUtil;
import com.atguigu.realtime.util.JdbcUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;


/**
 * @Author lzc
 * @Date 2022/7/16 9:08
 */
/**
 * Dimension-layer app: consumes the ods_db topic, routes dimension-table rows to
 * Phoenix according to a config table (gmall_config.table_process) that is read
 * via MySQL CDC and distributed to all subtasks as broadcast state.
 */
public class DimApp extends BaseAppV1 {
    public static void main(String[] args) {
        new DimApp().init(
            2001,
            2,
            "DimApp",
            Constant.TOPIC_ODS_DB
        );
    }
    
    @Override
    protected void handle(StreamExecutionEnvironment env,
                          DataStreamSource<String> stream) {
        // 1. ETL: keep only well-formed insert/update rows from the gmall2022 database
        SingleOutputStreamOperator<JSONObject> etledStream = etl(stream);
        
        // 2. Read the dim config table (snapshot + binlog) via MySQL CDC
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);
        // 3. Broadcast the config stream and connect it with the data stream,
        //    pairing each data row with its matching TableProcess config
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream = connect(etledStream, tpStream);
        // 4. Drop columns that are not listed in the config's sinkColumns
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream = filterNoNeedColumns(connectedStream);
        // 5. Write the dimension rows to Phoenix
        writeToPhoenix(filteredStream);
    }
    
    /**
     * Sinks each (row, config) pair to Phoenix with a custom sink.
     * keyBy on the sink table groups rows of the same dimension table onto the
     * same subtask (useful for per-table batching in the sink).
     */
    private void writeToPhoenix(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream.keyBy(t -> t.f1.getSinkTable()).addSink(FlinkSinkUtil.getPhoenixSink());
    }
    
    /**
     * Removes from each data row every key that is neither listed in the config's
     * sinkColumns nor the bookkeeping key "operate_type" (added in connect()).
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterNoNeedColumns(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        return stream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                JSONObject data = t.f0;
                List<String> columns = Arrays.asList(t.f1.getSinkColumns().split(","));
                // Mutates the JSONObject in place; "operate_type" survives so the sink
                // can distinguish insert from update.
                data.keySet().removeIf(key -> !columns.contains(key) && !key.equals("operate_type"));
                return t;
            }
        });
    }
    
    /**
     * Broadcasts the config stream and connects it to the data stream.
     * Broadcast state: key = MySQL source table name, value = its TableProcess config.
     * Data rows whose table has no config entry are silently dropped (not dims).
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connect(SingleOutputStreamOperator<JSONObject> dataStream,
                                                                                 SingleOutputStreamOperator<TableProcess> tpStream) {
        // 1. Turn tpStream into a broadcast stream backed by a map state descriptor
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        BroadcastStream<TableProcess> tpBcStream = tpStream.broadcast(tpStateDesc);
        // 2. Connect the data stream with the broadcast config stream
        return dataStream
            .connect(tpBcStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                
                // Shared Phoenix connection for DDL; lifecycle tied to open()/close().
                private Connection conn;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    conn = JdbcUtil.getPhoenixConnection();
                }
                
                @Override
                public void close() throws Exception {
                    JdbcUtil.closeConnection(conn);
                }
                
                @Override
                public void processElement(JSONObject obj,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Look up the config for this row's source table in broadcast state
                    ReadOnlyBroadcastState<String, TableProcess> state = ctx.getBroadcastState(tpStateDesc);
                    String table = obj.getString("table");
                    TableProcess tp = state.get(table);
                    
                    if (tp != null) {
                        // Configured dimension table: emit only the "data" payload,
                        // tagged with the original operation type so the sink can tell
                        // whether the dimension row was inserted or updated.
                        JSONObject data = obj.getJSONObject("data");
                        data.put("operate_type", obj.getString("type"));
                        out.collect(Tuple2.of(data, tp));
                    }
                }
                
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // 1. Create the Phoenix table for this config entry (idempotent:
                    //    "create table if not exists"). Doing DDL here is not ideal but
                    //    keeps the job self-contained.
                    createTable(tp);
                    // 2. Store the config in broadcast state so processElement can see it
                    saveTpToState(tp, ctx);
                }
                
                // Puts the config into broadcast state keyed by the MySQL source table name.
                private void saveTpToState(TableProcess tp, Context ctx) throws Exception {
                    BroadcastState<String, TableProcess> state = ctx.getBroadcastState(tpStateDesc);
                    state.put(tp.getSourceTable(), tp);
                }
                
                /**
                 * Executes the DDL for this dimension table in Phoenix, e.g.
                 * create table if not exists per(id varchar, age varchar, constraint pk primary key(id)) SALT_BUCKETS = 4
                 * All columns are varchar; pk defaults to "id" when not configured.
                 */
                private void createTable(TableProcess tp) {
                    StringBuilder sql = new StringBuilder();
                    sql
                        .append("create table if not exists ")
                        .append(tp.getSinkTable())
                        .append("(")
                        // turn "a,b,c" into "a varchar,b varchar,c varchar"
                        .append(tp.getSinkColumns().replaceAll("[^,]+", "$0 varchar"))
                        .append(", constraint pk primary key(")
                        .append(tp.getSinkPk() == null ? "id" : tp.getSinkPk())
                        .append("))")
                        .append(tp.getSinkExtend() == null ? "" : tp.getSinkExtend());
                    
                    System.out.println("建表语句: " + sql);
                    // try-with-resources guarantees the statement is closed even when
                    // execute() throws (the original leaked it on the failure path)
                    try (PreparedStatement ps = conn.prepareStatement(sql.toString())) {
                        ps.execute();
                    } catch (SQLException e) {
                        // chain the cause so the real SQL error is not lost
                        throw new RuntimeException("在phoenix中建表语句执行失败, 请检查:" + sql, e);
                    }
                }
            });
    }
    
    /**
     * Reads gmall_config.table_process via MySQL CDC: first a full snapshot, then
     * incremental changes from the binlog. Each Debezium JSON record's "after"
     * field is mapped to a TableProcess bean.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
            .hostname("hadoop162")
            .port(3306)
            .databaseList("gmall_config") // set captured database, If you need to synchronize the whole database, Please set tableList to ".*".
            .tableList("gmall_config.table_process") // set captured table
            .username("root")
            .password("aaaaaa")
            .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
            .build();
        return env
            .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mysql-cdc-source")
            .map(new MapFunction<String, TableProcess>() {
                @Override
                public TableProcess map(String value) throws Exception {
                    JSONObject obj = JSON.parseObject(value);
                    // For delete events Debezium sets "after" to null; getObject then
                    // returns null, which would NPE downstream — filtered out below.
                    return obj.getObject("after", TableProcess.class);
                }
            })
            // drop nulls produced by CDC delete records (fixes downstream NPE in
            // processBroadcastElement on tp.getSourceTable()/getSinkTable())
            .filter(tp -> tp != null);
    }
    
    /**
     * Keeps only well-formed change records: database gmall2022, type insert-like or
     * update, with a non-trivial "data" payload. Malformed JSON is logged and dropped.
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
            .filter(new FilterFunction<String>() {
                @Override
                public boolean filter(String value) throws Exception {
                    try {
                        JSONObject obj = JSON.parseObject(value);
                        // contains("insert") also matches Maxwell's "bootstrap-insert"
                        // — presumably intentional so bootstrap loads pass; confirm.
                        return "gmall2022".equals(obj.getString("database"))
                            && (obj.getString("type").contains("insert") || "update".equals(obj.getString("type")))
                            && obj.getString("data") != null
                            && obj.getString("data").length() > 2; // "{}" has length 2: reject empty payloads
                        
                    } catch (Exception e) {
                        System.out.println("json格式错误....");
                        return false;
                    }
                }
            })
            .map(JSON::parseObject);  // parse once more into JSONObject for downstream use
    }
}
/*
1. 历史原因
2. 到了kafka之后,相当于对数据做了一个备份和缓冲

------
 SALT_BUCKETS = 4
 
 盐表
 
 region server
    region
    
 表创建时 : 只有 1 个 region
 当 region 增长到一定大小(默认固定 10g)之后, 会发生分裂
 
 分裂出的新 region 可能会被迁移到其他 region server, 以保持负载均衡
   
 ------
 
 预分区
 
 
 ---------
 
 phoenix:
    盐表
 


 */