package que.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;

import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;

import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import que.app.AppV1;
import que.bean.TableProcess;
import que.common.Word;
import que.util.FlinkSinkUtil;
import que.util.JdbcUtil;


import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

/**
 * @author Naruto
 * @description:
 * @Class_Name com.atguigu.realtime.app.dim.DimApp
 * @Date 2022/08/15 18:15
 */
/**
 * Dimension-table (DIM) layer job.
 *
 * <p>Pipeline: Kafka ODS topic -> ETL filter -> connect with broadcast
 * dim-table configuration (read from MySQL with flink-cdc) -> strip columns
 * absent from the sink table -> write each dimension to its Phoenix table.
 */
public class DimApp extends AppV1 {

    public static void main(String[] args) {
        new DimApp().init(9000, 1, "DimApp", Word.TOPIC_ODS_DB);
    }

    @Override
    protected void handle(StreamExecutionEnvironment env,
                          DataStreamSource<String> stream) {
        // 1. ETL: drop records that are not valid JSON; keep only insert/update rows.
        SingleOutputStreamOperator<JSONObject> etlStream = etl(stream);

        // 2. Read the dim-table configuration (broadcast side) with the flink-cdc MySqlSource.
        SingleOutputStreamOperator<TableProcess> tableStream = readTableProcess(env);

        // 3. Create the Phoenix tables from the config stream, broadcast the config,
        //    and connect it with the data stream; only dim-table rows survive.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream = connect(etlStream, tableStream);

        // 4. A dim table may have fewer columns than the MySQL source table:
        //    remove the extra fields from each row.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> resultStream = filterNoNeedColumns(dataTpStream);

        // 5. Route every dimension row to its own Phoenix table.
        writeToPhoenix(resultStream);
    }

    /**
     * Writes every (row, config) pair to the Phoenix table named in the config.
     *
     * <p>The plain JDBC sink connector is not used here: one JDBC sink serves a
     * single fixed SQL statement (a single table), while each record of this
     * stream may target a different table.
     */
    private void writeToPhoenix(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getPhoenixSink());
    }

    /**
     * Removes from each data row every field that is not listed in the config's
     * sink columns; the synthetic "op_type" field is always kept.
     *
     * @param dataTpStream stream of (row, table-config) pairs
     * @return the same pairs with extra row fields removed in place
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterNoNeedColumns(
            SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream) {

        return dataTpStream
                .map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> value) throws Exception {
                        // Actual row payload.
                        JSONObject data = value.f0;
                        // Columns that exist in the Phoenix sink table.
                        List<String> columns = Arrays.asList(value.f1.getSinkColumns().split(","));
                        // Drop everything else except the operation-type marker,
                        // which the sink needs downstream.
                        data.keySet().removeIf(key -> !columns.contains(key) && !"op_type".equals(key));
                        return value;
                    }
                });
    }

    /**
     * Creates the Phoenix table for every configuration record, then broadcasts
     * the configuration and connects it with the data stream. Rows whose source
     * table has no configuration entry (i.e. non-dim tables) are dropped.
     *
     * @param etlStream   cleaned change-log rows
     * @param tableStream dim-table configuration rows from MySQL
     * @return (row data, matching table config) pairs
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connect(SingleOutputStreamOperator<JSONObject> etlStream,
                                                                                 SingleOutputStreamOperator<TableProcess> tableStream) {

        // FIX: the result of this map used to be discarded and the ORIGINAL
        // tableStream was broadcast, leaving the Phoenix table creation on a
        // dead-end branch with no ordering guarantee against the broadcast
        // path: a dim row could reach the Phoenix sink before its table
        // existed. Chaining the map in front of broadcast() guarantees each
        // table is created before its config record is broadcast.
        SingleOutputStreamOperator<TableProcess> createdTableStream = tableStream.map(
            new RichMapFunction<TableProcess, TableProcess>() {
                private Connection conn;

                // Open the Phoenix connection once per task.
                @Override
                public void open(Configuration parameters) throws Exception {
                    conn = JdbcUtil.getPhoenixConnection();
                }

                // Release the Phoenix connection when the task shuts down.
                @Override
                public void close() throws Exception {
                    JdbcUtil.closeConnection(conn);
                }

                // Create the sink table (idempotent: "create table if not exists").
                @Override
                public TableProcess map(TableProcess table) throws Exception {
                    // The server closes idle connections after a while; re-open when needed.
                    if (conn.isClosed()) {
                        conn = JdbcUtil.getPhoenixConnection();
                    }

                    // Build: create table if not exists <t> (<cols>, constraint pk primary key (<pk>)) <extend>
                    StringBuilder sql = new StringBuilder();
                    sql
                        .append(" create table if not exists ")
                        .append(table.getSinkTable())
                        .append(" ( ")
                        // "id,tm_name" -> "id varchar,tm_name varchar"
                        .append(table.getSinkColumns().replaceAll("[^,]+", "$0 varchar"))
                        .append(",constraint pk primary key ( ")
                        .append(table.getSinkPk() == null ? "id" : table.getSinkPk())
                        .append(" )) ")
                        .append(table.getSinkExtend() == null ? "" : table.getSinkExtend());

                    System.out.println("phoenix建表语句为：" + sql.toString());

                    // FIX: try-with-resources so the statement is also closed
                    // when execute() throws (the old code leaked it on failure).
                    try (PreparedStatement ps = conn.prepareStatement(sql.toString())) {
                        ps.execute();
                    }

                    return table;
                }
            }
        );

        // Broadcast the configuration, keyed by source table name.
        MapStateDescriptor<String, TableProcess> tbStateDesc =
                new MapStateDescriptor<>("tbState", String.class, TableProcess.class);
        BroadcastStream<TableProcess> broadcastStream = createdTableStream.broadcast(tbStateDesc);

        // Connect the data stream with the broadcast config stream.
        return etlStream.connect(broadcastStream)
                .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public void processElement(JSONObject value,
                                               ReadOnlyContext ctx,
                                               Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Look up the config for this row's source table.
                        ReadOnlyBroadcastState<String, TableProcess> state = ctx.getBroadcastState(tbStateDesc);
                        TableProcess tableProcess = state.get(value.getString("table"));

                        // No config entry => not a dim table => drop the row.
                        if (tableProcess != null) {
                            JSONObject data = value.getJSONObject("data");
                            // Carry the operation type along; the sink uses it later.
                            data.put("op_type", value.getString("type"));
                            out.collect(Tuple2.of(data, tableProcess));
                        }
                    }

                    // Store every configuration record in the broadcast state,
                    // keyed by its source table name.
                    @Override
                    public void processBroadcastElement(TableProcess value,
                                                        Context ctx,
                                                        Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        BroadcastState<String, TableProcess> broadcastState = ctx.getBroadcastState(tbStateDesc);
                        broadcastState.put(value.getSourceTable(), value);
                    }
                });
    }

    /**
     * Builds the flink-cdc MySqlSource that streams the dim-table configuration
     * rows. Each Debezium change record looks like
     * {"before":...,"after":{...},"source":{...},"op":"r",...}; only the
     * "after" image is kept and mapped to a {@link TableProcess}.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .databaseList("edu_config")
                .tableList("edu_config.table_process")
                .username("root")
                .password("aaaaaa")
                // Deserialize binlog events to JSON strings.
                .deserializer(new JsonDebeziumDeserializationSchema())
                .build();

        return env
                .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySql Source")
                .map(str -> JSON.parseObject(str).getObject("after", TableProcess.class));
    }

    /**
     * ETL: drops records that are not valid JSON and keeps only insert/update
     * rows of the "edu" database whose "data" payload is non-empty.
     *
     * <p>FIX: the old filter+map pair parsed every record twice; this single
     * flatMap parses once with identical semantics.
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {

        return stream
                .flatMap((String str, Collector<JSONObject> out) -> {
                    try {
                        // The maxwell initial full load prefixes the type with
                        // "bootstrap-"; strip it before filtering.
                        JSONObject json = JSON.parseObject(str.replace("bootstrap-", ""));
                        String type = json.getString("type");
                        if ("edu".equals(json.getString("database"))
                                && ("insert".equals(type) || "update".equals(type)) // only inserts and updates
                                && json.get("data") != null                         // payload must exist
                                && json.getString("data").length() > 2) {           // and must not be "{}"
                            out.collect(json);
                        }
                    } catch (Exception e) {
                        System.out.println("数据不是json格式： " + str);
                    }
                })
                // Lambda with a generic Collector loses type info to erasure;
                // declare the output type explicitly.
                .returns(JSONObject.class);
    }
}
/*
https://developer.aliyun.com/article/777502
https://github.com/ververica/flink-cdc-connectors


 SALT_BUCKETS = 4
 盐表

 ------------
 regionserver
 region
  数据

默认情况 建表一张表只有一个region

当region膨胀一定程度, 会自动分裂

    旧: 10G 一分为2

    新: ...

    hadoop162
     r1  r2

   自动迁移
    r2 迁移到163

 -----
 避免分裂和迁移: 预分区


 --------

 Phoenix建表: 如何创建带有预分区的表?







 */