package com.atguigu.func;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.bean.TableProcess;
import com.atguigu.utils.JdbcUtil;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.*;

//Custom connected-stream processor used by BaseDBApp to dynamically split fact tables that need no extra processing
public class DwdTableProcessFunction extends BroadcastProcessFunction<JSONObject, String, JSONObject> {
    // Output type (JSONObject) is kept identical to the main stream's element type.

    /** Descriptor of the broadcast state holding table_process config; shared with BaseDBApp. */
    private final MapStateDescriptor<String, TableProcess> mapStateDescriptor;

    /**
     * Pre-loaded snapshot of the config table, keyed by "sourceTable-sourceType".
     * Acts as a fallback in {@link #processElement} for main-stream records that
     * arrive before the broadcast (CDC) stream has delivered the matching config.
     */
    private HashMap<String, TableProcess> tableProcessHashMap;

    /**
     * @param mapStateDescriptor the broadcast-state descriptor created in BaseDBApp;
     *                           must be the same descriptor used when broadcasting the config stream
     */
    public DwdTableProcessFunction(MapStateDescriptor<String, TableProcess> mapStateDescriptor) {
        this.mapStateDescriptor = mapStateDescriptor;
    }

    /**
     * Pre-loads the DWD config rows from MySQL into {@link #tableProcessHashMap}
     * so the main stream can be routed even before the broadcast stream catches up.
     *
     * @throws Exception if the JDBC connection or the config query fails
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        tableProcessHashMap = new HashMap<>();

        // FIX: the connection was previously never closed (one leaked connection per
        // task slot); try-with-resources guarantees release even if the query throws.
        // FIX: the Connector/J property is spelled "serverTimezone" — the miscased
        // "serverTimeZone" was silently ignored by the driver.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://hadoop102:3306/gmall-220828-config?" +
                        "user=root&password=123456&useUnicode=true&" +
                        "characterEncoding=utf8&serverTimezone=Asia/Shanghai&useSSL=false")) {

            // Query the config table and convert each row into a TableProcess bean.
            List<TableProcess> tableProcesses = JdbcUtil.queryList(
                    conn,
                    "select * from table_process where sink_type='dwd'",
                    TableProcess.class,
                    true);

            // Fact tables are identified by table name AND operation type,
            // so the lookup key combines both.
            for (TableProcess tableProcess : tableProcesses) {
                String key = tableProcess.getSourceTable() + "-" + tableProcess.getSourceType();
                tableProcessHashMap.put(key, tableProcess);
            }
        }
    }

    /*
     * Broadcast-stream element: a Flink CDC change record for table_process, e.g.:
     * {
     *   "before": null,
     *   "after": {
     *     "source_table":"base_trademark", "source_type":"update",
     *     "sink_table":"dim_base_trademark", "sink_columns":"id,tm_name",
     *     "sink_pk":"id", "sink_extend":null
     *   },
     *   "source": { "connector":"mysql", "db":"gmall-211227-config",
     *               "table":"table_process", ... },
     *   "op":"r",
     *   "ts_ms":1655172926150,
     *   "transaction":null
     * }
     */

    /**
     * Config-stream handler:
     * 1. parse the CDC record into a {@link TableProcess};
     * 2. keep the broadcast state in sync (put on insert/update/read, remove on delete).
     */
    @Override
    public void processBroadcastElement(String value, Context ctx, Collector<JSONObject> out) throws Exception {
        JSONObject jsonObject = JSON.parseObject(value);

        BroadcastState<String, TableProcess> broadcastState = ctx.getBroadcastState(mapStateDescriptor);
        if ("d".equals(jsonObject.getString("op"))) {
            // Config row deleted in MySQL: remove it from both the broadcast state and
            // the pre-loaded map, otherwise the main stream would keep matching it.
            // For a delete, the old row content is carried in "before".
            JSONObject before = jsonObject.getJSONObject("before");

            // NOTE: for fact tables the key is "sourceTable-sourceType" (unique id).
            String key = before.getString("source_table") + "-" + before.getString("source_type");
            broadcastState.remove(key);
            tableProcessHashMap.remove(key);
        } else {
            // Insert/update/snapshot-read: parse the new config from "after" and
            // store it in broadcast state.
            TableProcess tableProcess = JSON.parseObject(jsonObject.getString("after"), TableProcess.class);
            String key = tableProcess.getSourceTable() + "-" + tableProcess.getSourceType();

            // The HashMap was filled once in open() and serves only as a fallback;
            // broadcast state is the live copy, so no duplicate put here.
            broadcastState.put(key, tableProcess);
        }
    }

    /*
     * Main-stream element: a Maxwell change record consumed from Kafka topic_db
     * (Maxwell reads the MySQL binlog), e.g.:
     * {
     *   "database":"gmall","table":"cart_info","type":"update",
     *   "ts":1592270938,"xid":13090,"xoffset":1573,
     *   "data": { ...latest column values... },
     *   "old":  { ...previous values of changed columns... }
     * }
     */

    /**
     * Main-stream handler:
     * 1. look up the routing config (broadcast state first, pre-loaded map as fallback);
     * 2. filter rows (no config => drop) and columns (keep only sink_columns);
     * 3. attach the sink_table field;
     * 4. emit the enriched record downstream.
     */
    @Override
    public void processElement(JSONObject value, ReadOnlyContext ctx, Collector<JSONObject> out) throws Exception {

        // 1. Broadcast state is the authoritative config; the HashMap covers records
        //    that arrive before the broadcast side has been populated.
        ReadOnlyBroadcastState<String, TableProcess> broadcastState = ctx.getBroadcastState(mapStateDescriptor);
        String key = value.getString("table") + "-" + value.getString("type");

        TableProcess tableProcess = broadcastState.get(key);
        TableProcess tableProcessMap = tableProcessHashMap.get(key);

        // 2. Row filtering: a missing config for this table/type means the record is
        //    simply not collected.
        if (tableProcess != null || tableProcessMap != null) {
            if (tableProcess == null) {
                tableProcess = tableProcessMap;
            }
            // getJSONObject returns a reference into `value`, so filtering `data`
            // in place also filters `value` itself.
            JSONObject data = value.getJSONObject("data");
            String sinkColumns = tableProcess.getSinkColumns();
            filterColumns(data, sinkColumns);

            // 3. Attach the routing target so the downstream sink knows where to write.
            value.put("sink_table", tableProcess.getSinkTable());

            // 4. Emit the row/column-filtered, enriched record.
            out.collect(value);
        }
    }

    /**
     * Removes from {@code data} every column whose name is not listed in
     * {@code sinkColumns} (comma-separated). Membership is checked by exact name
     * against a Set — a plain {@code sinkColumns.contains(name)} substring check
     * would wrongly match e.g. "100" inside "1001".
     *
     * @param data        the Maxwell "data" object (column name -> value), mutated in place
     * @param sinkColumns comma-separated list of columns to keep
     */
    private void filterColumns(JSONObject data, String sinkColumns) {
        // HashSet gives O(1) lookups instead of O(n) List.contains per column.
        Set<String> columnSet = new HashSet<>(Arrays.asList(sinkColumns.split(",")));
        data.entrySet().removeIf(entry -> !columnSet.contains(entry.getKey()));
    }

}
