package com.atguigu.gmall.realtime.app.func;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.beans.TableProcess;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;

import java.util.Collections;
import java.util.HashSet;


// Type parameters: 1) element type of the main stream, 2) element type of the
// broadcast (config) stream, 3) element type of the output.
public class TableProcessFunction extends BroadcastProcessFunction<JSONObject, String, JSONObject> {

    // Descriptor used to access the broadcast state that maps
    // source table name -> dimension-table configuration.
    private final MapStateDescriptor<String, TableProcess> mapStateDescriptor;

    public TableProcessFunction(MapStateDescriptor<String, TableProcess> mapStateDescriptor) {
        this.mapStateDescriptor = mapStateDescriptor;
    }

    /**
     * Processes an element of the main (CDC data) stream using the dimension
     * configuration held in broadcast state.
     * <p>
     * 1. Filters out non-dimension records (tables with no config entry).<br>
     * 2. Trims the "data" payload down to the columns declared in the config.<br>
     * 3. Tags the record with the target sink table ("sink_table") and emits it.
     */
    @Override
    public void processElement(JSONObject jsonObject, ReadOnlyContext ctx, Collector<JSONObject> out) throws Exception {
        ReadOnlyBroadcastState<String, TableProcess> configState = ctx.getBroadcastState(mapStateDescriptor);
        String tableName = jsonObject.getString("table");

        // 1. A config entry exists only for dimension tables; fact-table
        //    records fall through and are dropped from this stream.
        TableProcess tableProcess = configState.get(tableName);
        if (tableProcess == null) {
            return;
        }

        // 2. Trim the payload to the configured sink columns.
        //    NOTE: a method-local set is used (instead of a shared mutable
        //    field) so a failure mid-call can never leak stale column names
        //    into the next invocation.
        String[] columns = StringUtils.split(tableProcess.getSinkColumns(), ",");
        HashSet<String> allowedColumns = new HashSet<>();
        Collections.addAll(allowedColumns, columns);

        JSONObject dataJsonObj = jsonObject.getJSONObject("data");
        dataJsonObj.entrySet().removeIf(entry -> !allowedColumns.contains(entry.getKey()));

        // 3. Record which dimension table this row should be written to,
        //    then keep it in the main stream.
        jsonObject.put("sink_table", tableProcess.getSinkTable());
        out.collect(jsonObject);
    }

    /**
     * Processes a dimension-configuration change event (Debezium-style JSON)
     * from the broadcast stream and applies it to the broadcast state.
     * <p>
     * op codes: c = create, r = read (initial snapshot), u = update, d = delete.
     * For "d" events the "after" field is null and "before" carries the old
     * row, so the op MUST be inspected before touching "after" — otherwise a
     * delete event triggers a NullPointerException.
     */
    @Override
    public void processBroadcastElement(String tableProcessJsonString, Context ctx, Collector<JSONObject> out) throws Exception {
        JSONObject tableProcessJsonObj = JSON.parseObject(tableProcessJsonString);
        String crudOptType = tableProcessJsonObj.getString("op");

        BroadcastState<String, TableProcess> tableProcessState = ctx.getBroadcastState(mapStateDescriptor);

        if ("d".equals(crudOptType)) {
            // Config row deleted: remove its entry from state. Delete events
            // carry the old row in "before" ("after" is null).
            TableProcess beforeTableProcess = tableProcessJsonObj.getObject("before", TableProcess.class);
            if (beforeTableProcess != null) {
                tableProcessState.remove(beforeTableProcess.getSourceTable());
            }
        } else {
            // c / r / u: upsert the current config row, keyed by source table.
            TableProcess afterTableProcess = tableProcessJsonObj.getObject("after", TableProcess.class);
            if (afterTableProcess != null) {
                tableProcessState.put(afterTableProcess.getSourceTable(), afterTableProcess);
            }
        }

        // Automatic table creation was intentionally moved to CheckTableFunction:
        // this method runs once per parallel instance, so doing DDL here would
        // repeat CREATE TABLE / ALTER TABLE operations.
    }

}
