package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseAppV1;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

/**
 * @author Archie
 * @date 2021-10-18 10:49
 * @description
 */
public class DwdDbApp extends BaseAppV1 {

	public static void main(String[] args) {
		new DwdDbApp().init(2002, 1, "DwdDbApp", "DwdDbApp", Constant.TOPIC_ODS_DB);
	}

	@Override
	protected void run(StreamExecutionEnvironment env, DataStreamSource<String> stream) {
		// 1. ETL: parse and sanity-filter the raw ODS change-log records
		SingleOutputStreamOperator<JSONObject> etledStream = etl(stream);
		// 2. Read the routing-configuration table (table_process) as a CDC stream
		SingleOutputStreamOperator<TableProcess> tpStream = readProcessTable(env);
		// 3. Connect the data stream with the broadcast configuration stream
		SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream = connectStream(etledStream, tpStream);
		// 4. Drop the columns that are not configured to be sinked
		SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream = filterColumns(connectedStream);
		// 5. Dynamically split into a Kafka-bound (fact) and an HBase-bound (dimension) stream
		Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> separatedStream = dynamicSplit(filteredStream);
		// 6. Write each stream to its respective sink
		writeToKafka(separatedStream.f0);
		writeToHbase(separatedStream.f1);
	}

	/**
	 * Writes the dimension stream into HBase (via Phoenix).
	 *
	 * The sink has to do two things: create the Phoenix table on the first
	 * record of a given dimension, then write rows over JDBC. Flink's built-in
	 * JDBC sink only covers the latter, hence the custom Phoenix sink.
	 *
	 * @param stream HBase-bound records paired with their routing config
	 */
	private void writeToHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
		stream
				// key by target table so each table's DDL/rows are handled in one subtask
				.keyBy(t -> t.f1.getSinkTable())
				.addSink(FlinkSinkUtil.getPhoenixSink());
	}

	/**
	 * Writes the fact stream into Kafka.
	 *
	 * @param stream Kafka-bound records paired with their routing config
	 */
	private void writeToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
		stream.addSink(FlinkSinkUtil.getKafkaSink());
	}

	/**
	 * Splits the stream by the configured sink type:
	 * main output - records to be written to Kafka;
	 * side output  - records to be written to HBase.
	 *
	 * Records whose sink type is neither are silently dropped.
	 *
	 * @param filteredStream column-filtered records paired with their routing config
	 * @return (kafkaStream, hbaseStream)
	 */
	private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream) {
		// anonymous subclass keeps the generic type reifiable for Flink
		OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbaseTag"){};
		SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = filteredStream
				.keyBy(t -> t.f1.getSourceTable())
				.process(new KeyedProcessFunction<String, Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
					@Override
					public void processElement(Tuple2<JSONObject, TableProcess> value,
											   Context ctx,
											   Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
						String sinkType = value.f1.getSinkType();
						if (Constant.DWD_SINK_KAFKA.equals(sinkType)) {
							out.collect(value);
						} else if (Constant.DWD_SINK_HBASE.equals(sinkType)) {
							ctx.output(hbaseTag, value);
						}
					}
				});
		return Tuple2.of(kafkaStream, kafkaStream.getSideOutput(hbaseTag));
	}

	/**
	 * Removes from every record each column that is not listed in the
	 * configuration's sink_columns.
	 *
	 * Membership is checked against the exact comma-separated column names.
	 * (A plain {@code String.contains} check would wrongly keep e.g. "id"
	 * whenever a column such as "sku_id" is configured.)
	 *
	 * @param connectedStream records paired with their routing config
	 * @return records with only the configured columns retained
	 */
	private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumns(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream) {
		return connectedStream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
			@Override
			public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> value) throws Exception {
				JSONObject data = value.f0;
				// Build the exact-match set of columns to keep (trim guards
				// against stray whitespace around commas in the config value).
				Set<String> wanted = new HashSet<>();
				for (String col : value.f1.getSinkColumns().split(",")) {
					wanted.add(col.trim());
				}
				data.keySet().removeIf(column -> !wanted.contains(column));
				return value;
			}
		});
	}

	/**
	 * Connects the data stream with the broadcast configuration stream.
	 *
	 * Each data record is matched against the broadcast state by
	 * "table:type"; records with no matching config are dropped
	 * (those tables are not meant to be sinked).
	 *
	 * @param etledStream data stream
	 * @param tpStream    configuration stream
	 * @return (record payload, matching routing config) pairs
	 */
	private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStream(SingleOutputStreamOperator<JSONObject> etledStream,
																					   SingleOutputStreamOperator<TableProcess> tpStream) {
		MapStateDescriptor<String, TableProcess> tpDesc = new MapStateDescriptor<>(
				"tpState",
				String.class,
				TableProcess.class
		);

		// 1. Turn the configuration stream into a broadcast stream
		BroadcastStream<TableProcess> tpBDStream = tpStream.broadcast(tpDesc);

		// 2. Connect the data stream with the broadcast stream and process
		return etledStream
				.keyBy(obj -> obj.getString("table")) /* records of the same table go to the same group */
				.connect(tpBDStream)
				/*
				 * param1 - key type of the keyed input
				 * param2 - data stream element type
				 * param3 - broadcast stream element type
				 * param4 - output type
				 **/
				.process(new KeyedBroadcastProcessFunction<String, JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
					/**
					 * Looks up the routing config for this record's table and
					 * operation type in the broadcast state.
					 *
					 * @param value data-stream record
					 * @param ctx   read-only broadcast context
					 * @param out   collector for (payload, config) pairs
					 * @throws Exception on state access failure
					 */
					@Override
					public void processElement(JSONObject value,
											   ReadOnlyContext ctx,
											   Collector<Tuple2<JSONObject,
													   TableProcess>> out) throws Exception {
						ReadOnlyBroadcastState<String, TableProcess> bcState = ctx.getBroadcastState(tpDesc);
						// key format must mirror processBroadcastElement: "sourceTable:operateType"
						String key = value.getString("table") + ":" + value.getString("type");
						TableProcess tp = bcState.get(key);
						// Some tables are not configured to be sinked; tp is null for those.
						if (tp != null) {
							out.collect(Tuple2.of(value.getJSONObject("data"), tp));
						}
					}

					/**
					 * Stores each configuration record in the broadcast state.
					 *
					 * @param value configuration-stream record
					 * @param ctx   broadcast context
					 * @param out   unused
					 * @throws Exception on state access failure
					 */
					@Override
					public void processBroadcastElement(TableProcess value,
														Context ctx,
														Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
						BroadcastState<String, TableProcess> bcState = ctx.getBroadcastState(tpDesc);
						bcState.put(value.getSourceTable() + ":" + value.getOperateType(), value);
					}
				});
	}

	/**
	 * Reads the routing-configuration table via the MySQL CDC connector
	 * and converts it to a {@link TableProcess} stream.
	 *
	 * @param env stream execution environment
	 * @return configuration stream (insert/update images only)
	 */
	private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
		StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
		tenv
				.executeSql("CREATE TABLE `table_process`( " +
						"   `source_table`  string, " +
						"   `operate_type`  string, " +
						"   `sink_type`  string, " +
						"   `sink_table`  string, " +
						"   `sink_columns` string, " +
						"   `sink_pk`  string, " +
						"   `sink_extend`  string, " +
						"   PRIMARY KEY (`source_table`,`operate_type`)  NOT ENFORCED" +
						")with(" +
						"   'connector' = 'mysql-cdc', " +
						"   'hostname' = 'hadoop101', " +
						"   'port' = '3306', " +
						"   'username' = 'root', " +
						"   'password' = '123123', " +
						"   'database-name' = 'gmall2021_realtime', " +
						"   'table-name' = 'table_process'," +
						"   'debezium.snapshot.mode' = 'initial' " +  // on job start: full snapshot first, then incremental changes
						")");

		// alias the columns to match TableProcess bean property names
		Table table = tenv.sqlQuery("select " +
				"  source_table sourceTable, " +
				"  sink_type sinkType, " +
				"  operate_type operateType, " +
				"  sink_table sinkTable, " +
				"  sink_columns sinkColumns, " +
				"  sink_pk sinkPk, " +
				"  sink_extend sinkExtend " +
				"from table_process ");
		return tenv
				.toRetractStream(table, TableProcess.class)
				.filter(t -> t.f0) // keep only add messages, drop retractions
				.map(t -> t.f1);
	}

	/**
	 * ETL on the raw ODS stream: parse JSON and keep only well-formed
	 * insert/update change records.
	 *
	 * Null-safe on "type": a record missing the field is dropped instead of
	 * throwing an NPE and failing the job.
	 *
	 * @param stream raw change-log strings from Kafka
	 * @return parsed, validated change records
	 */
	private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
		return stream
				// Maxwell bootstrap rows arrive as "bootstrap-insert"; normalize them to "insert"
				.map(data -> JSON.parseObject(data.replaceAll("bootstrap-insert", "insert")))
				.filter(obj -> {
					String type = obj.getString("type"); // may be absent on malformed records
					return obj.getString("database") != null
							&& obj.getString("table") != null
							&& obj.getString("data") != null
							&& obj.getString("data").length() > 10 // drops empty/trivial payloads
							&& type != null
							&& (type.equals("update") || type.contains("insert"));
				});
	}

}