package com.yifeng.repo.flink.data.transport.streaming.connectors.kafka;

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.tree.ParseTreeWalker;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.yifeng.repo.flink.data.transport.config.DdlConstants;
import com.yifeng.repo.flink.data.transport.dto.CdcTypeEnum;
import com.yifeng.repo.flink.data.transport.dto.DataRow;
import com.yifeng.repo.flink.data.transport.dto.DdlDataPacket;
import com.yifeng.repo.flink.data.transport.streaming.connectors.mysql.antlr.listener.MysqlDdlParserListener;
import com.yifeng.repo.flink.data.transport.streaming.connectors.oracle.antlr.listener.OracleDdlParserListener;
import com.yifeng.repo.flink.data.transport.utils.kafka.KafkaUtil;

import io.debezium.ddl.parser.mysql.generated.MySqlLexer;
import io.debezium.ddl.parser.mysql.generated.MySqlParser;
import io.debezium.ddl.parser.oracle.generated.PlSqlLexer;
import io.debezium.ddl.parser.oracle.generated.PlSqlParser;

/**
 * Kafka serialization schema that routes incremental (CDC) records for
 * different tables to different partitions of a single topic.
 *
 * <p>The target partition is derived from the record's {@code schema + table}
 * name, so every change for a given table lands on the same partition and
 * per-table ordering is preserved. DDL (schema-change) events are detected by
 * the row's op type and serialized through a dedicated path.
 *
 * @author lijing
 * @since 2023-01-29
 */
public class IncreKafkaOracleSerializationSchema implements KafkaSerializationSchema<String> {
	private static final long serialVersionUID = 1L;

	private static final Logger LOG = LoggerFactory.getLogger(IncreKafkaOracleSerializationSchema.class);

	// Partition count of the target topic; used for hash-based routing.
	private final int partitionSize;

	// Name of the target Kafka topic.
	private final String topic;

	/**
	 * @param partitionSize number of partitions of the target topic
	 * @param topic         target Kafka topic name
	 */
	public IncreKafkaOracleSerializationSchema(int partitionSize, String topic) {
		this.partitionSize = partitionSize;
		this.topic = topic;
	}

	@Override
	public void open(SerializationSchema.InitializationContext context) {
		// No initialization required.
	}

	/**
	 * Serializes one JSON-encoded change record into a Kafka producer record.
	 *
	 * @param data      JSON string of a {@link DataRow} (DML) or, for schema
	 *                  changes, a {@link DdlDataPacket} (DDL)
	 * @param timestamp record timestamp supplied by Flink; currently ignored,
	 *                  the broker assigns the message timestamp
	 * @return producer record targeting the partition derived from schema + table name
	 */
	@Override
	public ProducerRecord<byte[], byte[]> serialize(String data, Long timestamp) {
		// Decide from the op type whether this is a DDL (schema change) or DML event.
		DataRow dataRow = JSONObject.parseObject(data, DataRow.class);
		if (DdlConstants.SCHEMA_CHANGE_OP_TYPE.equals(dataRow.getOp().name())) {
			return ddlSerialize(data, timestamp);
		}
		// Same schema+table always hashes to the same partition, preserving per-table order.
		int partition = KafkaUtil.choosePartition(dataRow.getSchema().trim() + dataRow.getTb().trim(), partitionSize);
		List<DataRow> rows = Collections.singletonList(dataRow);
		String value = JSONObject.toJSONString(rows,
				SerializerFeature.WriteNullStringAsEmpty,
				SerializerFeature.WriteNullListAsEmpty,
				SerializerFeature.WriteMapNullValue);
		// Encode explicitly as UTF-8; bare getBytes() depends on the platform default charset.
		return new ProducerRecord<>(topic, partition, null, value.getBytes(StandardCharsets.UTF_8));
	}

	/**
	 * Serializes a DDL (schema-change) message.
	 *
	 * @param data      JSON string of a {@link DdlDataPacket}
	 * @param timestamp record timestamp supplied by Flink; currently ignored
	 * @return producer record targeting the partition derived from schema + table name
	 */
	private ProducerRecord<byte[], byte[]> ddlSerialize(String data, Long timestamp) {
		DdlDataPacket ddlDataPacket = JSONObject.parseObject(data, DdlDataPacket.class);
		int partition = KafkaUtil.choosePartition(ddlDataPacket.getSchema().trim() + ddlDataPacket.getTb().trim(), partitionSize);
		List<DdlDataPacket> rows = Collections.singletonList(ddlDataPacket);
		String value = JSONObject.toJSONString(rows,
				SerializerFeature.WriteNullStringAsEmpty,
				SerializerFeature.WriteNullListAsEmpty,
				SerializerFeature.WriteMapNullValue);
		// UTF-8 explicitly, matching the DML path above.
		return new ProducerRecord<>(topic, partition, null, value.getBytes(StandardCharsets.UTF_8));
	}

}
