package com.yifeng.repo.flink.data.transport.streaming.connectors.kafka;

import java.nio.charset.StandardCharsets;

import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerRecord;

import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.yifeng.repo.flink.data.transport.dto.CdcTypeEnum;
import com.yifeng.repo.flink.data.transport.dto.DataPacket;
import com.yifeng.repo.flink.data.transport.utils.kafka.KafkaUtil;

/**
 * @author lijing
 * @since 2023-01-10
 *
 */
/**
 * Serializes a {@link DataPacket} into a Kafka {@link ProducerRecord} whose value is the
 * packet's rows rendered as a fastjson string (null strings/lists/map values preserved as
 * empty/null per the {@code SerializerFeature} flags).
 *
 * <p>The target partition is chosen either by hashing {@code schema + tableName} via
 * {@link KafkaUtil#choosePartition} (when {@code partitionByHash} is true) or by the
 * packet's queue number modulo {@code partitionSize}.
 */
public class FullKafkaSerializationSchema implements KafkaSerializationSchema<DataPacket> {

	private static final long serialVersionUID = 1L;

	// When true, partition by hash of schema+table; otherwise by queue number.
	private final boolean partitionByHash;

	// Total number of partitions of the target topic; must be positive.
	private final int partitionSize;

	private final String topic;

	/**
	 * @param partitionByHash whether to partition by hash of schema+table name
	 * @param partitionSize   number of partitions in the target topic; must be &gt; 0
	 * @param topic           target Kafka topic
	 * @throws IllegalArgumentException if {@code partitionSize} is not positive
	 */
	public FullKafkaSerializationSchema(boolean partitionByHash, int partitionSize, String topic) {
		// Fail fast here rather than with an ArithmeticException at serialize time.
		if (partitionSize <= 0) {
			throw new IllegalArgumentException("partitionSize must be positive, got: " + partitionSize);
		}
		this.partitionByHash = partitionByHash;
		this.partitionSize = partitionSize;
		this.topic = topic;
	}

	@Override
	public void open(SerializationSchema.InitializationContext context) {
		// No initialization required.
	}

	@Override
	public ProducerRecord<byte[], byte[]> serialize(DataPacket dataPacket, Long timestamp) {
		// Math.floorMod guards against a negative queue number producing an invalid
		// (negative) partition index, which plain % would allow.
		final int partition = partitionByHash
				? KafkaUtil.choosePartition(
						dataPacket.getSchema().trim() + dataPacket.getTableName().trim(), partitionSize)
				: Math.floorMod(dataPacket.getQueueNo(), partitionSize);
		String value = JSONObject.toJSONString(dataPacket.getRows(), SerializerFeature.WriteNullStringAsEmpty,
				SerializerFeature.WriteNullListAsEmpty, SerializerFeature.WriteMapNullValue);
		// Encode explicitly as UTF-8; bare getBytes() would use the platform default charset.
		return new ProducerRecord<>(topic, partition, null, value.getBytes(StandardCharsets.UTF_8));
	}

}
