package com.fanxl.flink.project.udf;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerRecord;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * @description:
 * @author: fanxiaole
 * @date: 2022/3/20 23:14
 */
/**
 * Deserializes Kafka records into {@code Tuple2<id, payload>} where the id is
 * {@code topic_partition_offset} (unique per record within a Kafka cluster) and
 * the payload is the record value decoded as UTF-8.
 *
 * <p>Tombstone records (null value, produced e.g. by log compaction / deletes)
 * are silently skipped instead of throwing a {@link NullPointerException}.
 */
public class AccessKafkaDeserializationSchema implements KafkaRecordDeserializationSchema<Tuple2<String, String>> {

    @Override
    public void open(DeserializationSchema.InitializationContext context) throws Exception {
        // No custom initialization; delegate to the interface default.
        KafkaRecordDeserializationSchema.super.open(context);
    }

    /**
     * Emits one {@code (id, payload)} tuple per non-tombstone record.
     *
     * @param record the raw Kafka record (key is ignored; value decoded as UTF-8)
     * @param out    collector receiving the deserialized tuple
     * @throws IOException declared by the interface; not thrown here
     */
    @Override
    public void deserialize(ConsumerRecord<byte[], byte[]> record, Collector<Tuple2<String, String>> out) throws IOException {
        byte[] value = record.value();
        if (value == null) {
            // Tombstone record: nothing to deserialize, emit nothing.
            return;
        }
        String id = record.topic() + "_" + record.partition() + "_" + record.offset();
        out.collect(Tuple2.of(id, new String(value, StandardCharsets.UTF_8)));
    }

    @Override
    public TypeInformation<Tuple2<String, String>> getProducedType() {
        // Anonymous TypeHint subclass captures the generic Tuple2 type at runtime.
        return TypeInformation.of(new TypeHint<Tuple2<String, String>>() {});
    }
}
