package io.debezium.connector.postgresql.connection.pgoutput;

import com.huawei.gaussdb.jdbc.replication.fluent.logical.ChainedLogicalStreamBuilder;
import io.debezium.connector.postgresql.PostgresStreamingChangeEventSource;
import io.debezium.connector.postgresql.PostgresType;
import io.debezium.connector.postgresql.TypeRegistry;
import io.debezium.connector.postgresql.UnchangedToastedReplicationMessageColumn;
import io.debezium.connector.postgresql.connection.AbstractMessageDecoder;
import io.debezium.connector.postgresql.connection.AbstractReplicationMessageColumn;
import io.debezium.connector.postgresql.connection.GaussDBConnection;
import io.debezium.connector.postgresql.connection.LogicalDecodingMessage;
import io.debezium.connector.postgresql.connection.Lsn;
import io.debezium.connector.postgresql.connection.MessageDecoderContext;
import io.debezium.connector.postgresql.connection.ReplicationMessage;
import io.debezium.connector.postgresql.connection.ReplicationStream;
import io.debezium.connector.postgresql.connection.TransactionMessage;
import io.debezium.connector.postgresql.connection.WalPositionLocator;
import io.debezium.data.Envelope.Operation;
import io.debezium.relational.Column;
import io.debezium.relational.ColumnEditor;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.util.HexConverter;
import io.debezium.util.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

public class PgOutputMessageDecoder extends AbstractMessageDecoder {
    private static final Logger LOGGER = LoggerFactory.getLogger(PgOutputMessageDecoder.class);
    // Postgres epoch (2000-01-01T00:00:00Z); BEGIN/COMMIT timestamps arrive as
    // microseconds since this instant (see handleBeginMessage/handleCommitMessage).
    private static final Instant PG_EPOCH;
    // ASCII space; used to pad the trace-logged copy of the replication buffer.
    private static final byte SPACE = 32;
    private final MessageDecoderContext decoderContext;
    private final GaussDBConnection connection;
    // Commit timestamp of the transaction currently being decoded; set by BEGIN,
    // cleared for non-transactional logical decoding messages.
    private Instant commitTimestamp;
    // XID of the transaction currently being decoded; set by BEGIN.
    private Long transactionId;

    /**
     * Creates a decoder for the pgoutput logical replication plugin.
     *
     * @param decoderContext shared decoder state (connector configuration and schema)
     * @param connection JDBC connection used to read relation/column metadata
     */
    public PgOutputMessageDecoder(
            MessageDecoderContext decoderContext, GaussDBConnection connection) {
        this.decoderContext = decoderContext;
        this.connection = connection;
    }

    /**
     * Decides whether the message at the buffer's current position may be skipped.
     * BEGIN, COMMIT and RELATION messages are never skipped, since they maintain
     * decoder transaction/schema state; everything else defers to the base class.
     * The buffer position is restored before returning so the message can be
     * re-read by the processing path.
     */
    @Override
    public boolean shouldMessageBeSkipped(
            ByteBuffer buffer, Lsn lastReceivedLsn, Lsn startLsn, WalPositionLocator walPosition) {
        final int originalPosition = buffer.position();
        try {
            // Peek at the one-byte type discriminator without consuming the message.
            final MessageType type = MessageType.forType((char) buffer.get());
            LOGGER.trace("Message Type: {}", type);
            final boolean candidateForSkipping =
                    super.shouldMessageBeSkipped(buffer, lastReceivedLsn, startLsn, walPosition);
            switch (type) {
                case COMMIT:
                case BEGIN:
                case RELATION:
                    LOGGER.trace("{} messages are always reprocessed", type);
                    return false;
                default:
                    return candidateForSkipping;
            }
        } finally {
            buffer.position(originalPosition);
        }
    }

    /**
     * Dispatches a non-empty replication message to the handler for its type.
     * BEGIN/COMMIT maintain transaction state, RELATION maintains the schema
     * cache, DML messages are decoded into change events, and unsupported
     * types are skipped with a trace log entry.
     *
     * @param buffer the raw message, positioned at the type discriminator
     * @param processor sink for the decoded replication messages
     * @param typeRegistry registry used to resolve column types
     * @throws SQLException if relation metadata cannot be read from the database
     * @throws InterruptedException if message processing is interrupted
     */
    @Override
    public void processNotEmptyMessage(
            ByteBuffer buffer,
            ReplicationStream.ReplicationMessageProcessor processor,
            TypeRegistry typeRegistry)
            throws SQLException, InterruptedException {
        if (LOGGER.isTraceEnabled()) {
            if (!buffer.hasArray()) {
                throw new IllegalStateException(
                        "Invalid buffer received from PG server during streaming replication");
            }

            byte[] source = buffer.array();
            // Copy the payload extended by two bytes so the hex dump below always
            // has a two-byte tail, initialized to spaces (uses the SPACE constant
            // instead of the bare literal 32 for consistency with its declaration).
            byte[] content = Arrays.copyOfRange(source, buffer.arrayOffset(), source.length + 2);
            int lastPos = content.length - 1;
            content[lastPos - 1] = SPACE;
            content[lastPos] = SPACE;
            LOGGER.trace(
                    "Message arrived from database {}", HexConverter.convertToHexString(content));
        }

        MessageType messageType = MessageType.forType((char) buffer.get());
        switch (messageType) {
            case COMMIT:
                this.handleCommitMessage(buffer, processor);
                break;
            case BEGIN:
                this.handleBeginMessage(buffer, processor);
                break;
            case RELATION:
                this.handleRelationMessage(buffer, typeRegistry);
                break;
            case LOGICAL_DECODING_MESSAGE:
                this.handleLogicalDecodingMessage(buffer, processor);
                break;
            case INSERT:
                this.decodeInsert(buffer, typeRegistry, processor);
                break;
            case UPDATE:
                this.decodeUpdate(buffer, typeRegistry, processor);
                break;
            case DELETE:
                this.decodeDelete(buffer, typeRegistry, processor);
                break;
            case TRUNCATE:
                if (this.isTruncateEventsIncluded()) {
                    this.decodeTruncate(buffer, typeRegistry, processor);
                } else {
                    LOGGER.trace("Message Type {} skipped, not processed.", messageType);
                }
                break;
            default:
                LOGGER.trace("Message Type {} skipped, not processed.", messageType);
        }
    }

    /**
     * Configures replication slot options for streaming with metadata:
     * protocol version 1, the configured publication, and logical decoding
     * messages on servers that support them (version 140000+).
     */
    public ChainedLogicalStreamBuilder optionsWithMetadata(
            ChainedLogicalStreamBuilder builder,
            Function<Integer, Boolean> hasMinimumServerVersion) {
        ChainedLogicalStreamBuilder configured =
                builder.withSlotOption("proto_version", 1)
                        .withSlotOption(
                                "publication_names",
                                decoderContext.getConfig().publicationName());
        if (hasMinimumServerVersion.apply(140000)) {
            configured = configured.withSlotOption("messages", true);
        }
        return configured;
    }

    /**
     * No additional slot options are required when streaming without metadata;
     * returns the builder unchanged.
     */
    public ChainedLogicalStreamBuilder optionsWithoutMetadata(
            ChainedLogicalStreamBuilder builder,
            Function<Integer, Boolean> hasMinimumServerVersion) {
        return builder;
    }

    /** Whether TRUNCATE events should be forwarded, i.e. TRUNCATE is not a configured skipped operation. */
    private boolean isTruncateEventsIncluded() {
        return !decoderContext.getConfig().getSkippedOperations().contains(Operation.TRUNCATE);
    }

    /**
     * Decodes a BEGIN message (final LSN, commit timestamp, XID) and caches the
     * timestamp and XID for the DML messages that follow in the same transaction.
     */
    private void handleBeginMessage(
            ByteBuffer buffer, ReplicationStream.ReplicationMessageProcessor processor)
            throws SQLException, InterruptedException {
        final Lsn finalLsn = Lsn.valueOf(buffer.getLong());
        // Timestamp arrives as microseconds since the Postgres epoch.
        commitTimestamp = PG_EPOCH.plus(buffer.getLong(), ChronoUnit.MICROS);
        transactionId = Integer.toUnsignedLong(buffer.getInt());
        LOGGER.trace("Event: {}", MessageType.BEGIN);
        LOGGER.trace("Final LSN of transaction: {}", finalLsn);
        LOGGER.trace("Commit timestamp of transaction: {}", commitTimestamp);
        LOGGER.trace("XID of transaction: {}", transactionId);
        processor.process(
                new TransactionMessage(
                        ReplicationMessage.Operation.BEGIN, transactionId, commitTimestamp));
    }

    /**
     * Decodes a COMMIT message and forwards a transaction-boundary event,
     * reusing the XID cached from the matching BEGIN.
     */
    private void handleCommitMessage(
            ByteBuffer buffer, ReplicationStream.ReplicationMessageProcessor processor)
            throws SQLException, InterruptedException {
        int flags = buffer.get();
        Lsn commitLsn = Lsn.valueOf(buffer.getLong());
        Lsn transactionEndLsn = Lsn.valueOf(buffer.getLong());
        // Timestamp arrives as microseconds since the Postgres epoch.
        Instant timestamp = PG_EPOCH.plus(buffer.getLong(), ChronoUnit.MICROS);
        LOGGER.trace("Event: {}", MessageType.COMMIT);
        LOGGER.trace("Flags: {} (currently unused and most likely 0)", flags);
        LOGGER.trace("Commit LSN: {}", commitLsn);
        LOGGER.trace("End LSN of transaction: {}", transactionEndLsn);
        LOGGER.trace("Commit timestamp of transaction: {}", timestamp);
        processor.process(
                new TransactionMessage(
                        ReplicationMessage.Operation.COMMIT, transactionId, timestamp));
    }

    /**
     * Decodes a RELATION message, which describes a table's columns, and updates
     * the decoder's schema cache for the relation id. Optionality and default
     * values are not part of the message, so they are read from the database
     * catalog; primary keys fall back to unique indices when absent.
     *
     * @throws SQLException if catalog metadata cannot be read
     */
    private void handleRelationMessage(ByteBuffer buffer, TypeRegistry typeRegistry)
            throws SQLException {
        int relationId = buffer.getInt();
        String schemaName = readString(buffer);
        String tableName = readString(buffer);
        int replicaIdentityId = buffer.get();
        short columnCount = buffer.getShort();
        LOGGER.trace(
                "Event: {}, RelationId: {}, Replica Identity: {}, Columns: {}",
                MessageType.RELATION,
                relationId,
                replicaIdentityId,
                columnCount);
        LOGGER.trace("Schema: '{}', Table: '{}'", schemaName, tableName);

        DatabaseMetaData databaseMetadata = this.connection.connection().getMetaData();
        TableId tableId = new TableId((String) null, schemaName, tableName);
        List<Column> readColumns =
                this.getTableColumnsFromDatabase(this.connection, databaseMetadata, tableId);
        Map<String, Optional<String>> columnDefaults =
                readColumns.stream()
                        .filter(Column::hasDefaultValue)
                        .collect(Collectors.toMap(Column::name, Column::defaultValueExpression));
        Map<String, Boolean> columnOptionality =
                readColumns.stream().collect(Collectors.toMap(Column::name, Column::isOptional));
        List<String> primaryKeyColumns =
                this.connection.readPrimaryKeyNames(databaseMetadata, tableId);
        if (primaryKeyColumns == null || primaryKeyColumns.isEmpty()) {
            LOGGER.warn(
                    "Primary keys are not defined for table '{}', defaulting to unique indices",
                    tableName);
            primaryKeyColumns = this.connection.readTableUniqueIndices(databaseMetadata, tableId);
        }

        List<ColumnMetaData> columns = new ArrayList<>();
        Set<String> columnNames = new HashSet<>();

        for (short i = 0; i < columnCount; ++i) {
            // Per-column flag byte; must be consumed to keep the buffer aligned
            // even though its value is not used here.
            byte flags = buffer.get();
            String columnName = Strings.unquoteIdentifierPart(readString(buffer));
            int columnType = buffer.getInt();
            int attypmod = buffer.getInt();
            PostgresType postgresType = typeRegistry.get(columnType);
            boolean key =
                    this.isColumnInPrimaryKey(schemaName, tableName, columnName, primaryKeyColumns);
            Boolean optional = columnOptionality.get(columnName);
            if (optional == null) {
                LOGGER.warn(
                        "Column '{}' optionality could not be determined, defaulting to true",
                        columnName);
                optional = true;
            }

            boolean hasDefault = columnDefaults.containsKey(columnName);
            String defaultValueExpression =
                    columnDefaults.getOrDefault(columnName, Optional.empty()).orElse(null);
            columns.add(
                    new ColumnMetaData(
                            columnName,
                            postgresType,
                            key,
                            optional,
                            hasDefault,
                            defaultValueExpression,
                            attypmod));
            columnNames.add(columnName);
        }

        // Drop any key columns that are no longer part of the relation.
        primaryKeyColumns.retainAll(columnNames);
        Table table =
                this.resolveRelationFromMetadata(
                        new PgOutputRelationMetaData(
                                relationId, schemaName, tableName, columns, primaryKeyColumns));
        this.decoderContext.getSchema().applySchemaChangesForTable(relationId, table);
    }

    /**
     * Reads column metadata for {@code tableId} from the JDBC database metadata,
     * applying the connector's configured column filter.
     *
     * @return the columns that pass the filter (possibly empty, never null)
     * @throws SQLException if the column metadata cannot be read
     */
    private List<Column> getTableColumnsFromDatabase(
            GaussDBConnection connection, DatabaseMetaData databaseMetadata, TableId tableId)
            throws SQLException {
        List<Column> readColumns = new ArrayList<>();
        try (ResultSet columnMetadata =
                databaseMetadata.getColumns(null, tableId.schema(), tableId.table(), null)) {
            while (columnMetadata.next()) {
                connection
                        .readColumnForDecoder(
                                columnMetadata,
                                tableId,
                                this.decoderContext.getConfig().getColumnFilter())
                        .ifPresent(readColumns::add);
            }
        } catch (SQLException e) {
            // Pass the exception as the last argument so the stack trace is logged,
            // not silently dropped.
            LOGGER.error(
                    "Failed to read column metadata for '{}.{}'",
                    tableId.schema(),
                    tableId.table(),
                    e);
            throw e;
        }
        return readColumns;
    }

    /**
     * Determines whether {@code columnName} participates in the table's primary key.
     * When the relation message provided no key columns at all, falls back to the
     * primary key of the previously cached table definition, if any.
     */
    private boolean isColumnInPrimaryKey(
            String schemaName,
            String tableName,
            String columnName,
            List<String> primaryKeyColumns) {
        if (primaryKeyColumns.isEmpty()) {
            Table knownTable =
                    decoderContext
                            .getSchema()
                            .tableFor(new TableId((String) null, schemaName, tableName));
            return knownTable != null && knownTable.primaryKeyColumnNames().contains(columnName);
        }
        return primaryKeyColumns.contains(columnName);
    }

    /**
     * Decodes an INSERT message into a replication event. Emits a no-op message
     * when the relation id is not known to the schema (e.g. a filtered table).
     */
    private void decodeInsert(
            ByteBuffer buffer,
            TypeRegistry typeRegistry,
            ReplicationStream.ReplicationMessageProcessor processor)
            throws SQLException, InterruptedException {
        int relationId = buffer.getInt();
        char tupleType = (char) buffer.get();
        LOGGER.trace(
                "Event: {}, Relation Id: {}, Tuple Type: {}",
                MessageType.INSERT,
                relationId,
                tupleType);
        Optional<Table> resolvedTable = resolveRelation(relationId);
        if (!resolvedTable.isPresent()) {
            processor.process(new ReplicationMessage.NoopMessage(transactionId, commitTimestamp));
            return;
        }

        Table table = resolvedTable.get();
        List<ReplicationMessage.Column> newColumns =
                resolveColumnsFromStreamTupleData(buffer, typeRegistry, table);
        processor.process(
                new PgOutputReplicationMessage(
                        ReplicationMessage.Operation.INSERT,
                        table.id().toDoubleQuotedString(),
                        commitTimestamp,
                        transactionId,
                        null,
                        newColumns));
    }

    /**
     * Decodes an UPDATE message. Depending on the table's replica identity, the
     * new tuple may be preceded by the old tuple ('O') or the old key ('K').
     * Emits a no-op message when the relation id is unknown.
     */
    private void decodeUpdate(
            ByteBuffer buffer,
            TypeRegistry typeRegistry,
            ReplicationStream.ReplicationMessageProcessor processor)
            throws SQLException, InterruptedException {
        int relationId = buffer.getInt();
        LOGGER.trace("Event: {}, RelationId: {}", MessageType.UPDATE, relationId);
        Optional<Table> resolvedTable = resolveRelation(relationId);
        if (!resolvedTable.isPresent()) {
            processor.process(new ReplicationMessage.NoopMessage(transactionId, commitTimestamp));
            return;
        }

        Table table = resolvedTable.get();
        List<ReplicationMessage.Column> oldColumns = null;
        char tupleType = (char) buffer.get();
        if ('O' == tupleType || 'K' == tupleType) {
            // Consume the old tuple (or key) and advance to the new-tuple marker.
            oldColumns = resolveColumnsFromStreamTupleData(buffer, typeRegistry, table);
            tupleType = (char) buffer.get();
        }

        List<ReplicationMessage.Column> newColumns =
                resolveColumnsFromStreamTupleData(buffer, typeRegistry, table);
        processor.process(
                new PgOutputReplicationMessage(
                        ReplicationMessage.Operation.UPDATE,
                        table.id().toDoubleQuotedString(),
                        commitTimestamp,
                        transactionId,
                        oldColumns,
                        newColumns));
    }

    /**
     * Decodes a DELETE message; the tuple in the message carries the old values
     * (per the replica identity), so it is passed as the old-column list.
     * Emits a no-op message when the relation id is unknown.
     */
    private void decodeDelete(
            ByteBuffer buffer,
            TypeRegistry typeRegistry,
            ReplicationStream.ReplicationMessageProcessor processor)
            throws SQLException, InterruptedException {
        int relationId = buffer.getInt();
        char tupleType = (char) buffer.get();
        LOGGER.trace(
                "Event: {}, RelationId: {}, Tuple Type: {}",
                MessageType.DELETE,
                relationId,
                tupleType);
        Optional<Table> resolvedTable = resolveRelation(relationId);
        if (!resolvedTable.isPresent()) {
            processor.process(new ReplicationMessage.NoopMessage(transactionId, commitTimestamp));
            return;
        }

        Table table = resolvedTable.get();
        List<ReplicationMessage.Column> oldColumns =
                resolveColumnsFromStreamTupleData(buffer, typeRegistry, table);
        processor.process(
                new PgOutputReplicationMessage(
                        ReplicationMessage.Operation.DELETE,
                        table.id().toDoubleQuotedString(),
                        commitTimestamp,
                        transactionId,
                        oldColumns,
                        null));
    }

    /**
     * Decodes a TRUNCATE message, which may cover several relations at once.
     * Each resolvable relation is forwarded as its own truncate event; the last
     * one is flagged so downstream processing knows the batch is complete.
     * Unresolvable relation ids are silently dropped.
     */
    private void decodeTruncate(
            ByteBuffer buffer,
            TypeRegistry typeRegistry,
            ReplicationStream.ReplicationMessageProcessor processor)
            throws SQLException, InterruptedException {
        int numberOfRelations = buffer.getInt();
        // Option bits: 1 = CASCADE, 2 = RESTART IDENTITY (see getTruncateOptions);
        // currently only logged, not acted upon.
        int optionBits = buffer.get();
        int[] relationIds = new int[numberOfRelations];
        for (int i = 0; i < numberOfRelations; ++i) {
            relationIds[i] = buffer.getInt();
        }

        List<Table> tables = new ArrayList<>();
        for (int relationId : relationIds) {
            resolveRelation(relationId).ifPresent(tables::add);
        }

        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace(
                    "Event: {}, RelationIds: {}, OptionBits: {}",
                    MessageType.TRUNCATE,
                    Arrays.toString(relationIds),
                    optionBits);
        }

        int noOfResolvedTables = tables.size();
        for (int i = 0; i < noOfResolvedTables; ++i) {
            boolean lastTableInTruncate = i + 1 == noOfResolvedTables;
            processor.process(
                    new PgOutputTruncateReplicationMessage(
                            ReplicationMessage.Operation.TRUNCATE,
                            tables.get(i).id().toDoubleQuotedString(),
                            this.commitTimestamp,
                            this.transactionId,
                            lastTableInTruncate));
        }
    }

    /**
     * Maps the TRUNCATE option bitmask to human-readable option names
     * (bit 1 = CASCADE, bit 2 = RESTART IDENTITY).
     *
     * @param flag the option byte from the TRUNCATE message
     * @return the option names; empty when no recognized option bit is set
     *     (previously {@code null}, which forced null checks on callers)
     */
    private List<String> getTruncateOptions(int flag) {
        switch (flag) {
            case 1:
                return Collections.singletonList("CASCADE");
            case 2:
                return Collections.singletonList("RESTART IDENTITY");
            case 3:
                return Arrays.asList("RESTART IDENTITY", "CASCADE");
            default:
                return Collections.emptyList();
        }
    }

    /**
     * Decodes a LOGICAL_DECODING_MESSAGE (emitted via pg_logical_emit_message).
     * Non-transactional messages have no surrounding BEGIN/COMMIT, so the cached
     * transaction id and commit timestamp are cleared for them.
     */
    private void handleLogicalDecodingMessage(
            ByteBuffer buffer, ReplicationStream.ReplicationMessageProcessor processor)
            throws SQLException, InterruptedException {
        boolean isTransactional = buffer.get() == 1;
        Lsn lsn = Lsn.valueOf(buffer.getLong());
        String prefix = readString(buffer);
        int contentLength = buffer.getInt();
        byte[] content = new byte[contentLength];
        buffer.get(content);

        if (!isTransactional) {
            transactionId = null;
            commitTimestamp = null;
        }

        LOGGER.trace("Event: {}", MessageType.LOGICAL_DECODING_MESSAGE);
        LOGGER.trace("Commit LSN: {}", lsn);
        LOGGER.trace("Commit timestamp of transaction: {}", commitTimestamp);
        LOGGER.trace("XID of transaction: {}", transactionId);
        LOGGER.trace("Transactional: {}", isTransactional);
        LOGGER.trace("Prefix: {}", prefix);
        processor.process(
                new LogicalDecodingMessage(
                        ReplicationMessage.Operation.MESSAGE,
                        commitTimestamp,
                        transactionId,
                        isTransactional,
                        prefix,
                        content));
    }

    // Looks up the table cached for this relation id by a prior RELATION message;
    // empty when the relation is unknown (e.g. the table was filtered out).
    private Optional<Table> resolveRelation(int relationId) {
        return Optional.ofNullable(this.decoderContext.getSchema().tableFor(relationId));
    }

    /**
     * Builds a Debezium {@link Table} definition from the column metadata decoded
     * out of a RELATION message.
     */
    private Table resolveRelationFromMetadata(PgOutputRelationMetaData metadata) {
        List<Column> columns = new ArrayList<>();
        for (ColumnMetaData columnMetadata : metadata.getColumns()) {
            ColumnEditor editor =
                    Column.editor()
                            .name(columnMetadata.getColumnName())
                            .jdbcType(columnMetadata.getPostgresType().getRootType().getJdbcId())
                            .nativeType(columnMetadata.getPostgresType().getRootType().getOid())
                            .optional(columnMetadata.isOptional())
                            .type(
                                    columnMetadata.getPostgresType().getName(),
                                    columnMetadata.getTypeName())
                            .length(columnMetadata.getLength())
                            .scale(columnMetadata.getScale());
            if (columnMetadata.hasDefaultValue()) {
                editor.defaultValueExpression(columnMetadata.getDefaultValueExpression());
            }
            columns.add(editor.create());
        }

        Table table =
                Table.editor()
                        .addColumns(columns)
                        .setPrimaryKeyNames(metadata.getPrimaryKeyNames())
                        .tableId(metadata.getTableId())
                        .create();
        LOGGER.trace("Resolved '{}' as '{}'", table.id(), table);
        return table;
    }

    /**
     * Reads a NUL-terminated string from the buffer, interpreting each byte as a
     * single character (as the original implementation does).
     */
    private static String readString(ByteBuffer buffer) {
        StringBuilder result = new StringBuilder();
        for (byte current = buffer.get(); current != 0; current = buffer.get()) {
            result.append((char) current);
        }
        return result.toString();
    }

    /**
     * Reads a length-prefixed column value from the buffer and decodes it as UTF-8.
     */
    private static String readColumnValueAsString(ByteBuffer buffer) {
        int length = buffer.getInt();
        byte[] value = new byte[length];
        buffer.get(value, 0, length);
        // StandardCharsets.UTF_8 avoids the runtime charset lookup (and its
        // UnsupportedCharsetException path) of Charset.forName("UTF-8").
        return new String(value, StandardCharsets.UTF_8);
    }

    /**
     * Decodes the TupleData section of an INSERT/UPDATE/DELETE message into
     * replication columns, resolving names and types positionally from the
     * cached table definition.
     *
     * <p>NOTE(review): assumes the tuple's column count and order match the
     * cached table definition from the latest RELATION message — confirm the
     * schema cache cannot lag behind the stream.
     */
    private static List<ReplicationMessage.Column> resolveColumnsFromStreamTupleData(
            ByteBuffer buffer, final TypeRegistry typeRegistry, Table table) {
        short numberOfColumns = buffer.getShort();
        List<ReplicationMessage.Column> columns = new ArrayList(numberOfColumns);

        for (short i = 0; i < numberOfColumns; ++i) {
            // Column metadata is matched by position against the cached table.
            Column column = (Column) table.columns().get(i);
            final String columnName = column.name();
            String typeName = column.typeName();
            final PostgresType columnType = typeRegistry.get(typeName);
            final String typeExpression = column.typeExpression();
            boolean optional = column.isOptional();
            // One-byte value discriminator: 't' = textual value, 'u' = unchanged
            // toasted value, 'n' = SQL NULL.
            char type = (char) buffer.get();
            if (type == 't') {
                final String valueStr = readColumnValueAsString(buffer);
                columns.add(
                        new AbstractReplicationMessageColumn(
                                columnName, columnType, typeExpression, optional, true) {
                            public Object getValue(
                                    PostgresStreamingChangeEventSource.PgConnectionSupplier
                                            connection,
                                    boolean includeUnknownDatatypes) {
                                // Converts the textual value to a typed object lazily,
                                // only when the value is actually requested.
                                return PgOutputReplicationMessage.getValue(
                                        columnName,
                                        columnType,
                                        typeExpression,
                                        valueStr,
                                        connection,
                                        includeUnknownDatatypes,
                                        typeRegistry);
                            }

                            public String toString() {
                                return columnName + "(" + typeExpression + ")=" + valueStr;
                            }
                        });
            } else if (type == 'u') {
                // Unchanged TOAST-ed value: the server did not resend the content.
                columns.add(
                        new UnchangedToastedReplicationMessageColumn(
                                columnName, columnType, typeExpression, optional, true) {
                            public String toString() {
                                return columnName
                                        + "("
                                        + typeExpression
                                        + ") - Unchanged toasted column";
                            }
                        });
            } else if (type == 'n') {
                // NULL value; the column is forced optional since no value exists.
                columns.add(
                        new AbstractReplicationMessageColumn(
                                columnName, columnType, typeExpression, true, true) {
                            public Object getValue(
                                    PostgresStreamingChangeEventSource.PgConnectionSupplier
                                            connection,
                                    boolean includeUnknownDatatypes) {
                                return null;
                            }
                        });
            }
        }

        columns.forEach(
                (c) -> {
                    LOGGER.trace("Column: {}", c);
                });
        return columns;
    }

    /** Closes the underlying JDBC connection, if one was provided. */
    public void close() {
        if (this.connection != null) {
            this.connection.close();
        }
    }

    static {
        // Postgres encodes replication timestamps as microseconds since
        // 2000-01-01T00:00:00Z; precompute that epoch once.
        PG_EPOCH = LocalDate.of(2000, 1, 1).atStartOfDay().toInstant(ZoneOffset.UTC);
    }

    /**
     * The pgoutput message types, identified by the one-byte discriminator at
     * the start of every logical replication message.
     */
    public enum MessageType {
        RELATION,
        BEGIN,
        COMMIT,
        INSERT,
        UPDATE,
        DELETE,
        TYPE,
        ORIGIN,
        TRUNCATE,
        LOGICAL_DECODING_MESSAGE;

        /**
         * Resolves a discriminator character to its {@link MessageType}.
         *
         * @throws IllegalArgumentException for unrecognized discriminators
         */
        public static MessageType forType(char type) {
            switch (type) {
                case 'R':
                    return RELATION;
                case 'B':
                    return BEGIN;
                case 'C':
                    return COMMIT;
                case 'I':
                    return INSERT;
                case 'U':
                    return UPDATE;
                case 'D':
                    return DELETE;
                case 'Y':
                    return TYPE;
                case 'O':
                    return ORIGIN;
                case 'T':
                    return TRUNCATE;
                case 'M':
                    return LOGICAL_DECODING_MESSAGE;
                default:
                    throw new IllegalArgumentException("Unsupported message type: " + type);
            }
        }
    }
}
