package com.xrui.adc.hbase.schema;


import com.xrui.hbase.AvroSchemaResolver;
import com.xrui.hbase.TableName;
import com.xrui.hbase.impl.HTableInterfaceFactory;
import com.xrui.hbase.util.ReferenceCountable;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.*;


/**
 * <p>
 * Mapping between schema/meta IDs and Avro schema objects, backed by an HBase table.
 * This class is thread-safe: the in-memory cache is guarded by the instance monitor.
 * </p>
 * <p>
 * Schema metadata is stored in a single HBase table (default name {@value #SCHEMA_TABLE_NAME};
 * the actual table name is supplied by the caller) in column {@code cf:hbase}. Each cell holds a
 * JSON document that carries an Avro schema together with row-key and qualifier layout
 * information; see {@link DerivedSchemaMeta} for the expected structure.
 * </p>
 */
public final class ViewTableSchemaResolver implements AvroSchemaResolver, ReferenceCountable<ViewTableSchemaResolver> {
    /** Default name of the schema table (NOTE(review): unused here — callers pass the table name; confirm external users). */
    static final String SCHEMA_TABLE_NAME = "MetadataStore";
    /**
     * The column family in HBase used to store schema entries.
     */
    private static final String SCHEMA_COLUMN_FAMILY = "cf";
    /**
     * The column qualifier in HBase used to store schema entries.
     */
    private static final String SCHEMA_COLUMN_QUALIFIER = "hbase";
    private static final Logger LOG = LoggerFactory.getLogger(ViewTableSchemaResolver.class);
    private static final byte[] SCHEMA_COLUMN_FAMILY_BYTES = Bytes.toBytes(SCHEMA_COLUMN_FAMILY);
    private static final byte[] SCHEMA_COLUMN_QUALIFIER_BYTES = Bytes.toBytes(SCHEMA_COLUMN_QUALIFIER);

    /**
     * HTable used to read schema entries. Closed by {@link #release()}.
     */
    private final HTableInterface mSchemaTable;
    /**
     * Cache from meta ID to parsed metadata. Guarded by {@code this}.
     */
    private final Map<String, DerivedSchemaMeta> mSchemaCacheMap = new HashMap<>();

    /**
     * Open a connection to the HBase schema table for a HBase instance.
     *
     * @param tableName    The HBase table name.
     * @param conf         The Hadoop configuration.
     * @param tableFactory HTableInterface factory.
     * @throws IOException on I/O error.
     */
    public ViewTableSchemaResolver(
        TableName tableName,
        Configuration conf,
        HTableInterfaceFactory tableFactory
    ) throws IOException {
        mSchemaTable = newSchemaTable(tableName, conf, tableFactory);
    }

    /**
     * Creates an HTable handle to the schema table.
     *
     * @param tableName the name of the table storing schema entries.
     * @param conf      the Hadoop configuration.
     * @param factory   HTableInterface factory.
     * @return a new interface for the table storing the schema metadata.
     * @throws IOException on I/O error.
     */
    private static HTableInterface newSchemaTable(
        TableName tableName,
        Configuration conf,
        HTableInterfaceFactory factory)
        throws IOException {
        return factory.create(conf, tableName.toString());
    }

    /**
     * Fetches the latest value of one column of one row as a UTF-8 string.
     *
     * @param rowKey       the row to read (encoded as UTF-8).
     * @param columnFamily the column family to read.
     * @param qualifier    the column qualifier to read.
     * @return the cell value, or {@code null} when the cell is absent or the read fails
     *     (read errors are logged and swallowed so callers can fall back gracefully).
     */
    private String doGet(String rowKey, byte[] columnFamily, byte[] qualifier) {
        LOG.debug("Getting row '{}' in table '{}'...", rowKey, mSchemaTable.getName());
        // Bytes.toBytes() always encodes UTF-8; String.getBytes() depends on the platform
        // default charset and could produce different row keys on differently-configured JVMs.
        final Get get = new Get(Bytes.toBytes(rowKey)).addColumn(columnFamily, qualifier);
        final Cell cell;
        try {
            Result result = mSchemaTable.get(get);
            cell = result.getColumnLatestCell(columnFamily, qualifier);
        } catch (IOException e) {
            LOG.error("Error occurred while load schema", e);
            return null;
        }
        if (null == cell) {
            LOG.warn("Get nothing for row '{}' in table '{}'", rowKey, mSchemaTable.getName());
            return null;
        } else {
            return Bytes.toString(CellUtil.cloneValue(cell));
        }
    }

    /**
     * Returns the schema metadata for the given meta ID, consulting the local cache first and
     * falling back to an HBase read on a miss (successful reads are cached).
     *
     * @param metaID the row key of the metadata entry.
     * @return the parsed metadata, or {@code null} when the entry is missing or unparseable.
     */
    public synchronized DerivedSchemaMeta getMeta(String metaID) {
        final DerivedSchemaMeta existing = mSchemaCacheMap.get(metaID);
        if (existing != null) {
            LOG.debug("Cache hit for schema of {}", metaID);
            return existing;
        }

        // On a lookup miss from the local meta cache, check to see if we can create the meta
        // from the original HBase table, cache it locally, and return it.
        LOG.debug("Cache miss for schema of {}", metaID);
        final String json = doGet(metaID, SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES);
        if (json != null && !json.isEmpty()) {
            try {
                final DerivedSchemaMeta schemaMeta = new DerivedSchemaMeta(json);
                mSchemaCacheMap.put(metaID, schemaMeta);
                return schemaMeta;
            } catch (IOException e) {
                LOG.error("Unable to resolve schema: " + json, e);
            }
        }
        LOG.warn("No schema configurations found for {}", metaID);
        return null;
    }

    /**
     * Resolves a schema ID to its Avro schema.
     *
     * @param avroSchemaId the meta ID to resolve.
     * @return the Avro schema, or {@code null} when no metadata exists for the ID.
     */
    @Override
    public synchronized Schema apply(String avroSchemaId) {
        DerivedSchemaMeta schemaMeta = getMeta(avroSchemaId);
        return null == schemaMeta ? null : schemaMeta.getSchema();
    }

    /**
     * Eagerly loads every schema entry from the HBase table and atomically replaces the
     * local cache with the freshly-loaded entries.
     *
     * @throws IOException on scan error or when an entry's JSON cannot be read.
     */
    @Override
    public void cache() throws IOException {
        LOG.info("Loading entries from schema hash table.");
        final Map<String, DerivedSchemaMeta> entries = new HashMap<>();
        // The scanner holds server-side resources and must be closed; try-with-resources
        // guarantees that even when decoding throws.
        try (ResultScanner schemaTableScanner = mSchemaTable.getScanner(
            new Scan().addColumn(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES))) {  // retrieve latest version
            for (Result result : schemaTableScanner) {
                final byte[] rowKey = result.getRow();
                Cell cell = result.getColumnLatestCell(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES);
                final byte[] value = CellUtil.cloneValue(cell);
                String key = Bytes.toString(rowKey);
                String json = Bytes.toString(value);
                try {
                    entries.put(key, DerivedSchemaMeta.ofValue(json));
                } catch (AvroRuntimeException are) {
                    // Pass the exception as the last argument so SLF4J logs the stack trace
                    // (formatting it through %s would have discarded it).
                    LOG.error("Unable to decode schema table entry for row {}, timestamp {}",
                        key, cell.getTimestamp(), are);
                }
            }
        }
        LOG.info("Schema table has {} entries.", entries.size());
        // Swap the cache contents under the same lock getMeta()/apply() use; mutating the
        // map unsynchronized would break the documented thread-safety of this class.
        synchronized (this) {
            mSchemaCacheMap.clear();
            mSchemaCacheMap.putAll(entries);
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>Reference counting is a no-op in this implementation: {@code retain()} simply
     * returns {@code this}.</p>
     */
    @Override
    public ViewTableSchemaResolver retain() {
        return this;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Closes the underlying HBase table handle.</p>
     */
    @Override
    public void release() throws IOException {
        mSchemaTable.close();
    }

    /**
     * Parsed schema metadata: an Avro schema plus the HBase layout information
     * (table, column family, qualifiers, row-key composition) it is bound to.
     * Instances are immutable after construction.
     */
    public static final class DerivedSchemaMeta {
        private final String mVersion;
        private final String mTableName;
        private final List<String> mHashItems;
        private final List<String> mUnHashItems;
        private final String mColumnFamily;
        private final List<String> mQualifierItems;
        private final String mDelimiter;
        private final String mCompression;
        private final Schema mSchema;

        /**
         * Parses a JSON metadata document. The document must contain a {@code version} field
         * and an {@code hbase} object with {@code tableName}, {@code columnFamily},
         * {@code delimiter}, {@code avroSchema}, {@code qualifier}, and a {@code rowkey}
         * object holding {@code hashitems}/{@code unhashitems} arrays.
         *
         * @param meta the JSON document (comments, single quotes and unquoted field
         *             names are tolerated).
         * @throws IOException when the JSON cannot be parsed.
         */
        private DerivedSchemaMeta(String meta) throws IOException {
            final ObjectMapper mapper = new ObjectMapper();
            final JsonParser parser = new JsonFactory().createJsonParser(meta)
                .enable(JsonParser.Feature.ALLOW_COMMENTS)
                .enable(JsonParser.Feature.ALLOW_SINGLE_QUOTES)
                .enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES);
            final JsonNode root = mapper.readTree(parser);

            JsonNode hbaseNode = root.findValue("hbase");
            JsonNode rowKeyNode = hbaseNode.findValue("rowkey");
            JsonNode schemaNode = hbaseNode.findValue("avroSchema");

            // "NONE"/"NULL" (any case) and an absent field all normalize to "" (no compression).
            // Null-check the node itself: findValue() returns null for a missing field, which
            // previously would have thrown an NPE on asText(); asText()'s result is never null,
            // so the old post-hoc null check was dead code.
            final JsonNode compressionNode = hbaseNode.findValue("compression");
            String compression = compressionNode == null ? "" : compressionNode.asText();
            for (String escape : Arrays.asList("NONE", "NULL")) {
                if (compression.equalsIgnoreCase(escape)) {
                    compression = "";
                    break;  // already normalized; no need to test remaining aliases
                }
            }
            mVersion = root.findValue("version").asText();
            mTableName = hbaseNode.findValue("tableName").asText();
            mColumnFamily = hbaseNode.findValue("columnFamily").asText();
            mDelimiter = hbaseNode.findValue("delimiter").asText();
            mCompression = compression;
            mSchema = new Schema.Parser().parse(schemaNode.toString());
            mQualifierItems = mapper.readValue(hbaseNode.findValue("qualifier"), new TypeReference<List<String>>() {
            });
            mHashItems = mapper.readValue(rowKeyNode.findValue("hashitems"), new TypeReference<List<String>>() {
            });
            mUnHashItems = mapper.readValue(rowKeyNode.findValue("unhashitems"), new TypeReference<List<String>>() {
            });
        }

        /**
         * Static factory: parses a JSON metadata document.
         *
         * @param meta the JSON document.
         * @return the parsed metadata.
         * @throws IOException when the JSON cannot be parsed.
         */
        public static DerivedSchemaMeta ofValue(String meta) throws IOException {
            return new DerivedSchemaMeta(meta);
        }

        /** @return the metadata format version string. */
        public String getVersion() {
            return mVersion;
        }

        /** @return the HBase table name this schema is bound to. */
        public String getTableName() {
            return mTableName;
        }

        /** @return row-key components stored without hashing. */
        public List<String> getUnHashItems() {
            return mUnHashItems;
        }

        /** @return the column qualifier components. */
        public List<String> getQualifierItems() {
            return mQualifierItems;
        }

        /** @return the compression name, or {@code ""} when compression is disabled. */
        public String getCompression() {
            return mCompression;
        }

        /** @return the delimiter used to join row-key/qualifier components. */
        public String getDelimiter() {
            return mDelimiter;
        }

        /** @return the column family this schema is bound to. */
        public String getColumnFamily() {
            return mColumnFamily;
        }

        /** @return row-key components that are hashed. */
        public List<String> getHashItems() {
            return mHashItems;
        }

        /** @return the Avro schema parsed from the {@code avroSchema} field. */
        public Schema getSchema() {
            return mSchema;
        }
    }
}
