package com.xrui.adc.hbase.schema;

import com.google.common.base.Joiner;
import com.xrui.hbase.AvroSchemaResolver;
import com.xrui.hbase.TableName;
import com.xrui.hbase.impl.HTableInterfaceFactory;
import com.xrui.hbase.util.ReferenceCountable;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;


/**
 * <p>
 * Mapping between schema IDs, hashes and Avro schema objects.
 * This class is thread-safe.
 * </p>
 * <p>
 * Schemas are stored in two tables with a single column family named "schema" that contains
 * SchemaTableEntry records. One table is indexed by schema hashes (128-bit MD5 hashes of the
 * schema JSON representation). The other table is indexed by schema IDs (integers &gt;= 0).
 * There may be multiple schema IDs for a single schema.
 * </p>
 */
public final class CDFViewTableSchemaResolver implements AvroSchemaResolver, ReferenceCountable<CDFViewTableSchemaResolver> {
    static final String SCHEMA_TABLE_NAME = "Type";

    /**
     * The column family in HBase used to store schema entries.
     */
    private static final String SCHEMA_COLUMN_FAMILY = "d";
    /**
     * The column qualifier in HBase used to store schema entries.
     */
    private static final String SCHEMA_COLUMN_QUALIFIER = "schema";
    private static final char SEPARATOR = '_';
    private static final Logger LOG = LoggerFactory.getLogger(CDFViewTableSchemaResolver.class);
    private static final byte[] SCHEMA_COLUMN_FAMILY_BYTES = Bytes.toBytes(SCHEMA_COLUMN_FAMILY);
    private static final byte[] SCHEMA_COLUMN_QUALIFIER_BYTES = Bytes.toBytes(SCHEMA_COLUMN_QUALIFIER);

    /**
     * HTable used to map schema entries.
     */
    private final HTableInterface mSchemaTable;
    /**
     * Schema hash map cache.
     */
    private final Map<String, Schema> mSchemaCacheMap = new HashMap<>();

    /**
     * Open a connection to the HBase schema table for a HBase instance.
     *
     * @param tableName    The HBase table name.
     * @param conf         The Hadoop configuration.
     * @param tableFactory HTableInterface factory.
     * @throws IOException on I/O error.
     */
    public CDFViewTableSchemaResolver(
        TableName tableName,
        Configuration conf,
        HTableInterfaceFactory tableFactory
    ) throws IOException {
        mSchemaTable = newSchemaTable(tableName, conf, tableFactory);
    }

    /**
     * Creates an HTable handle to the schema hash table.
     *
     * @param conf    the Hadoop configuration.
     * @param factory HTableInterface factory.
     * @return a new interface for the table storing the mapping from schema hash to schema entry.
     * @throws IOException on I/O error.
     */
    private static HTableInterface newSchemaTable(
        TableName tableName,
        Configuration conf,
        HTableInterfaceFactory factory)
        throws IOException {
        return factory.create(conf, tableName.toString());
    }

    private String doGet(String rowKey, byte[] columnFamily, byte[] qualifier) {
        final Get get = new Get(rowKey.getBytes()).addColumn(columnFamily, qualifier);
        final Cell cell;
        try {
            Result result = mSchemaTable.get(get);
            cell = result.getColumnLatestCell(columnFamily, qualifier);
        } catch (IOException e) {
            LOG.error("Error occurred while load schema", e);
            return null;
        }
        return null == cell ? null : Bytes.toString(CellUtil.cloneValue(cell));
    }

    private Map<String, Schema> resolveSchemas(String json) throws IOException {
        final ObjectMapper mapper = new ObjectMapper();
        final JsonParser parser = new JsonFactory().createJsonParser(json)
            .enable(JsonParser.Feature.ALLOW_COMMENTS)
            .enable(JsonParser.Feature.ALLOW_SINGLE_QUOTES)
            .enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES);
        final JsonNode root = mapper.readTree(parser);

        Map<String, Schema> schemas = new HashMap<>();
        for (Iterator<String> it = root.getFieldNames(); it.hasNext(); ) {
            String field = it.next();
            JsonNode node = root.get(field);
            Schema schema = new Schema.Parser().parse(node.toString());
            schemas.put(field, schema);
        }

        return schemas;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public synchronized Schema apply(String avroSchemaId) {
        final Schema existing = mSchemaCacheMap.get(avroSchemaId);
        if (existing != null) {
            return existing;
        }

        // On a lookup miss from the local meta cache, check to see if we can create the meta
        // from the original HBase table, cache it locally, and return it.
        //String type = Splitter.on(SEPARATOR).limit(2).split(avroSchemaId).iterator().next();
        String type = "CCC"; //Splitter.on(SEPARATOR).limit(2).split(avroSchemaId).iterator().next();
        final String json = doGet(type, SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES);
        if (json != null && !json.isEmpty()) {
            try {
                for(Map.Entry<String, Schema> schemaEntry : resolveSchemas(json).entrySet()) {
                    String id = Joiner.on(SEPARATOR).join(type, schemaEntry.getKey());
                    mSchemaCacheMap.put(id, schemaEntry.getValue());
                }

                return mSchemaCacheMap.get(avroSchemaId);
            } catch (IOException e) {
                LOG.error("Unable to resolve schema: " + json, e);
            }
        }
        return null;
    }

    @Override
    public void cache() throws IOException {
        LOG.info("Loading entries from schema hash table.");
        final Map<String, Schema> entries = new HashMap<>();
        final ResultScanner schemaTableScanner = mSchemaTable.getScanner(
            new Scan().addColumn(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES));  // retrieve latest version
        for (Result result : schemaTableScanner) {
            final byte[] rowKey = result.getRow();
            Cell cell = result.getColumnLatestCell(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES);
            final byte[] value = CellUtil.cloneValue(cell);
            String key = Bytes.toString(rowKey);
            String json = Bytes.toString(value);
            try {
                for(Map.Entry<String, Schema> schemaEntry : resolveSchemas(json).entrySet()) {
                    String id = Joiner.on(SEPARATOR).join(key, schemaEntry.getKey());
                    entries.put(id, schemaEntry.getValue());
                }
            } catch (AvroRuntimeException are) {
                LOG.error(String.format(
                    "Unable to decode schema table entry for row %s, timestamp %d: %s", key, cell.getTimestamp(), are));
            }
        }
        LOG.info(String.format("Schema table has %d entries.", entries.size()));
        mSchemaCacheMap.clear();
        mSchemaCacheMap.putAll(entries);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CDFViewTableSchemaResolver retain() {
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void release() throws IOException {
        mSchemaTable.close();
    }
}
