/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.alink.common.io;

import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.configuration.IllegalConfigurationException;
import org.apache.flink.connectors.hive.FlinkHiveException;
import org.apache.flink.connectors.hive.HiveOptions;
import org.apache.flink.connectors.hive.HiveTablePartition;
import org.apache.flink.connectors.hive.read.HiveTableInputFormat;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.hive.client.HiveMetastoreClientFactory;
import org.apache.flink.table.catalog.hive.client.HiveMetastoreClientWrapper;
import org.apache.flink.table.catalog.hive.client.HiveShim;
import org.apache.flink.table.catalog.hive.client.HiveShimLoader;
import org.apache.flink.table.catalog.hive.descriptors.HiveCatalogValidator;
import org.apache.flink.table.catalog.hive.util.HiveReflectionUtils;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.hive.conversion.HiveInspectors;
import org.apache.flink.table.runtime.types.TypeInfoDataTypeConverter;
import org.apache.flink.table.sources.*;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import org.apache.flink.table.utils.TableConnectorUtils;
import org.apache.flink.util.Preconditions;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.mapred.JobConf;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;
import java.io.IOException;
import java.sql.Date;
import java.sql.Timestamp;
import java.util.*;
import java.util.stream.Collectors;

/**
 * A TableSource implementation to read data from Hive tables.
 */
/**
 * A {@link BatchTableSource} implementation to read data from Hive tables.
 *
 * <p>Supports three planner push-down optimizations — partition pruning, projection push-down
 * and limit push-down — each of which returns a modified copy of this source created through
 * the private copy constructor, leaving the original instance untouched.
 */
public class HiveBatchSource implements
    BatchTableSource<RowData>,
    PartitionableTableSource,
    ProjectableTableSource<RowData>,
    LimitableTableSource<RowData> {

    private static final Logger LOG = LoggerFactory.getLogger(HiveBatchSource.class);

    private final JobConf jobConf;
    private final ObjectPath tablePath;
    private final CatalogTable catalogTable;
    // Remaining partition specs after partition pruning is performed. Null if pruning is not pushed down.
    @Nullable
    private List<Map<String, String>> remainingPartitions = null;
    private final String hiveVersion;
    private final HiveShim hiveShim;
    // True once applyPartitionPruning() has been pushed down into this source.
    private boolean partitionPruned;
    // Indices into the full table schema of the projected fields; null means "select all columns".
    private int[] projectedFields;
    private boolean isLimitPushDown = false;
    // Maximum number of rows to read; -1 means unlimited.
    private long limit = -1L;

    /**
     * Creates a source for the given Hive table.
     *
     * @param jobConf      Hadoop job configuration; must carry the Hive version under
     *                     {@link HiveCatalogValidator#CATALOG_HIVE_VERSION}
     * @param tablePath    database/table path of the Hive table to read
     * @param catalogTable catalog metadata (schema, partition keys) of the table
     * @throws NullPointerException if any argument or the configured Hive version is null
     */
    public HiveBatchSource(JobConf jobConf, ObjectPath tablePath, CatalogTable catalogTable) {
        this.jobConf = Preconditions.checkNotNull(jobConf);
        this.tablePath = Preconditions.checkNotNull(tablePath);
        this.catalogTable = Preconditions.checkNotNull(catalogTable);
        this.hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
            "Hive version is not defined");
        hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
        partitionPruned = false;
    }

    // A constructor mainly used to create copies during optimizations like partition pruning and projection push down.
    private HiveBatchSource(JobConf jobConf, ObjectPath tablePath, CatalogTable catalogTable,
                            List<Map<String, String>> remainingPartitions,
                            String hiveVersion,
                            boolean partitionPruned,
                            int[] projectedFields,
                            boolean isLimitPushDown,
                            long limit) {
        this.jobConf = Preconditions.checkNotNull(jobConf);
        this.tablePath = Preconditions.checkNotNull(tablePath);
        this.catalogTable = Preconditions.checkNotNull(catalogTable);
        this.remainingPartitions = remainingPartitions;
        this.hiveVersion = hiveVersion;
        hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
        this.partitionPruned = partitionPruned;
        this.projectedFields = projectedFields;
        this.isLimitPushDown = isLimitPushDown;
        this.limit = limit;
    }

    /**
     * Builds the DataSet that reads the (possibly pruned/projected/limited) Hive table.
     *
     * <p>If source-parallelism inference is enabled in the global Flink configuration, the
     * parallelism is set to {@code min(max(1, splitCount), configuredMax)}.
     *
     * @param execEnv the batch execution environment to create the input from
     * @return a named DataSource reading {@link RowData} records from the table
     */
    @Override
    public DataSet<RowData> getDataSet(ExecutionEnvironment execEnv) {
        List<HiveTablePartition> allHivePartitions = initAllPartitions();

        @SuppressWarnings("unchecked")
        TypeInformation<RowData> typeInfo =
            (TypeInformation<RowData>) TypeInfoDataTypeConverter.fromDataTypeToTypeInfo(getProducedDataType());
        Configuration conf = GlobalConfiguration.loadConfiguration();
        HiveTableInputFormat inputFormat = getInputFormat(allHivePartitions, conf.getBoolean(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER));
        DataSource<RowData> source = execEnv.createInput(inputFormat, typeInfo);

        if (conf.getBoolean(HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM)) {
            int max = conf.getInteger(HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM_MAX);
            if (max < 1) {
                throw new IllegalConfigurationException(
                    HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM_MAX.key() +
                        " cannot be less than 1");
            }

            int splitNum;
            try {
                long nano1 = System.nanoTime();
                splitNum = inputFormat.createInputSplits(0).length;
                long nano2 = System.nanoTime();
                // NOTE: placeholder was previously "({}})", which rendered a stray brace in logs.
                LOG.info(
                    "Hive source({}) createInputSplits use time: {} ms",
                    tablePath,
                    (nano2 - nano1) / 1_000_000);
            } catch (IOException e) {
                throw new FlinkHiveException(e);
            }
            source.setParallelism(Math.min(Math.max(1, splitNum), max));
        }
        return source.name(explainSource());
    }

    /**
     * Creates the input format for the given partitions, honoring any pushed-down
     * projection and limit held by this source.
     *
     * @param allHivePartitions partitions to scan
     * @param useMapRedReader   whether to fall back to the Hadoop MapReduce record reader
     */
    @VisibleForTesting
    HiveTableInputFormat getInputFormat(List<HiveTablePartition> allHivePartitions, boolean useMapRedReader) {
        return new HiveTableInputFormat(
            jobConf, catalogTable, allHivePartitions, projectedFields, limit, hiveVersion, useMapRedReader);
    }

    @Override
    public TableSchema getTableSchema() {
        return catalogTable.getSchema();
    }

    @Override
    public DataType getProducedDataType() {
        return getProducedTableSchema().toRowDataType().bridgedTo(RowData.class);
    }

    /**
     * Returns the schema actually produced: the full table schema, or the projected
     * subset when projection push-down has been applied.
     */
    private TableSchema getProducedTableSchema() {
        TableSchema fullSchema = getTableSchema();
        if (projectedFields == null) {
            return fullSchema;
        } else {
            String[] fullNames = fullSchema.getFieldNames();
            DataType[] fullTypes = fullSchema.getFieldDataTypes();
            return TableSchema.builder().fields(
                Arrays.stream(projectedFields).mapToObj(i -> fullNames[i]).toArray(String[]::new),
                Arrays.stream(projectedFields).mapToObj(i -> fullTypes[i]).toArray(DataType[]::new)).build();
        }
    }

    @Override
    public boolean isLimitPushedDown() {
        return isLimitPushDown;
    }

    /**
     * Returns a copy of this source with the given row limit pushed down.
     */
    @Override
    public TableSource<RowData> applyLimit(long limit) {
        return new HiveBatchSource(jobConf, tablePath, catalogTable, remainingPartitions, hiveVersion,
            partitionPruned, projectedFields, true, limit);
    }

    @Override
    public List<Map<String, String>> getPartitions() {
        throw new UnsupportedOperationException(
            "Please use Catalog API to retrieve all partitions of a table");
    }

    /**
     * Returns a copy of this source restricted to the given partitions, or {@code this}
     * unchanged when the table is not partitioned.
     */
    @Override
    public TableSource<RowData> applyPartitionPruning(List<Map<String, String>> remainingPartitions) {
        if (catalogTable.getPartitionKeys() == null || catalogTable.getPartitionKeys().size() == 0) {
            return this;
        } else {
            return new HiveBatchSource(jobConf, tablePath, catalogTable, remainingPartitions, hiveVersion,
                true, projectedFields, isLimitPushDown, limit);
        }
    }

    /**
     * Resolves the concrete Hive partitions to scan. For a non-partitioned table this is a
     * single pseudo-partition holding the table's storage descriptor; for a partitioned table
     * it is either the pruned partition list or all partitions.
     *
     * @throws FlinkHiveException if the Hive metastore cannot be queried
     */
    private List<HiveTablePartition> initAllPartitions() {
        List<HiveTablePartition> allHivePartitions = new ArrayList<>();
        // Please note that the following directly accesses Hive metastore, which is only a temporary workaround.
        // Ideally, we need to go thru Catalog API to get all info we need here, which requires some major
        // refactoring. We will postpone this until we merge Blink to Flink.
        try (HiveMetastoreClientWrapper client = HiveMetastoreClientFactory.create(new HiveConf(jobConf, HiveConf.class), hiveVersion)) {
            String dbName = tablePath.getDatabaseName();
            String tableName = tablePath.getObjectName();
            List<String> partitionColNames = catalogTable.getPartitionKeys();
            Table hiveTable = client.getTable(dbName, tableName);
            Properties tableProps = HiveReflectionUtils.getTableMetadata(hiveShim, hiveTable);
            if (partitionColNames != null && partitionColNames.size() > 0) {
                final String defaultPartitionName = jobConf.get(HiveConf.ConfVars.DEFAULTPARTITIONNAME.varname,
                    HiveConf.ConfVars.DEFAULTPARTITIONNAME.defaultStrVal);
                List<Partition> partitions = new ArrayList<>();
                if (remainingPartitions != null) {
                    // Pruning was pushed down: fetch only the surviving partitions.
                    for (Map<String, String> spec : remainingPartitions) {
                        partitions.add(client.getPartition(dbName, tableName, partitionSpecToValues(spec, partitionColNames)));
                    }
                } else {
                    // -1 means "no limit" on the number of partitions returned.
                    partitions.addAll(client.listPartitions(dbName, tableName, (short) -1));
                }
                for (Partition partition : partitions) {
                    HiveTablePartition hiveTablePartition = toHiveTablePartition(
                        catalogTable.getPartitionKeys(),
                        catalogTable.getSchema().getFieldNames(),
                        catalogTable.getSchema().getFieldDataTypes(),
                        hiveShim,
                        tableProps,
                        defaultPartitionName,
                        partition);
                    allHivePartitions.add(hiveTablePartition);
                }
            } else {
                allHivePartitions.add(new HiveTablePartition(hiveTable.getSd(), tableProps));
            }
        } catch (TException e) {
            throw new FlinkHiveException("Failed to collect all partitions from hive metaStore", e);
        }
        return allHivePartitions;
    }

    /**
     * Converts a metastore {@link Partition} into a {@link HiveTablePartition}, restoring each
     * partition-column value from its string form to the Flink-typed object.
     *
     * @param partitionKeys        ordered partition column names, matching {@code partition.getValues()}
     * @param fieldNames           field names of the full table schema
     * @param fieldTypes           field data types of the full table schema
     * @param shim                 shim for version-specific Hive conversions
     * @param tableProps           table properties attached to the resulting partition
     * @param defaultPartitionName Hive's sentinel for "no value" (e.g. __HIVE_DEFAULT_PARTITION__)
     * @param partition            the metastore partition to convert
     */
    public static HiveTablePartition toHiveTablePartition(
        List<String> partitionKeys,
        String[] fieldNames,
        DataType[] fieldTypes,
        HiveShim shim,
        Properties tableProps,
        String defaultPartitionName,
        Partition partition) {
        StorageDescriptor sd = partition.getSd();
        Map<String, Object> partitionColValues = new HashMap<>();
        List<String> nameList = Arrays.asList(fieldNames);
        for (int i = 0; i < partitionKeys.size(); i++) {
            String partitionColName = partitionKeys.get(i);
            String partitionValue = partition.getValues().get(i);
            DataType type = fieldTypes[nameList.indexOf(partitionColName)];
            Object partitionObject;
            if (defaultPartitionName.equals(partitionValue)) {
                LogicalTypeRoot typeRoot = type.getLogicalType().getTypeRoot();
                // while this is inline with Hive, seems it should be null for string columns as well
                partitionObject = typeRoot == LogicalTypeRoot.CHAR || typeRoot == LogicalTypeRoot.VARCHAR ? defaultPartitionName : null;
            } else {
                partitionObject = restorePartitionValueFromType(shim, partitionValue, type);
            }
            partitionColValues.put(partitionColName, partitionObject);
        }
        return new HiveTablePartition(sd, partitionColValues, tableProps);
    }

    /**
     * Maps a partition spec {@code {colName -> value}} to the value list ordered by
     * {@code partitionColNames}, validating that the spec covers exactly those columns.
     */
    private static List<String> partitionSpecToValues(Map<String, String> spec, List<String> partitionColNames) {
        Preconditions.checkArgument(spec.size() == partitionColNames.size() && spec.keySet().containsAll(partitionColNames),
            "Partition spec (%s) and partition column names (%s) doesn't match", spec, partitionColNames);
        return partitionColNames.stream().map(spec::get).collect(Collectors.toList());
    }

    /**
     * Parses a partition value string back into an object of the given Flink data type.
     *
     * @throws FlinkHiveException if the type is not a supported partition-key type
     */
    private static Object restorePartitionValueFromType(HiveShim shim, String valStr, DataType type) {
        LogicalTypeRoot typeRoot = type.getLogicalType().getTypeRoot();
        //note: it's not a complete list of partition key types that Hive support, we may need add more later.
        switch (typeRoot) {
            case CHAR:
            case VARCHAR:
                return valStr;
            case BOOLEAN:
                return Boolean.parseBoolean(valStr);
            case TINYINT:
                // Parse as int first, then narrow: keeps Hive's truncating behavior for out-of-range values.
                return Integer.valueOf(valStr).byteValue();
            case SMALLINT:
                return Short.valueOf(valStr);
            case INTEGER:
                return Integer.valueOf(valStr);
            case BIGINT:
                return Long.valueOf(valStr);
            case FLOAT:
                return Float.valueOf(valStr);
            case DOUBLE:
                return Double.valueOf(valStr);
            case DATE:
                return HiveInspectors.toFlinkObject(
                    HiveInspectors.getObjectInspector(type),
                    shim.toHiveDate(Date.valueOf(valStr)),
                    shim);
            case TIMESTAMP_WITHOUT_TIME_ZONE:
                return HiveInspectors.toFlinkObject(
                    HiveInspectors.getObjectInspector(type),
                    shim.toHiveTimestamp(Timestamp.valueOf(valStr)),
                    shim);
            default:
                break;
        }
        throw new FlinkHiveException(
            new IllegalArgumentException(String.format("Can not convert %s to type %s for partition value", valStr, type)));
    }

    /**
     * Human-readable description of this source including which optimizations were pushed down.
     * Note: %d formats a null partition count as the string "null" (pruning not pushed down).
     */
    @Override
    public String explainSource() {
        String explain = String.format(" TablePath: %s, PartitionPruned: %s, PartitionNums: %d",
            tablePath.getFullName(), partitionPruned, null == remainingPartitions ? null : remainingPartitions.size());
        if (projectedFields != null) {
            explain += ", ProjectedFields: " + Arrays.toString(projectedFields);
        }
        if (isLimitPushDown) {
            explain += String.format(", LimitPushDown %s, Limit %d", isLimitPushDown, limit);
        }
        return TableConnectorUtils.generateRuntimeName(getClass(), getTableSchema().getFieldNames()) + explain;
    }

    /**
     * Returns a copy of this source projecting only the given field indices.
     */
    @Override
    public TableSource<RowData> projectFields(int[] fields) {
        return new HiveBatchSource(jobConf, tablePath, catalogTable, remainingPartitions, hiveVersion,
            partitionPruned, fields, isLimitPushDown, limit);
    }
}
