/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.formats.parquet;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.file.src.util.Pool;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.utils.SerializableConfiguration;
import org.apache.flink.formats.parquet.vector.ColumnBatchFactory;
import org.apache.flink.table.data.ColumnarRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.vector.ColumnVector;
import org.apache.flink.table.data.vector.VectorizedColumnBatch;
import org.apache.flink.table.data.vector.writable.WritableColumnVector;
import org.apache.flink.table.filesystem.ColumnarRowIterator;
import org.apache.flink.table.filesystem.PartitionValueConverter;
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.utils.PartitionPathUtils;

import org.apache.hadoop.conf.Configuration;

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.stream.Collectors;

import static org.apache.flink.formats.parquet.vector.ParquetSplitReaderUtil.createVectorFromConstant;

/**
 * A {@link ParquetVectorizedInputFormat} that produces a {@link RowData} iterator. Each
 * {@link VectorizedColumnBatch} is wrapped in a {@link ColumnarRowData} to expose a row view
 * over the columnar batch.
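 *
 * <p>A minimal construction sketch; the field names and types here are illustrative, not
 * required by this class:
 *
 * <pre>{@code
 * ParquetColumnarRowInputFormat format = new ParquetColumnarRowInputFormat(
 *         new Configuration(),                                  // hadoop configuration
 *         new String[] {"f0", "f1"},                            // projected field names
 *         new LogicalType[] {new IntType(), new VarCharType()}, // projected field types
 *         2048,                                                 // rows per batch
 *         true,                                                 // read timestamps as UTC
 *         true);                                                // case-sensitive field matching
 * }</pre>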
 */
public class ParquetColumnarRowInputFormat extends ParquetVectorizedInputFormat<RowData> {

	private static final long serialVersionUID = 1L;

	private final RowType producedType;

	/**
	 * Creates a Parquet format that produces exactly the projected fields, with no additional
	 * fields beyond them.
	 */
	public ParquetColumnarRowInputFormat(
			Configuration hadoopConfig,
			String[] projectedFields,
			LogicalType[] projectedTypes,
			int batchSize,
			boolean isUtcTimestamp,
			boolean isCaseSensitive) {
		this(
				hadoopConfig,
				projectedFields,
				projectedTypes,
				RowType.of(projectedTypes, projectedFields),
				ColumnBatchFactory.DEFAULT,
				batchSize,
				isUtcTimestamp,
				isCaseSensitive);
	}

	/**
	 * Creates a Parquet format whose produced rows may contain fields beyond those read from
	 * Parquet; the extra fields are filled in by the given {@link ColumnBatchFactory}.
	 */
	public ParquetColumnarRowInputFormat(
			Configuration hadoopConfig,
			String[] projectedFields,
			LogicalType[] projectedTypes,
			RowType producedType,
			ColumnBatchFactory batchFactory,
			int batchSize,
			boolean isUtcTimestamp,
			boolean isCaseSensitive) {
		super(
				new SerializableConfiguration(hadoopConfig),
				projectedFields,
				projectedTypes,
				batchFactory,
				batchSize,
				isUtcTimestamp,
				isCaseSensitive);
		this.producedType = producedType;
	}

	@Override
	protected ParquetReaderBatch<RowData> createReaderBatch(
			WritableColumnVector[] writableVectors,
			VectorizedColumnBatch columnarBatch,
			Pool.Recycler<ParquetReaderBatch<RowData>> recycler) {
		return new ColumnarRowReaderBatch(writableVectors, columnarBatch, recycler);
	}

	@Override
	public TypeInformation<RowData> getProducedType() {
		return InternalTypeInfo.of(producedType);
	}

	private static class ColumnarRowReaderBatch extends ParquetReaderBatch<RowData> {

		private final ColumnarRowIterator result;

		private ColumnarRowReaderBatch(
				WritableColumnVector[] writableVectors,
				VectorizedColumnBatch columnarBatch,
				Pool.Recycler<ParquetReaderBatch<RowData>> recycler) {
			super(writableVectors, columnarBatch, recycler);
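			// One ColumnarRowData is reused for every row of the batch; when the iterator is
			// released, the recycle callback returns this batch to the pool for reuse.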
			this.result = new ColumnarRowIterator(new ColumnarRowData(columnarBatch), this::recycle);
		}

		@Override
		public RecordIterator<RowData> convertAndGetIterator(long rowsReturned) {
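			// Point the reusable iterator at the current batch. `rowsReturned` is the reader's
			// running count of rows produced so far, used for record position tracking.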
			result.set(columnarBatch.getNumRows(), rowsReturned);
			return result;
		}
	}

	/**
	 * Creates a partitioned {@link ParquetColumnarRowInputFormat} whose partition column values
	 * are extracted from the file {@link Path} instead of being read from the Parquet file.
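	 *
	 * <p>A call sketch for a table {@code (f0 INT, dt STRING)} partitioned by {@code dt}; the
	 * names, the default-partition placeholder and the converter below are illustrative, and the
	 * lambda assumes {@link PartitionValueConverter} is a single-method interface:
	 *
	 * <pre>{@code
	 * RowType tableType = RowType.of(
	 *         new LogicalType[] {new IntType(), new VarCharType()},
	 *         new String[] {"f0", "dt"});
	 * ParquetColumnarRowInputFormat format = ParquetColumnarRowInputFormat.createPartitionedFormat(
	 *         new Configuration(),
	 *         tableType,
	 *         Collections.singletonList("dt"),  // partition keys
	 *         "__DEFAULT_PARTITION__",          // placeholder that maps to a null value
	 *         new int[] {0, 1},                 // select all fields, including the partition key
	 *         (valStr, type) -> valStr,         // illustrative: real converters parse by type
	 *         2048,
	 *         true,
	 *         true);
	 * }</pre>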
	 */
	public static ParquetColumnarRowInputFormat createPartitionedFormat(
			Configuration hadoopConfig,
			RowType tableType,
			List<String> partitionKeys,
			String defaultPartValue,
			int[] selectedFields,
			PartitionValueConverter valueConverter,
			int batchSize,
			boolean isUtcTimestamp,
			boolean isCaseSensitive) {
		List<String> selParquetNames = Arrays.stream(selectedFields)
				.mapToObj(i -> tableType.getFieldNames().get(i))
				.filter(n -> !partitionKeys.contains(n))
				.collect(Collectors.toList());

		ColumnBatchFactory factory = (Path filePath, ColumnVector[] parquetVectors) -> {
			LinkedHashMap<String, String> partitionSpec = PartitionPathUtils.extractPartitionSpecFromPath(filePath);
			// create and initialize the row batch
			ColumnVector[] vectors = new ColumnVector[selectedFields.length];
			for (int i = 0; i < vectors.length; i++) {
				RowType.RowField field = tableType.getFields().get(selectedFields[i]);

				if (partitionKeys.contains(field.getName())) {
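					// Partition columns are not stored in the Parquet file; materialize a
					// constant vector from the value encoded in the file path.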
					if (!partitionSpec.containsKey(field.getName())) {
						throw new RuntimeException(
								"Cannot find the partition value in path for partition field: " + field.getName());
					}

					String valueStr = partitionSpec.get(field.getName());
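					// The configured default partition name is a placeholder for a null value.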
					valueStr = valueStr.equals(defaultPartValue) ? null : valueStr;
					Object value = valueConverter.convert(valueStr, field.getType());
					vectors[i] = createVectorFromConstant(field.getType(), value, batchSize);
				} else {
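					// Non-partition columns are served directly from the Parquet reader's vectors.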
					vectors[i] = parquetVectors[selParquetNames.indexOf(field.getName())];
				}
			}
			return new VectorizedColumnBatch(vectors);
		};

		return new ParquetColumnarRowInputFormat(
				hadoopConfig,
				selParquetNames.toArray(new String[0]),
				selParquetNames.stream()
						.map(name -> tableType.getTypeAt(tableType.getFieldIndex(name)))
						.toArray(LogicalType[]::new),
				new RowType(Arrays.stream(selectedFields).mapToObj(i ->
						tableType.getFields().get(i)).collect(Collectors.toList())),
				factory,
				batchSize,
				isUtcTimestamp,
				isCaseSensitive);
	}
}
