package com.alibaba.alink.operator.batch.dataproc;

import com.alibaba.alink.operator.common.dataproc.StringIndexerUtil;
import com.alibaba.alink.common.utils.OutputColsHelper;
import com.alibaba.alink.common.utils.TableUtil;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.params.dataproc.MultiStringIndexerPredictParams;
import com.alibaba.alink.params.dataproc.StringIndexerPredictParams;
import com.alibaba.alink.params.shared.colname.HasSelectedCols;
import org.apache.flink.api.common.functions.*;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.utils.DataSetUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.ml.api.misc.param.Params;
import org.apache.flink.types.Row;
import org.apache.flink.util.Collector;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.alibaba.alink.operator.common.dataproc.StringIndexerUtil.HandleInvalidStrategy.ERROR;
import static com.alibaba.alink.operator.common.dataproc.StringIndexerUtil.HandleInvalidStrategy.SKIP;

/**
 * Map string to index based on the model generated by {@link MultiStringIndexerTrainBatchOp}.
 *
 * <p>Model table layout, as consumed here: field 0 holds a column index (Long) and field 1 a
 * string payload. A row with a negative column index carries the model meta (a JSON-serialized
 * {@link Params}); a row with a non-negative column index carries one (token, token index) pair
 * for that training column, with the token index (Long) in field 2.
 */
public final class MultiStringIndexerPredictBatchOp
    extends BatchOperator<MultiStringIndexerPredictBatchOp>
    implements MultiStringIndexerPredictParams<MultiStringIndexerPredictBatchOp> {

    /**
     * Creates the operator with empty parameters.
     */
    public MultiStringIndexerPredictBatchOp() {
        this(new Params());
    }

    /**
     * Creates the operator with the given parameters.
     *
     * @param params Parameters of the operator.
     */
    public MultiStringIndexerPredictBatchOp(Params params) {
        super(params);
    }

    /**
     * Extract model meta from the model table.
     *
     * @param model The model fitted by {@link MultiStringIndexerTrainBatchOp}.
     * @return A DataSet of only one record, which is the meta string of the model.
     */
    private static DataSet<String> getModelMeta(BatchOperator model) {
        DataSet<Row> modelRows = model.getDataSet();
        return modelRows
            .flatMap(new RichFlatMapFunction<Row, String>() {
                @Override
                public void flatMap(Row row, Collector<String> out) throws Exception {
                    long columnIndex = (Long) row.getField(0);
                    // A negative column index marks the meta row; the "exactly one meta row"
                    // invariant is verified downstream, in getModelData's open().
                    if (columnIndex < 0L) {
                        out.collect((String) row.getField(1));
                    }

                }
            })
            .name("get_model_meta");
    }

    /**
     * Extract the token to index mapping from the model. The <code>selectedCols</code> should be a subset
     * of those columns used to train the model.
     *
     * @param model        The model fitted by {@link MultiStringIndexerTrainBatchOp}.
     * @param modelMeta    The meta string of the model.
     * @param selectedCols The selected columns in prediction data.
     * @return A DataSet of tuples of column index, token, token index. Note that the column index
     *         is the position of the column within <code>selectedCols</code>, not the training
     *         column index stored in the model.
     */
    private static DataSet<Tuple3<Integer, String, Long>> getModelData(BatchOperator model, DataSet<String> modelMeta,
                                                                       final String[] selectedCols) {
        DataSet<Row> modelRows = model.getDataSet();
        return modelRows
            .flatMap(new RichFlatMapFunction<Row, Tuple3<Integer, String, Long>>() {
                // For each position i in selectedCols, the index of that column among the
                // training columns recorded in the model meta. Built once per task in open().
                transient int[] selectedColIdxInModel;

                @Override
                public void open(Configuration parameters) throws Exception {
                    List<String> metaList = getRuntimeContext().getBroadcastVariable("modelMeta");
                    if (metaList.size() != 1) {
                        throw new IllegalArgumentException("Invalid model.");
                    }
                    Params meta = Params.fromJson(metaList.get(0));
                    String[] trainColNames = meta.get(HasSelectedCols.SELECTED_COLS);
                    selectedColIdxInModel = new int[selectedCols.length];
                    for (int i = 0; i < selectedCols.length; i++) {
                        String selectedColName = selectedCols[i];
                        int colIdxInModel = TableUtil.findColIndex(trainColNames, selectedColName);
                        // Every selected column must have been present at training time.
                        if (colIdxInModel < 0) {
                            throw new RuntimeException("Can't find col in model: " + selectedColName);
                        }
                        selectedColIdxInModel[i] = colIdxInModel;
                    }
                }

                @Override
                public void flatMap(Row row, Collector<Tuple3<Integer, String, Long>> out) throws Exception {
                    long columnIndex = (Long) row.getField(0);
                    // Skip the meta row (negative column index). Data rows are re-keyed from
                    // the training column index to the position within selectedCols; rows of
                    // training columns that were not selected are dropped.
                    if (columnIndex >= 0L) {
                        int colIdx = ((Long) row.getField(0)).intValue();
                        for (int i = 0; i < selectedColIdxInModel.length; i++) {
                            if (selectedColIdxInModel[i] == colIdx) {
                                out.collect(Tuple3.of(i, (String) row.getField(1), (Long) row.getField(2)));
                                break;
                            }
                        }
                    }
                }
            })
            .withBroadcastSet(modelMeta, "modelMeta")
            .name("get_model_data");
    }

    /**
     * Maps each selected column of the prediction data to its token index using the model.
     *
     * @param inputs inputs[0] is the model fitted by {@link MultiStringIndexerTrainBatchOp};
     *               inputs[1] is the data to predict on.
     * @return this operator, with its output table set.
     */
    @Override
    public MultiStringIndexerPredictBatchOp linkFrom(BatchOperator<?>... inputs) {
        Params params = super.getParams();
        BatchOperator model = inputs[0];
        BatchOperator data = inputs[1];

        String[] selectedColNames = params.get(MultiStringIndexerPredictParams.SELECTED_COLS);
        String[] outputColNames = params.get(MultiStringIndexerPredictParams.OUTPUT_COLS);
        // By default the indexed result overwrites the selected columns.
        if (outputColNames == null) {
            outputColNames = selectedColNames;
        }
        String[] keepColNames = params.get(StringIndexerPredictParams.RESERVED_COLS);
        // All output columns hold token indices, thus are of type LONG.
        TypeInformation[] outputColTypes = new TypeInformation[outputColNames.length];
        Arrays.fill(outputColTypes, Types.LONG);

        OutputColsHelper outputColsHelper = new OutputColsHelper(data.getSchema(), outputColNames,
            outputColTypes, keepColNames);

        final int[] selectedColIdx = TableUtil.findColIndices(data.getSchema(), selectedColNames);
        final StringIndexerUtil.HandleInvalidStrategy handleInvalidStrategy
            = StringIndexerUtil.HandleInvalidStrategy
            .valueOf(params.get(StringIndexerPredictParams.HANDLE_INVALID).toUpperCase());

        // Tag each input record with a unique id so the per-column results can be
        // re-assembled into one row per record after the join.
        DataSet<Tuple2<Long, Row>> dataWithId = DataSetUtils.zipWithUniqueId(data.getDataSet());

        DataSet<String> modelMeta = getModelMeta(model);
        DataSet<Tuple3<Integer, String, Long>> modelData = getModelData(model, modelMeta, selectedColNames);

        // tuple: column index, default token id
        // The default id is max(token id) + 1 per column; it is assigned to unseen tokens
        // under the KEEP strategy.
        DataSet<Tuple2<Integer, Long>> defaultIndex = modelData
            .<Tuple2<Integer, Long>>project(0, 2)
            .groupBy(0)
            .reduce(new ReduceFunction<Tuple2<Integer, Long>>() {
                @Override
                public Tuple2<Integer, Long> reduce(Tuple2<Integer, Long> value1, Tuple2<Integer, Long> value2)
                    throws Exception {
                    return Tuple2.of(value1.f0, Math.max(value1.f1, value2.f1));
                }
            })
            .map(new MapFunction<Tuple2<Integer, Long>, Tuple2<Integer, Long>>() {
                @Override
                public Tuple2<Integer, Long> map(Tuple2<Integer, Long> value) throws Exception {
                    return Tuple2.of(value.f0, value.f1 + 1L);
                }
            })
            .name("get_default_index");

        // tuple: record id, column index, token
        // Only non-null values are flattened here; null values take the separate
        // path below ("map_null_token_to_index").
        DataSet<Tuple3<Long, Integer, String>> flattened = dataWithId
            .flatMap(new RichFlatMapFunction<Tuple2<Long, Row>, Tuple3<Long, Integer, String>>() {
                @Override
                public void flatMap(Tuple2<Long, Row> value, Collector<Tuple3<Long, Integer, String>> out)
                    throws Exception {
                    for (int i = 0; i < selectedColIdx.length; i++) {
                        Object o = value.f1.getField(selectedColIdx[i]);
                        if (o != null) {
                            out.collect(Tuple3.of(value.f0, i, String.valueOf(o)));
                        }
                    }
                }
            })
            .name("flatten_pred_data");

        // tuple: record id, column index, token id
        DataSet<Tuple3<Long, Integer, Long>> indexedNulTokens = dataWithId
            .flatMap(new FlatMapFunction<Tuple2<Long, Row>, Tuple3<Long, Integer, Long>>() {
                @Override
                public void flatMap(Tuple2<Long, Row> value, Collector<Tuple3<Long, Integer, Long>> out) throws Exception {
                    for (int i = 0; i < selectedColIdx.length; i++) {
                        Object o = value.f1.getField(selectedColIdx[i]);
                        if (o == null) {
                            // because null value is ignored during training, so it will always
                            // be treated as "unseen" token.
                            out.collect(Tuple3.of(value.f0, i, -1L));
                        }
                    }
                }
            })
            .name("map_null_token_to_index");

        // record id, column index, token index
        // Left outer join on (column index, token): tokens absent from the model get the
        // sentinel index -1L, resolved later according to the handle-invalid strategy.
        DataSet<Tuple3<Long, Integer, Long>> indexed = flattened
            .leftOuterJoin(modelData)
            .where(1, 2).equalTo(0, 1)
            .with(new JoinFunction<Tuple3<Long, Integer, String>, Tuple3<Integer, String, Long>, Tuple3<Long, Integer, Long>>() {
                @Override
                public Tuple3<Long, Integer, Long> join(Tuple3<Long, Integer, String> first,
                                                        Tuple3<Integer, String, Long> second) throws Exception {
                    if (second == null) {
                        return Tuple3.of(first.f0, first.f1, -1L);
                    } else {
                        return Tuple3.of(first.f0, first.f1, second.f2);
                    }
                }
            })
            .name("map_token_to_index");

        // tuple: record id, prediction result
        // Each record id contributes exactly one tuple per selected column (from either the
        // joined path or the null-token path), so the group below fills every field of the row.
        DataSet<Tuple2<Long, Row>> aggregateResult = indexed
            .union(indexedNulTokens)
            .groupBy(0)
            .reduceGroup(new RichGroupReduceFunction<Tuple3<Long, Integer, Long>, Tuple2<Long, Row>>() {
                // Per-column default token id for unseen tokens; broadcast from "defaultIndex".
                // Left null under SKIP/ERROR, which is safe because only the KEEP branch reads it.
                transient Map<Integer, Long> defaultIndex;

                @Override
                public void open(Configuration parameters) throws Exception {
                    if (handleInvalidStrategy.equals(SKIP)
                        || handleInvalidStrategy.equals(ERROR)) {
                        return;
                    }
                    List<Tuple2<Integer, Long>> bc = getRuntimeContext().getBroadcastVariable("defaultIndex");
                    defaultIndex = new HashMap <>();
                    for (int i = 0; i < bc.size(); i++) {
                        defaultIndex.put(bc.get(i).f0, bc.get(i).f1);
                    }
                }

                @Override
                public void reduce(Iterable<Tuple3<Long, Integer, Long>> values, Collector<Tuple2<Long, Row>> out)
                    throws Exception {

                    Long id = null;
                    Row r = new Row(selectedColIdx.length);
                    for (Tuple3<Long, Integer, Long> v : values) {
                        Long index = v.f2;
                        // -1L is the sentinel for tokens not found in the model (including nulls).
                        if (index == -1L) {
                            switch (handleInvalidStrategy) {
                                case KEEP:
                                    // NOTE(review): when a column has no model data at all the
                                    // fallback 0L collides with real token id 0 — confirm intended.
                                    index = defaultIndex.get(v.f1);
                                    index = index == null ? 0L : index;
                                    break;
                                case SKIP:
                                    // SKIP writes null into the output column; the record
                                    // itself is still emitted.
                                    index = null;
                                    break;
                                case ERROR:
                                    throw new RuntimeException("Unknown token.");
                            }
                        }

                        int col = v.f1;
                        r.setField(col, index);
                        id = v.f0;
                    }
                    out.collect(Tuple2.of(id, r));
                }
            })
            .withBroadcastSet(defaultIndex, "defaultIndex")
            .name("aggregate_result");

        // Join the per-record results back with the original rows to build the output,
        // keeping reserved columns as configured in outputColsHelper.
        DataSet<Row> output = dataWithId
            .join(aggregateResult)
            .where(0).equalTo(0)
            .with(new JoinFunction<Tuple2<Long, Row>, Tuple2<Long, Row>, Row>() {
                @Override
                public Row join(Tuple2<Long, Row> first, Tuple2<Long, Row> second) throws Exception {
                    return outputColsHelper.getResultRow(first.f1, second.f1);
                }
            });

        this.setOutput(output, outputColsHelper.getResultSchema());
        return this;
    }
}
