package LibDL.models;

import LibDL.core.*;
import LibDL.core.nn.*;
import LibDL.core.nn.Module;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;


/**
 * A Java implementation of DeepFM for rates prediction problems, ported from:
 * ref: https://github.com/chenxijun1029/DeepFM_with_PyTorch
 *
 * A DeepFM network with RMSE loss for rates prediction problem.
 * There are two parts in the architecture of this network: fm part for low
 * order interactions of features and deep part for higher order. In this
 * network, we use batch normalization and dropout for all hidden layers,
 * and the "Adam" method for optimization.
 * You may find more details in this paper:
 * DeepFM: A Factorization-Machine based Neural Network for CTR Prediction,
 * Huifeng Guo, Ruiming Tang, Yunming Ye, Zhenguo Li, Xiuqiang He.
 */
public class DeepFM extends Module {
    // Number of feature fields (derived from feature_sizes.length).
    int field_size, embedding_size, num_classes;
    int[] feature_sizes, hidden_dims;
    // Global bias term, registered as a trainable parameter.
    Tensor bias;
    // Alias for `this`, kept to mirror the Python reference code's `self.` style.
    DeepFM self;
    Dtype dtype;
    Device device;
    // One embedding per field: first-order embeddings map to scalars (dim 1),
    // second-order embeddings map to `embedding_size`-dim latent vectors.
    List<Embedding> fm_first_order_embeddings,
            fm_second_order_embeddings;
    List<Linear> linears;
    List<BatchNorm1d> batchNorms;
    List<Dropout> dropouts;

    /**
     * Initialize a new network.
     *
     * @param feature_sizes  A list of integers giving the size of features for each field.
     * @param embedding_size An integer giving the size of each feature embedding.
     * @param hidden_dims    A list of integers giving the size of each hidden layer.
     * @param num_classes    An integer giving the number of classes to predict. For example,
     *                       someone may rate 1,2,3,4 or 5 stars to a film.
     * @param dropout        Dropout probability per hidden layer; must have at least
     *                       {@code hidden_dims.length} entries.
     */
    DeepFM(int[] feature_sizes,
           int embedding_size,
           int[] hidden_dims,
           int num_classes,
           double[] dropout) {
        self = this;
        self.field_size = feature_sizes.length;
        self.feature_sizes = feature_sizes;
        self.embedding_size = embedding_size;
        self.hidden_dims = hidden_dims;
        self.num_classes = num_classes;
        self.dtype = Dtype.INT64;
        self.bias = register_parameter("bias", functional.randn(1));
        self.fm_first_order_embeddings = new ArrayList<>();
        self.fm_second_order_embeddings = new ArrayList<>();
        for (int i : self.feature_sizes) {
            self.fm_first_order_embeddings.add(new Embedding(i, 1));
            self.fm_second_order_embeddings.add(new Embedding(i, self.embedding_size));
        }
        register_modules("fm_first_order_embeddings", fm_first_order_embeddings);
        register_modules("fm_second_order_embeddings", fm_second_order_embeddings);
        // Layer widths of the deep part: [field_size * embedding_size, hidden_dims..., num_classes].
        List<Integer> all_dims = new ArrayList<>(Arrays.asList(self.field_size * self.embedding_size));
        all_dims.addAll(Arrays.stream(self.hidden_dims).boxed().collect(Collectors.toList()));
        all_dims.add(self.num_classes);
        linears = new ArrayList<>();
        dropouts = new ArrayList<>();
        batchNorms = new ArrayList<>();
        for (int i = 0; i < hidden_dims.length; i++) {
            linears.add(new Linear(all_dims.get(i), all_dims.get(i + 1)));
            batchNorms.add(new BatchNorm1d(all_dims.get(i + 1)));
            dropouts.add(new Dropout(dropout[i]));
        }
        register_modules(new String[]{"linears", "batchNorms", "dropouts"},
                linears, batchNorms, dropouts);
    }

    /**
     * Forward process of the network.
     *
     * Output = sum(first-order FM term) + sum(second-order FM term) + sum(deep MLP output).
     * The second-order term uses the identity
     * {@code sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * ((sum_i e_i)^2 - sum_i e_i^2)}
     * where {@code e_i} is the value-weighted embedding of field i.
     *
     * @param Xi A tensor of input's index, shape of (N, field_size, 1)
     * @param Xv A tensor of input's value, shape of (N, field_size, 1)
     * @return predicted value
     */
    Tensor forward(Tensor Xi, Tensor Xv) {
        // First-order term: per-field scalar embedding weighted by the field value.
        StdVectorTensor fm_first_order_emb_arr = new StdVectorTensor();
        for (int i = 0; i < fm_first_order_embeddings.size(); i++) {
            fm_first_order_emb_arr.add(functional.sum(fm_first_order_embeddings.get(i)
                    .forward(Xi.index_select(1, functional.tensor(i))), 1).t()
                    .mul(Xv.index_select(1, functional.tensor(i)).t()));
        }
        Tensor fm_first_order = functional.cat(fm_first_order_emb_arr, 1);
        // Second-order term: per-field latent embedding weighted by the field value.
        StdVectorTensor fm_second_order_emb_arr = new StdVectorTensor();
        for (int i = 0; i < fm_second_order_embeddings.size(); i++) {
            fm_second_order_emb_arr.add(functional.sum(fm_second_order_embeddings.get(i)
                    .forward(Xi.index_select(1, functional.tensor(i))), 1).t()
                    .mul(Xv.index_select(1, functional.tensor(i)).t()));
        }
        // BUGFIX: accumulate the sum over fields with add(); previously each
        // iteration overwrote the accumulator, reducing it to the last element
        // and making the FM interaction term identically zero.
        Tensor fm_sum_second_order_emb = fm_second_order_emb_arr.get(0).clone();
        for (int i = 1; i < fm_second_order_emb_arr.size(); i++) {
            fm_sum_second_order_emb = fm_sum_second_order_emb.add(fm_second_order_emb_arr.get(i));
        }
        // (sum_i e_i)^2
        Tensor fm_sum_second_order_emb_square = fm_sum_second_order_emb.mul(fm_sum_second_order_emb);
        // e_i^2 for each field.
        List<Tensor> fm_second_order_emb_square = fm_second_order_emb_arr.stream().map(i -> i.mul(i)).collect(Collectors.toList());
        // BUGFIX: same accumulation bug as above — sum e_i^2 over fields with add().
        Tensor fm_second_order_emb_square_sum = fm_second_order_emb_square.get(0);
        for (int i = 1; i < fm_second_order_emb_square.size(); i++) {
            fm_second_order_emb_square_sum = fm_second_order_emb_square_sum.add(fm_second_order_emb_square.get(i));
        }
        // 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) -- the pairwise interaction term.
        Tensor fm_second_order = (fm_sum_second_order_emb_square.sub(fm_second_order_emb_square_sum)).mul(new Scalar(0.5));

        // Deep part: MLP over the concatenated second-order embeddings.
        Tensor deep_out = functional.cat(fm_second_order_emb_arr, 1);

        for (int i = 0; i < hidden_dims.length; i++) {
            deep_out = linears.get(i).forward(deep_out);
            deep_out = batchNorms.get(i).forward(deep_out);
            deep_out = dropouts.get(i).forward(deep_out);
        }

        // Final prediction: sum of the three component outputs.
        return functional.sum(fm_first_order, 1).add(functional.sum(fm_second_order, 1)).add(functional.sum(deep_out, 1));
    }
}