package LibDL.recommender.general;

import LibDL.core.*;
import LibDL.core.optim.SGD;
import LibDL.recommender.data.utils.NumUtils;
import LibDL.models.BPR;
import LibDL.recommender.GeneralRecommender;
import LibDL.recommender.LibDLRecommender;
import net.librec.common.LibrecException;
import net.librec.math.algorithm.Randoms;
import net.librec.math.structure.SequentialAccessSparseMatrix;
import org.jetbrains.annotations.Nullable;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * Bayesian Personalized Ranking (BPR) recommender backed by a LibDL {@link BPR}
 * module trained with SGD on sampled (user, positive item, negative item) triples.
 *
 * <p>Configuration keys read: {@code libdl.bpr.dim} (latent dimensionality, default 10).
 * Configuration keys written (for LibRec evaluators): see {@link #addEvalPara()}.
 */
public class BPRRecommender extends GeneralRecommender {

    /** Dimensionality of the user/item latent factors (config key "libdl.bpr.dim", default 10). */
    private int dim;

    /**
     * Reads configuration, wires the train/test matrices from the data model, and
     * builds the BPR module plus an SGD optimizer over its parameters.
     *
     * @throws LibrecException if the parent setup fails
     */
    @Override
    public void setup() throws LibrecException {
        super.setup();
        dim = conf.getInt("libdl.bpr.dim", 10);
        this.train_matrix = (SequentialAccessSparseMatrix) getDataModel().getTrainDataSet();
        this.test_matrix = (SequentialAccessSparseMatrix) getDataModel().getTestDataSet();
        this.num_users = train_matrix.rowSize();
        this.num_items = train_matrix.columnSize();
        this.module = new BPR(num_users, num_items, dim);
        this.optimizer = new SGD(this.module.parameters(), this.learning_rate);
    }

    /**
     * Publishes the statistics the LibRec evaluators expect to find in the configuration:
     * per-user dropped-item counts (AUCEvaluator), the maximum number of test items of any
     * user (nDCGEvaluator's IDCG), the total item count (EntropyEvaluator), and per-item
     * purchase counts over train + test (NoveltyEvaluator).
     */
    public void addEvalPara() {
        int[] numDroppedItemsArray = new int[num_users]; // for AUCEvaluator
        int maxNumTestItemsByUser = 0; // for idcg
        for (int userIdx = 0; userIdx < num_users; ++userIdx) {
            numDroppedItemsArray[userIdx] = num_items - train_matrix.row(userIdx).getNumEntries();
            int numTestItemsByUser = test_matrix.row(userIdx).getNumEntries();
            maxNumTestItemsByUser = Math.max(maxNumTestItemsByUser, numTestItemsByUser);
        }
        int[] itemPurchasedCount = new int[num_items]; // for NoveltyEvaluator
        for (int itemIdx = 0; itemIdx < num_items; ++itemIdx) {
            itemPurchasedCount[itemIdx] = train_matrix.column(itemIdx).getNumEntries()
                    + test_matrix.column(itemIdx).getNumEntries();
        }
        conf.setInts("rec.eval.auc.dropped.num", numDroppedItemsArray); // for aucEvaluator
        conf.setInt("rec.eval.key.test.max.num", maxNumTestItemsByUser); // for nDCGEvaluator
        conf.setInt("rec.eval.item.num", test_matrix.columnSize()); // for EntropyEvaluator
        conf.setInts("rec.eval.item.purchase.num", itemPurchasedCount); // for NoveltyEvaluator
    }

    /**
     * Scores the given items for a single user.
     *
     * @param user_id  inner id of the user to score for
     * @param item_ids inner ids of the items to score; {@code null} means all items
     * @return one score per requested item, flattened to a list
     */
    public List<Float> predict(int user_id, @Nullable List<Integer> item_ids) {
        item_ids = prepareForPrediction(item_ids);
        Tensor items = toIndexTensor(item_ids);
        Tensor scores = ((BPR) module).predict(user_id, items);
        return new ArrayList<>(scores.flatten().tolist_float());
    }

    /**
     * Scores the given items for a batch of users.
     *
     * @param user_ids inner ids of the users to score for
     * @param item_ids inner ids of the items to score; {@code null} means all items
     * @return the flattened prediction scores produced by {@link BPR#predict}
     */
    public List<Float> predict(List<Integer> user_ids, @Nullable List<Integer> item_ids) {
        item_ids = prepareForPrediction(item_ids);
        Tensor items = toIndexTensor(item_ids);
        Tensor users = toIndexTensor(user_ids);
        Tensor scores = ((BPR) module).predict(users, items);
        return new ArrayList<>(scores.flatten().tolist_float());
    }

    /**
     * Shared preamble of both {@code predict} overloads: switches the module to eval
     * mode, disables gradient tracking, publishes evaluator parameters, and resolves
     * the item-id list.
     *
     * @param item_ids requested item ids, or {@code null} for all items
     * @return the item ids to score (never {@code null})
     */
    private List<Integer> prepareForPrediction(@Nullable List<Integer> item_ids) {
        if (test_matrix == null) {
            // NOTE(review): we only warn here, yet addEvalPara() below still dereferences
            // test_matrix and would throw an NPE — confirm whether an early return is wanted.
            LOG.warn("Missing test sequences, cannot make predictions");
        }
        this.module.eval();
        functional.no_grad();
        addEvalPara();
        if (item_ids == null) {
            item_ids = NumUtils.arange(0, num_items, 1);
        }
        return item_ids;
    }

    /** Converts a list of inner ids into an INT64 index tensor. */
    private Tensor toIndexTensor(List<Integer> indices) {
        return functional.as_tensor(new StdVectorLong(indices), new TensorOptions(Dtype.INT64));
    }

    /** Converts an array of inner ids into an INT64 index tensor. */
    private Tensor toIndexTensor(int[] indices) {
        return functional.as_tensor(new StdVectorLong(indices), new TensorOptions(Dtype.INT64));
    }

    /**
     * Samples one BPR training triple into {@code out}: {@code out[0]} = user,
     * {@code out[1]} = a positive (observed) item of that user, {@code out[2]} = a
     * negative item the user has not interacted with. Users with no positives, or
     * with every item positive, are resampled.
     */
    private void sampleTriple(Map<Integer, Tensor> userItems, int[] out) {
        while (true) {
            int user_index = Randoms.uniform(num_users);
            Tensor positems = userItems.get(user_index);
            if (positems.size(0) == 0 || positems.size(0) == num_items) {
                continue; // no usable positive/negative split for this user
            }
            int pos_item_index = (int) positems.get(Randoms.uniform(positems.size(0))).item().to_double();
            int neg_item_index;
            do {
                neg_item_index = Randoms.uniform(num_items);
            } while (positems.contains(new Scalar(neg_item_index)));
            out[0] = user_index;
            out[1] = pos_item_index;
            out[2] = neg_item_index;
            return;
        }
    }

    /**
     * Trains the BPR module for {@code epoch} epochs. Each epoch runs {@code batch_size}
     * SGD steps, each on {@code sample_size} freshly sampled (user, pos, neg) triples,
     * minimizing the BPR pairwise loss plus an L2 penalty weighted by {@code l2}.
     * Stops early when {@code isConverged} and {@code earlyStop} both hold.
     */
    @Override
    public void trainModel() {
        BPR bpr = (BPR) module; // hoist the cast instead of repeating it per batch
        Map<Integer, Tensor> userItems = getUserItems(train_matrix);
        for (int epoch_num = 0; epoch_num < this.epoch; epoch_num++) {
            long t1 = System.currentTimeMillis();
            this.module.train();
            loss = 0.0d;
            for (int batchCount = 0; batchCount < batch_size; batchCount++) {
                functional.enable_grad();
                int[] user_indexes = new int[sample_size];
                int[] pos_item_indexes = new int[sample_size];
                int[] neg_item_indexes = new int[sample_size];
                int[] triple = new int[3];
                for (int sampleCount = 0; sampleCount < sample_size; sampleCount++) {
                    sampleTriple(userItems, triple);
                    user_indexes[sampleCount] = triple[0];
                    pos_item_indexes[sampleCount] = triple[1];
                    neg_item_indexes[sampleCount] = triple[2];
                }
                Tensor users = toIndexTensor(user_indexes);
                Tensor pos_items = toIndexTensor(pos_item_indexes);
                Tensor neg_items = toIndexTensor(neg_item_indexes);
                this.optimizer.zero_grad();
                // BPR ranking loss plus L2 regularization on the embeddings touched this batch.
                Tensor loss_tensor = bpr.forward(users, pos_items, neg_items)
                        .add(bpr.l2_norm(users, pos_items, neg_items).mul(new Scalar(l2)));
                // NOTE(review): moving the already-computed loss tensor between devices here
                // does not relocate the computation itself — confirm this placement is intended.
                if (this.device == LibDLRecommender.USE_CUDA) {
                    loss_tensor = loss_tensor.cuda();
                } else {
                    loss_tensor = loss_tensor.cpu();
                }
                loss += loss_tensor.mean().item().to_double();
                loss_tensor.backward();
                this.optimizer.step();
                if (batchCount % 500 == 0) {
                    System.out.print("batchCount: " + batchCount + "\r"); // in-place progress indicator
                }
            }
            System.out.println(); // terminate the \r progress line
            loss /= batch_size;
            long t2 = System.currentTimeMillis();
            String output_str = String.format("Epoch %d [%.1f s]\tloss=%.4f",
                    epoch_num + 1,
                    (float) (t2 - t1) / 1000,
                    loss
            );
            LOG.info(output_str);
            if (isConverged(epoch_num, verbose) && earlyStop) {
                break;
            }
            lastLoss = loss;
        }
    }
}
