package com.kili.mirco.spark_als.ref.hanlder;

import com.google.common.collect.Maps;
import com.kili.mirco.spark_als.com.CommonConstants;
import com.kili.mirco.spark_als.exp.InitializedPathExeception;
import com.kili.mirco.spark_als.model.Ratings;
import com.kili.mirco.spark_als.ref.cmp.MatrixComparetor;
import com.kili.mirco.spark_als.spark.ParseRating;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.recommendation.ALS;
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel;
import org.apache.spark.mllib.recommendation.Rating;
import org.apache.spark.rdd.RDD;
import org.jblas.DoubleMatrix;
import org.springframework.core.io.support.PropertiesLoaderUtils;
import scala.Tuple2;

import java.io.IOException;
import java.util.*;
import java.util.stream.Stream;

import static java.util.stream.Collectors.toList;

/**
 * ALS-based recommendation processor: trains a Spark MLlib matrix-factorization
 * model from a ratings file and exposes user/item recommendation queries.
 *
 * @date 19-6-3
 * @author jackliang
 */
@Data
@Slf4j
public class MFMRecommendProcessor implements RecommendProcessor {


    private static MatrixFactorizationModel model = null;

    private static JavaRDD<Rating> ratings = null;

    private static String SPARK_FILE_RESOURCE_PATH = null;

    static {
        try {

            SPARK_FILE_RESOURCE_PATH = Optional.
                    ofNullable(PropertiesLoaderUtils.loadAllProperties("path.properties")).
                    orElseThrow(() -> new InitializedPathExeception()).
                    getProperty(CommonConstants.LOADER_PATH);

        } catch (IOException e) {
            log.error("Properties loaded error , Please to check your configuration.");
            System.exit(-1);
        }
    }


    public MFMRecommendProcessor() {
        this(new JavaSparkContext(new SparkConf().
                setAppName("als_model").
                setMaster(CommonConstants.SPARK_MASTER_PATH)));
    }


    public MFMRecommendProcessor(JavaSparkContext sparkContext) {
        JavaRDD<String> lines = sparkContext.textFile(SPARK_FILE_RESOURCE_PATH);
        ratings = lines.map(new ParseRating());
        this.doBasicValidate(ratings);
        MatrixFactorizationModel modelx =
                ALS.train(
                        ratings.rdd(),
                        CommonConstants.RANK,
                        CommonConstants.ITERATIONS,
                        CommonConstants.LAMBDA,
                        CommonConstants.BLOCKS);
        model = modelx;

    }

    /**
     * Calculate a list of similar products corresponding to a product
     *
     * @param itemId
     * @return
     */
    public List<Ratings> refiningSimilarityItemCF(final long itemId) {

        JavaRDD<Tuple2<Object, double[]>> modelRdd = model.productFeatures().toJavaRDD();
        JavaPairRDD<Object, double[]> pairRDD = JavaPairRDD.fromJavaRDD(modelRdd);

        List<double[]> list = pairRDD.lookup(itemId);
        double[] itemFactors = list.iterator().next();

        DoubleMatrix itemVector = new DoubleMatrix(itemFactors);

        JavaPairRDD<Object, Double> tuple2 = pairRDD.mapToPair(v1 -> {
            DoubleMatrix factorVector = new DoubleMatrix(v1._2);
            return new Tuple2<>(v1._1, cosineSimilarity(factorVector, itemVector));
        });

        List<Tuple2<Object, Double>> tuple3 = tuple2.
                cache().
                top(10, MatrixComparetor.MC_CMP);

        return tuple3.
                stream().
                map(var -> new Ratings(0, (int) var._1(), var._2())).
                collect(toList());
    }


    //Recommend TOP N products for each user
    public Map<Integer, List<Ratings>> recommendUsersForProducts(final int num) {
        RDD<Tuple2<Object, Rating[]>> varRDD = model.recommendUsersForProducts(num);
        List<Tuple2<Object, Rating[]>> var1 = varRDD.toJavaRDD().cache().collect();
        Map<Integer, List<Ratings>> var = this.extractTupleTransformMap(var1);
        return var;
    }


    //Recommend TOP N items for specified users
    public List<Ratings> recommendProducts(final int userId, final int num) {
        return Arrays.stream(model.recommendProducts(userId, num)).map(var1 -> new Ratings(var1.user(), var1.product(), var1.rating())).collect(toList());
    }


    //Recommend the most likely user set N for a given item
    public List<Ratings> recommendUsers(final int productId, final int num) {
        return Arrays.stream(model.recommendUsers(productId, num)).map(var1 -> new Ratings(var1.user(), var1.product(), var1.rating())).collect(toList());
    }


    //Recommend the most interested collection of TOP N individuals for all products
    public Map<Integer, List<Ratings>> recommendProductsForUsers(int num) {
        RDD<Tuple2<Object, Rating[]>> varRDD = model.recommendProductsForUsers(num);
        List<Tuple2<Object, Rating[]>> var1 = varRDD.toJavaRDD().cache().collect();
        Map<Integer, List<Ratings>> var = this.extractTupleTransformMap(var1);
        return var;

    }

    private static Double cosineSimilarity(DoubleMatrix t_1, DoubleMatrix t_2) {
        return t_1.dot(t_2) / (t_1.norm2() * t_2.norm2());
    }

    private Map<Integer, List<Ratings>> extractTupleTransformMap(List<Tuple2<Object, Rating[]>> var1) {

        Map<Integer, List<Ratings>> var = Maps.newHashMapWithExpectedSize(var1.size());

        var1.parallelStream().forEach(var2 -> {

            int _1 = (int) var2._1();
            Rating[] _2 = var2._2();

            List<Ratings> _4 = var.computeIfAbsent(_1, k -> new LinkedList<>());

            _4.addAll(Stream.of(_2).
                    map(var3 ->
                            new Ratings(var3.user(), var3.product(), var3.rating())).
                    collect(toList()));

        });

        return var;
    }

    @Override
    public void doValidate(JavaRDD RDD) {
        log.trace("Ignoring for Recently");
    }
}