package com.xxrl.shop.service;

import com.xxrl.shop.domain.ProductSimilar;
import com.xxrl.shop.domain.UserRating;
import com.xxrl.shop.repository.ProductSimilarRepository;
import org.apache.commons.io.FileUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.evaluation.RegressionMetrics;
import org.apache.spark.mllib.recommendation.ALS;
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel;
import org.apache.spark.mllib.recommendation.Rating;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import scala.Tuple2;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * @author mis
 */
@Service
public class RecommendService {

    private static final Logger logger = LoggerFactory.getLogger(RecommendService.class);

    private final ProductSimilarRepository productSimilarRepository;

    /** Base directory containing the rating data files. */
    private final String dataPath;

    /** Directory where the trained ALS model is persisted (and reloaded from). */
    private final String modelPath;

    private final JavaSparkContext sparkContext;

    /** Current ALS recommendation model; replaced on every (re)training run. */
    private MatrixFactorizationModel model;

    public RecommendService(@Value("${my.spark.master}") String master,
                            @Value("${my.spark.dataPath}") String dataPath,
                            @Value("${my.spark.modelPath}") String modelPath,
                            @Value("${my.spark.appName}") String appName,
                            @Value("${my.env}") String env,
                            @Value("${my.firstStart}") Boolean firstStart,
                            ProductSimilarRepository productSimilarRepository) {
        this.productSimilarRepository = productSimilarRepository;
        if ("dev".equals(env)) {
            // Local dev environment: point Hadoop at a local installation if needed.
//            System.setProperty("hadoop.home.dir","D:\\Program_Files\\hadoop-2.9.2");
        }

        this.dataPath = dataPath;
        this.modelPath = modelPath;
        SparkConf sparkConf = new SparkConf()
                .setAppName(appName)
                .setMaster(master)
                .set("spark.driver.host", "localhost");
        sparkContext = new JavaSparkContext(sparkConf);
        // On first start, train the model from the initial rating data;
        // otherwise reload the previously saved model from disk.
        if (firstStart) {
//            initData();
            trainModel("init_ratings");
        } else {
            model = MatrixFactorizationModel.load(sparkContext.sc(), modelPath);
        }
    }

    /**
     * Loads rating records from {@code dataPath/path}.
     * Each line is expected to be CSV with at least three fields:
     * userId, productId, rating.
     *
     * @param path data file (or directory) name relative to {@link #dataPath}
     * @return RDD of parsed {@link Rating} records
     */
    private JavaRDD<Rating> loadData(String path) {
        JavaRDD<String> rawData = sparkContext.textFile(dataPath + "/" + path);
        // Only the first three CSV fields are used; any extra columns are ignored.
        return rawData.map(line -> {
            String[] fields = line.split(",");
            return new Rating(Integer.parseInt(fields[0]),
                    Integer.parseInt(fields[1]),
                    Double.parseDouble(fields[2]));
        });
    }

    /**
     * Trains a new ALS model from the given data file, replaces the in-memory
     * model, and persists it to {@link #modelPath} (overwriting any old model).
     *
     * @param path data file name relative to {@link #dataPath}
     * @return the ratings RDD the model was trained on
     */
    public JavaRDD<Rating> trainModel(String path) {
        JavaRDD<Rating> ratings = loadData(path);

        // rank=5, iterations=1, lambda=0.01
        // NOTE(review): a single ALS iteration is unusually low — confirm intended.
        model = ALS.train(ratings.rdd(), 5, 1, 0.01);

        logger.info("training finished for {}, with {} user features , {} product features",
                path,
                model.userFeatures().count(),
                model.productFeatures().count());

        try {
            // Remove the previous model directory; model.save fails if it exists.
            FileUtils.deleteDirectory(new File(modelPath));
        } catch (IOException e) {
            // Log with the full stack trace, not just the message.
            logger.error("delete model file failed for {}", modelPath, e);
        }
        model.save(sparkContext.sc(), modelPath);

        logger.info("training model saving at {}", modelPath);

        return ratings;
    }

    /**
     * Evaluates the current model against the ratings in the given data file,
     * logging RMSE and R-squared regression metrics.
     *
     * @param path data file name relative to {@link #dataPath}
     */
    public void evaluate(String path) {

        JavaRDD<Rating> ratings = loadData(path);

        JavaRDD<Tuple2<Object, Object>> userProducts = ratings.map(r -> new Tuple2<>(r.user(), r.product()));

        // Predicted rating keyed by (user, product).
        JavaPairRDD<Tuple2<Integer, Integer>, Object> predictions = JavaPairRDD.fromJavaRDD(
                model.predict(JavaRDD.toRDD(userProducts)).toJavaRDD().map(r ->
                        new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating())));
        // Join actual ratings with predictions on (user, product) and keep the value pairs.
        JavaRDD<Tuple2<Object, Object>> ratesAndPreds =
                JavaPairRDD.fromJavaRDD(ratings.map(r ->
                        new Tuple2<Tuple2<Integer, Integer>, Object>(
                                new Tuple2<>(r.user(), r.product()),
                                r.rating())
                )).join(predictions).values();

        RegressionMetrics regressionMetrics = new RegressionMetrics(ratesAndPreds.rdd());

        logger.info("training evaluation: rootMeanSquaredError = {} R-squared = {}",
                regressionMetrics.rootMeanSquaredError(),
                regressionMetrics.r2());

    }

    /**
     * Recommends products similar to the given product, based on the
     * precomputed similarity table.
     *
     * @param productId product id
     * @param size maximum number of products to return
     * @return list of similar product ids; empty (never null) when no
     *         similarity record exists for the product
     */
    public List<Long> recommendForProduct(Integer productId, int size) {
        ProductSimilar productSimilar = productSimilarRepository.findTopByProductId(productId);
        List<Long> res = new ArrayList<>();
        if (productSimilar == null) {
            return res;
        }
        String[] split = productSimilar.getSimilarIds().split(",");
        int limit = Math.min(size, split.length);
        for (int i = 0; i < limit; ++i) {
            res.add(Long.parseLong(split[i]));
        }
        return res;
    }

    /**
     * Recommends products for a user via the current ALS model.
     *
     * @param userId user id
     * @param size maximum number of products to return
     * @return list of recommended product ids
     */
    public List<Long> recommendForUser(Integer userId, int size) {
        List<Long> productList = new ArrayList<>(size);
        for (Rating rating : model.recommendProducts(userId, size)) {
            productList.add((long) rating.product());
        }
        return productList;
    }

    /**
     * Splits init_ratings.csv into the partitioned init_ratings directory.
     * For testing only.
     *
     * @deprecated for test use only
     */
    @Deprecated
    public void initData() {
        String data = dataPath + "/init_ratings.csv";
        String path = dataPath + "/init_ratings";
        JavaRDD<String> stringJavaRdd = sparkContext.textFile(data);
        stringJavaRdd.saveAsTextFile(path);
    }

    /**
     * Builds a new training data set by combining a 20% sample of an existing
     * data file with fresh user ratings, and saves it under a new name.
     *
     * @param newFileName output file name relative to {@link #dataPath}
     *                    (deleted first if it already exists)
     * @param data existing data file name relative to {@link #dataPath}
     * @param all new user ratings to append
     */
    public void generateNewData(String newFileName, String data, List<UserRating> all) {
        data = dataPath + "/" + data;
        newFileName = dataPath + "/" + newFileName;
        try {
            FileUtils.deleteDirectory(new File(newFileName));
        } catch (IOException e) {
            // Log with the full stack trace, not just the message.
            logger.error("delete old data failed for {}", newFileName, e);
        }
        JavaRDD<String> rawData = sparkContext.textFile(data);
        // Sample 20% of the existing data (without replacement).
        JavaRDD<String> sample = rawData.sample(false, 0.2);
        JavaRDD<UserRating> parallelize = sparkContext.parallelize(all);
        JavaRDD<String> map = parallelize
                .map(x -> x.getUserId() + "," + x.getProductId() + "," + x.getRating().toString())
                .union(sample);
        // Persist the combined data set.
        map.saveAsTextFile(newFileName);
    }
}
