package com.tzg157.fitness.modals;

import java.io.IOException;

import org.apache.spark.ml.evaluation.RegressionEvaluator;
import org.apache.spark.ml.recommendation.ALS;
import org.apache.spark.ml.recommendation.ALSModel;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class ALSModal {
    /**
     * Trains an ALS collaborative-filtering model on the Hive table
     * {@code default.user_rating}, evaluates it with RMSE on a held-out split,
     * persists the model, and prints several Top-10 recommendation views.
     *
     * @param args unused
     * @throws IOException if persisting the model to {@code modelPath} fails
     */
    public static void main(String[] args) throws IOException {
        // Run as "root" so writes to Hive/HDFS succeed from a local dev box.
        System.setProperty("HADOOP_USER_NAME", "root");

        // Create a SparkSession with Hive support so the ratings table is visible.
        SparkSession spark = SparkSession.builder()
                .master("local[4]")
                .appName("ALSModal")
                .enableHiveSupport()
                .config("hive.metastore.uris", "thrift://localhost:9083")
                .getOrCreate();

        try {
            // Load the ratings dataset and log its schema for sanity checking.
            // Expected columns: uid (user), cid (course), rating.
            Dataset<Row> data = spark.sql("SELECT * FROM default.user_rating");
            data.printSchema();

            // Split into 80% training / 20% test.
            Dataset<Row>[] splits = data.randomSplit(new double[] { 0.8, 0.2 });
            Dataset<Row> training = splits[0];
            Dataset<Row> test = splits[1];

            // Configure ALS. coldStartStrategy="drop" is set on the estimator
            // (the documented idiom) so rows with NaN predictions — users/items
            // unseen during training — are dropped before RMSE is computed.
            ALS als = new ALS()
                    .setMaxIter(5)
                    .setRegParam(0.01)
                    .setUserCol("uid")
                    .setItemCol("cid")
                    .setRatingCol("rating")
                    .setColdStartStrategy("drop");

            ALSModel model = als.fit(training);

            // Predict ratings on the held-out test set.
            Dataset<Row> predictions = model.transform(test);

            // Evaluate the model with root-mean-square error.
            RegressionEvaluator evaluator = new RegressionEvaluator()
                    .setMetricName("rmse")
                    .setLabelCol("rating")
                    .setPredictionCol("prediction");
            double rmse = evaluator.evaluate(predictions);
            System.out.println("均方根误差 = " + rmse);

            // Persist the model. overwrite() prevents the job failing when the
            // path already exists from a previous run (plain save() throws).
            String modelPath = "/opt/hive/models/als_model";
            model.write().overwrite().save(modelPath);
            System.out.println("模型已保存到 " + modelPath);

            // Top-10 course recommendations for every user, and top-10 user
            // recommendations for every course.
            Dataset<Row> userRecs = model.recommendForAllUsers(10);
            Dataset<Row> courseRecs = model.recommendForAllItems(10);

            // Same recommendations restricted to a small subset (3 users / 3 courses).
            Dataset<Row> users = data.select(als.getUserCol()).distinct().limit(3);
            Dataset<Row> userSubsetRecs = model.recommendForUserSubset(users, 10);
            Dataset<Row> course = data.select(als.getItemCol()).distinct().limit(3);
            Dataset<Row> courseSubSetRecs = model.recommendForItemSubset(course, 10);

            // Show samples of each result; show(false) disables column truncation.
            training.show(false);
            predictions.show(false);
            userRecs.show(false);
            courseRecs.show(false);
            userSubsetRecs.show(false);
            courseSubSetRecs.show(false);
        } finally {
            // Always release Spark resources, even if the job fails midway.
            spark.stop();
        }
    }
}