import org.apache.spark.sql.*;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.ml.recommendation.ALS;
import org.apache.spark.ml.recommendation.ALSModel;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

import java.util.Properties;

import static org.apache.spark.sql.functions.*;

/**
 * Spark batch job: reads tab-separated student records from HDFS, derives
 * (1) each student's latest-semester snapshot ("student_info"),
 * (2) ALS-based student-to-student recommendations ("cf_recommendations"),
 * (3) a global GPA top-3 ("top_students"),
 * and writes each result into a freshly rebuilt MySQL database.
 *
 * The MySQL schema (primary keys, comments, charset) is created up front by
 * {@link #setupDatabase()}; Spark then APPENDS into those empty tables so the
 * hand-written DDL is preserved (see writeToMySQL).
 */
public class StudentSparkAnalysisJava {

    // SECURITY NOTE(review): host and credentials are hardcoded; consider moving
    // them to environment variables or a config file before deploying.
    private static final String HDFS_INPUT_PATH = "hdfs://192.168.40.43:8020/home/flume/term6Program_hdfs_landing/*/*.txt";
    private static final String MYSQL_HOST = "192.168.40.43";
    private static final String MYSQL_PORT = "3306";
    private static final String MYSQL_DB_NAME = "program";
    private static final String MYSQL_USER = "root";
    private static final String MYSQL_PASSWORD = "Root123!";
    private static final String MYSQL_DRIVER = "com.mysql.cj.jdbc.Driver";
    // URL used by Spark's JDBC writer (batched statements enabled).
    private static final String MYSQL_SPARK_URL = String.format(
            "jdbc:mysql://%s:%s/%s?rewriteBatchedStatements=true&allowPublicKeyRetrieval=true&serverTimezone=UTC",
            MYSQL_HOST, MYSQL_PORT, MYSQL_DB_NAME);
    // URL without a database, used only to DROP/CREATE the database itself.
    private static final String MYSQL_SETUP_URL_NO_DB = String.format(
            "jdbc:mysql://%s:%s/?allowPublicKeyRetrieval=true&serverTimezone=UTC",
            MYSQL_HOST, MYSQL_PORT);

    /**
     * Job entry point. Pipeline: rebuild MySQL schema -> read HDFS text ->
     * parse/filter -> compute the three result tables -> write each to MySQL.
     * Exits early (after cleanup) if the input or any intermediate result is empty.
     */
    public static void main(String[] args) {
        System.out.println("开始 Spark 学生数据分析作业 (Java版本)...");

        // Rebuild the target database first; abort the job if that fails,
        // since every downstream write depends on the tables existing.
        try {
            setupDatabase();
            System.out.println("数据库和表结构初始化/重建完成。");
        } catch (SQLException e) {
            System.err.println("数据库初始化失败: " + e.getMessage());
            e.printStackTrace();
            return;
        }

        System.out.println("正在初始化 SparkSession...");
        SparkSession spark = SparkSession.builder()
                .appName("StudentDataProcessingFromHDFS_to_MySQL_Java")
                .master("local[*]")
                .config("spark.driver.host", "localhost")
                .getOrCreate();
        System.out.println("SparkSession 初始化完成. Spark 版本: " + spark.version());
        spark.sparkContext().setLogLevel("WARN");

        System.out.println("正在从 HDFS 路径读取数据: " + HDFS_INPUT_PATH);
        Dataset<Row> rawTextDf;
        try {
            rawTextDf = spark.read().text(HDFS_INPUT_PATH);
            System.out.println("成功从 HDFS 发起读取操作。");
        } catch (Exception e) {
            System.err.println("从 HDFS 读取数据时出错: " + e.getMessage());
            e.printStackTrace();
            spark.stop();
            return;
        }

        // Count once instead of isEmpty() + count(): each would otherwise
        // trigger a separate full scan of the input.
        long rawCount = rawTextDf.count();
        if (rawCount == 0) {
            System.out.println("HDFS 路径 " + HDFS_INPUT_PATH + " 中没有找到数据。作业退出。");
            spark.stop();
            return;
        }
        System.out.println("从 HDFS 读取到的原始数据行数: " + rawCount);
        rawTextDf.show(5, false);

        System.out.println("正在解析数据...");
        // Each input line is 8 tab-separated fields:
        // class_id, name, gender, dob, student_id, semester, gpa, status.
        // Hoist the split expression instead of rebuilding it per column.
        Column parts = split(col("value"), "\t");
        Column[] splitCols = new Column[8];
        for (int i = 0; i < 8; i++) {
            splitCols[i] = parts.getItem(i);
        }

        // Parse, cast, and drop rows missing the keys we aggregate on.
        // Note: a malformed cast (e.g. non-numeric gpa) yields NULL and is
        // therefore filtered out here as well.
        Dataset<Row> parsedDf = rawTextDf.select(
                trim(splitCols[0]).as("class_id"),
                trim(splitCols[1]).as("name"),
                trim(splitCols[2]).cast(DataTypes.IntegerType).as("gender"),
                to_date(trim(splitCols[3]), "yyyy-MM-dd").as("dob"),
                trim(splitCols[4]).as("student_id"),
                trim(splitCols[5]).cast(DataTypes.IntegerType).as("semester"),
                trim(splitCols[6]).cast(DataTypes.DoubleType).as("gpa"),
                trim(splitCols[7]).as("status")
        ).filter(
                col("student_id").isNotNull()
                        .and(col("student_id").notEqual(""))
                        .and(col("gpa").isNotNull())
                        .and(col("semester").isNotNull())
        );
        parsedDf.cache(); // reused by student_info, the ID map, and the ALS input

        System.out.println("解析后的数据 Schema 和样本:");
        parsedDf.printSchema();
        parsedDf.show(5, false);
        long parsedCount = parsedDf.count();
        System.out.println("parsedDf 处理得到的行数: " + parsedCount);

        if (parsedCount == 0) {
            System.out.println("解析数据后没有有效记录。作业退出。");
            spark.stop();
            return;
        }

        System.out.println("正在计算学生最新信息 (student_info)...");
        // One row per student: the record from their highest semester.
        WindowSpec windowSpecLatest = Window.partitionBy("student_id").orderBy(col("semester").desc());
        Dataset<Row> studentInfoDf = parsedDf.withColumn("rn", row_number().over(windowSpecLatest))
                .filter(col("rn").equalTo(1))
                .select(
                        col("student_id"), col("class_id"), col("name"), col("gender"), col("dob"),
                        col("semester").as("latest_semester"),
                        col("gpa").as("latest_gpa"), col("status")
                ).cache(); // reused again for top_students below

        System.out.println("student_info_df 处理得到的行数: " + studentInfoDf.count());
        if (!studentInfoDf.isEmpty()) {
            System.out.println("最新的学生信息 (student_info_df) 样本:");
            studentInfoDf.show(5, false);
            writeToMySQL(studentInfoDf, "student_info");
        } else {
            System.out.println("student_info_df 为空，跳过写入 MySQL。");
        }

        System.out.println("正在进行协同过滤推荐 (cf_recommendations)...");

        System.out.println("正在为学生生成从0开始的连续整数ID...");
        // ALS requires integer user/item IDs, so map each student_id to a
        // dense 0-based integer. The unpartitioned window forces all rows to
        // one partition — acceptable at this data size, but it does not scale.
        WindowSpec studentIdWindow = Window.orderBy(col("student_id"));
        Dataset<Row> uniqueStudentsWithIntIdDf = parsedDf.select("student_id").distinct()
                .withColumn("user_id_int", row_number().over(studentIdWindow).minus(lit(1)))
                .select(
                        col("student_id"),
                        col("user_id_int").cast(DataTypes.IntegerType).as("user_id_int")
                );
        uniqueStudentsWithIntIdDf.cache();
        System.out.println("生成的学生整数ID映射表示例:");
        uniqueStudentsWithIntIdDf.show(5, false);
        uniqueStudentsWithIntIdDf.printSchema();

        Dataset<Row> uMap1Prep = uniqueStudentsWithIntIdDf
                .withColumnRenamed("student_id", "u1_original_sid")
                .withColumnRenamed("user_id_int", "u1_user_id_int")
                .as("u_map1");

        Dataset<Row> uMap2Prep = uniqueStudentsWithIntIdDf
                .withColumnRenamed("student_id", "u2_original_sid")
                .withColumnRenamed("user_id_int", "u2_user_id_int")
                .as("u_map2");

        // NOTE(review): both joins below match on the SAME parsedDf.student_id,
        // so user_A always equals user_B_as_item — the ALS rating matrix
        // contains only diagonal (self, self, gpa) entries. Recommendations for
        // other students then come purely from extrapolated latent factors.
        // Confirm the intended pairing (e.g. pairing students within a class,
        // or using semester as the item dimension) — this looks like a bug,
        // but the fix depends on the intended semantics.
        Dataset<Row> alsUserUserInputDf = parsedDf
                .join(uMap1Prep, parsedDf.col("student_id").equalTo(uMap1Prep.col("u1_original_sid")))
                .join(uMap2Prep, parsedDf.col("student_id").equalTo(uMap2Prep.col("u2_original_sid")))
                .select(
                        uMap1Prep.col("u1_user_id_int").as("user_A"),
                        uMap2Prep.col("u2_user_id_int").as("user_B_as_item"),
                        col("gpa")
                );

        if (!alsUserUserInputDf.isEmpty()) {
            System.out.println("ALS user-user 输入数据行数: " + alsUserUserInputDf.count());
            alsUserUserInputDf.show(5, false);
            alsUserUserInputDf.printSchema();

            ALS als = new ALS()
                    .setUserCol("user_A")
                    .setItemCol("user_B_as_item")
                    .setRatingCol("gpa")
                    .setColdStartStrategy("drop")
                    .setRank(10)
                    .setMaxIter(10)
                    .setRegParam(0.1)
                    .setNonnegative(true);

            System.out.println("正在训练 ALS user-user 模型...");
            try {
                ALSModel modelUserUser = als.fit(alsUserUserInputDf);
                System.out.println("ALS user-user 模型训练完成。");

                // Ask for 7 recommendations so that after dropping any
                // self-recommendation we still have up to 6 left.
                Dataset<Row> userRecommendations = modelUserUser.recommendForAllUsers(6 + 1);

                // Flatten recommendForAllUsers' array-of-structs output; the
                // struct fields are named after the item column and "rating".
                Dataset<Row> cfResultsExploded = userRecommendations.select(
                        col("user_A").as("src_user_id_int"),
                        explode(col("recommendations")).as("rec_struct")
                ).select(
                        col("src_user_id_int"),
                        col("rec_struct.user_B_as_item").as("rec_user_id_int"),
                        col("rec_struct.rating").as("score")
                ).filter(col("src_user_id_int").notEqual(col("rec_user_id_int")));

                // Map the integer IDs on both sides back to original student IDs.
                Dataset<Row> cfFinalDf = cfResultsExploded
                        .join(uniqueStudentsWithIntIdDf.as("u1_map_back"), expr("src_user_id_int = u1_map_back.user_id_int"))
                        .join(uniqueStudentsWithIntIdDf.as("u2_map_back"), expr("rec_user_id_int = u2_map_back.user_id_int"))
                        .select(
                                col("u1_map_back.student_id").as("user_student_id"),
                                col("u2_map_back.student_id").as("recommended_student_id"),
                                col("score")
                        );

                // Keep each student's top-6 recommendations by score.
                WindowSpec windowSpecRec = Window.partitionBy("user_student_id").orderBy(col("score").desc());
                Dataset<Row> cfResultsRankedDf = cfFinalDf.withColumn("rank", dense_rank().over(windowSpecRec))
                        .filter(col("rank").leq(6));

                if (!cfResultsRankedDf.isEmpty()) {
                    System.out.println("协同过滤推荐结果 (cf_results_ranked_df) 样本:");
                    cfResultsRankedDf.show(12, false);
                    writeToMySQL(cfResultsRankedDf, "cf_recommendations");
                } else {
                    System.out.println("没有生成 CF 推荐结果。");
                }
            } catch (Exception e_als) {
                // ALS failure should not abort the remaining analyses.
                System.err.println("ALS 模型训练或推荐过程中出错: " + e_als.getMessage());
                e_als.printStackTrace();
            }
        } else {
            System.out.println("ALS user-user 输入数据为空，跳过 CF 推荐。");
        }

        System.out.println("正在计算全局 Top 3 学生 (top_students)...");
        if (!studentInfoDf.isEmpty()) {
            // student_id breaks GPA ties, so dense_rank yields unique ranks
            // (required: `rank` is the PRIMARY KEY of top_students).
            WindowSpec windowSpecGpaRank = Window.orderBy(col("latest_gpa").desc(), col("student_id").asc());
            Dataset<Row> topStudentsDf = studentInfoDf
                    .withColumn("rank", dense_rank().over(windowSpecGpaRank))
                    .filter(col("rank").leq(3))
                    .select("rank", "student_id", "name", "latest_gpa");

            System.out.println("全局 Top 3 学生 (top_students_df) 样本:");
            topStudentsDf.show(false);
            writeToMySQL(topStudentsDf, "top_students");
        } else {
            System.out.println("student_info_df 为空，跳过 Top 3 学生计算。");
        }

        System.out.println("正在解除缓存并停止 SparkSession...");
        parsedDf.unpersist();
        studentInfoDf.unpersist();
        uniqueStudentsWithIntIdDf.unpersist();

        spark.stop();
        System.out.println("Spark 作业 (HDFS to MySQL, Java版本) 完成。");
    }

    /**
     * Drops and recreates the MySQL database, then creates the four target
     * tables (student_info, cf_recommendations, top_students, web_users) with
     * explicit primary keys, column comments, and utf8mb4 charset.
     *
     * @throws SQLException if the driver is missing or any DDL statement fails
     */
    private static void setupDatabase() throws SQLException {
        System.out.println("正在初始化MySQL数据库 '" + MYSQL_DB_NAME + "'...");
        String dropDatabaseSQL = "DROP DATABASE IF EXISTS " + MYSQL_DB_NAME;
        String createDatabaseSQL = "CREATE DATABASE " + MYSQL_DB_NAME + " CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci";
        String createStudentInfoTableSQL = "CREATE TABLE student_info (" +
                "student_id VARCHAR(20) PRIMARY KEY COMMENT '学号'," +
                "class_id VARCHAR(50) COMMENT '班级号'," +
                "name VARCHAR(100) COMMENT '姓名'," +
                "gender INT COMMENT '性别 1男 0女'," +
                "dob DATE COMMENT '出生年月日'," +
                "latest_semester INT COMMENT '最新学期'," +
                "latest_gpa DOUBLE COMMENT '最新绩点'," +
                "status VARCHAR(5) COMMENT '在校状态 A在校 L毕业'" +
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='学生最新信息表'";
        // `rank` is a reserved word in MySQL 8, hence the backticks below.
        String createCfRecommendationsTableSQL = "CREATE TABLE cf_recommendations (" +
                "user_student_id VARCHAR(20) COMMENT '被推荐的学生学号'," +
                "recommended_student_id VARCHAR(20) COMMENT '推荐给该学生的其他学生学号'," +
                "score DOUBLE COMMENT '推荐分数/相似度'," +
                "`rank` INT COMMENT '推荐排名'," +
                "PRIMARY KEY (user_student_id, recommended_student_id)" +
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='协同过滤推荐结果表'";
        String createTopStudentsTableSQL = "CREATE TABLE top_students (" +
                "`rank` INT PRIMARY KEY COMMENT '排名'," +
                "student_id VARCHAR(20) COMMENT '学号'," +
                "name VARCHAR(100) COMMENT '姓名'," +
                "latest_gpa DOUBLE COMMENT '最新绩点'" +
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='全局TopN学生表'";
        String createWebUsersTableSQL = "CREATE TABLE web_users (" +
                "id INT AUTO_INCREMENT PRIMARY KEY," +
                "username VARCHAR(80) UNIQUE NOT NULL COMMENT '登录用户名'," +
                "password_hash VARCHAR(255) NOT NULL COMMENT '哈希后的密码'," +
                "email VARCHAR(120) UNIQUE COMMENT '用户邮箱 (可选)'" +
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='Web应用用户表'";
        // Explicit driver registration; redundant on JDBC 4+ but harmless and
        // gives a clearer error when the driver jar is missing.
        try {
            Class.forName(MYSQL_DRIVER);
        } catch (ClassNotFoundException e) {
            System.err.println("未找到MySQL JDBC驱动: " + e.getMessage());
            throw new SQLException("MySQL JDBC Driver not found", e);
        }
        // Step 1: connect without a database and rebuild it from scratch.
        Properties connectionPropsNoDb = new Properties();
        connectionPropsNoDb.put("user", MYSQL_USER);
        connectionPropsNoDb.put("password", MYSQL_PASSWORD);
        try (Connection conn = DriverManager.getConnection(MYSQL_SETUP_URL_NO_DB, connectionPropsNoDb);
             Statement stmt = conn.createStatement()) {
            System.out.println("执行: " + dropDatabaseSQL);
            stmt.executeUpdate(dropDatabaseSQL);
            System.out.println("执行: " + createDatabaseSQL);
            stmt.executeUpdate(createDatabaseSQL);
            System.out.println("数据库 '" + MYSQL_DB_NAME + "' 已创建/重建。");
        }
        // Step 2: connect to the new database and create the tables.
        Properties connectionPropsWithDb = new Properties();
        connectionPropsWithDb.put("user", MYSQL_USER);
        connectionPropsWithDb.put("password", MYSQL_PASSWORD);
        String mysqlJdbcUrlForTableCreation = String.format("jdbc:mysql://%s:%s/%s?allowPublicKeyRetrieval=true&serverTimezone=UTC", MYSQL_HOST, MYSQL_PORT, MYSQL_DB_NAME);

        try (Connection conn = DriverManager.getConnection(mysqlJdbcUrlForTableCreation, connectionPropsWithDb);
             Statement stmt = conn.createStatement()) {
            System.out.println("执行: " + createStudentInfoTableSQL);
            stmt.executeUpdate(createStudentInfoTableSQL);
            System.out.println("执行: " + createCfRecommendationsTableSQL);
            stmt.executeUpdate(createCfRecommendationsTableSQL);
            System.out.println("执行: " + createTopStudentsTableSQL);
            stmt.executeUpdate(createTopStudentsTableSQL);
            System.out.println("执行: " + createWebUsersTableSQL);
            stmt.executeUpdate(createWebUsersTableSQL);
            System.out.println("所有表已在数据库 '" + MYSQL_DB_NAME + "' 中创建。");
        }
    }

    /**
     * Writes a DataFrame into an existing MySQL table via Spark's JDBC writer.
     * Failures are logged but not rethrown, so one failed table does not abort
     * the rest of the job (best-effort by design).
     *
     * @param df        the result DataFrame; column names must match the table
     * @param tableName target table (created beforehand by setupDatabase)
     */
    private static void writeToMySQL(Dataset<Row> df, String tableName) {
        System.out.println("正在将数据写入 MySQL 表: " + tableName);
        try {
            Properties connectionProperties = new Properties();
            connectionProperties.put("user", MYSQL_USER);
            connectionProperties.put("password", MYSQL_PASSWORD);
            connectionProperties.put("driver", MYSQL_DRIVER);
            // FIX: use Append, not Overwrite. Overwrite makes Spark DROP the
            // table and recreate it with an inferred schema, silently losing
            // the primary keys, column comments, and charset defined in
            // setupDatabase(). The tables are always freshly created and empty
            // when this job runs, so Append is both safe and schema-preserving.
            df.write()
                    .mode(SaveMode.Append)
                    .jdbc(MYSQL_SPARK_URL, tableName, connectionProperties);
            System.out.println("成功将数据写入 MySQL 表: " + tableName);
        } catch (Exception e) {
            System.err.println("写入数据到 MySQL 表 '" + tableName + "' 时出错: " + e.getMessage());
            e.printStackTrace();
        }
    }
}