package com.example;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;

import java.text.SimpleDateFormat;
import java.util.*;
import java.util.stream.Collectors;

/**
 * Spark batch job that computes user repurchase metrics from a CSV of purchase
 * records.
 *
 * <p>Expected input columns (inferred from the queries below): {@code userId},
 * {@code orderId}, {@code purchaseTime}, {@code amount}, {@code category}.
 * Four reports are printed to stdout: the basic repurchase rate, a purchase
 * frequency distribution, an RFM customer segmentation, and per-category
 * repurchase rates.
 */
public class RepurchaseMetricsCalculator {

    /**
     * Entry point. Reads the CSV (path from {@code args[0]} or a default),
     * registers it as the {@code user_purchases} temp view, and runs all four
     * metric calculations.
     *
     * @param args optional; {@code args[0]} is the CSV file path
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("UserRepurchaseMetrics")
                .setMaster("local[*]"); // local mode so the job runs without a cluster

        SparkSession spark = SparkSession.builder().config(conf).getOrCreate();

        // Resolve the input path: first CLI argument wins, otherwise a default.
        String filePath;
        if (args.length > 0) {
            filePath = args[0];
            System.out.println("使用指定的文件路径: " + filePath);
        } else {
            filePath = "data/user_purchases.csv";
            System.out.println("使用默认文件路径: " + filePath);
        }

        try {
            // Read the purchase data; the first row is a header and column
            // types are inferred from the data.
            Dataset<Row> purchaseData = spark.read()
                    .option("header", "true")
                    .option("inferSchema", "true")
                    .option("sep", ",")  // explicit separator (comma is the default)
                    .csv(filePath);

            // Register a temp view so the helpers below can use plain SQL.
            purchaseData.createOrReplaceTempView("user_purchases");

            calculateBasicRepurchaseRate(spark);
            calculateFrequencyBasedRepurchase(spark);
            calculateRFMScore(spark);
            calculateCategoryRepurchase(spark);

        } catch (Exception e) {
            System.err.println("处理数据时发生错误: " + e.getMessage());
            e.printStackTrace();
        } finally {
            spark.stop();
        }
    }

    /**
     * Computes and prints the basic repurchase rate: the percentage of
     * distinct buyers who placed at least two orders.
     *
     * @param spark session with the {@code user_purchases} view registered
     */
    private static void calculateBasicRepurchaseRate(SparkSession spark) {
        // BUG FIX: the original query combined COUNT(DISTINCT userId) with
        // GROUP BY userId + HAVING, which yields one row PER qualifying user
        // (each count == 1) instead of a single total, and an empty result —
        // hence IndexOutOfBoundsException on get(0) — when no user had two or
        // more orders. Counting over a subquery always returns exactly one row
        // holding the true number of repeat buyers.
        Dataset<Row> repeatBuyers = spark.sql(
                "SELECT COUNT(*) as repeatBuyerCount FROM (" +
                        "  SELECT userId FROM user_purchases " +
                        "  GROUP BY userId " +
                        "  HAVING COUNT(orderId) >= 2" +
                        ") repeat_users"
        );

        // Total number of distinct buyers.
        Dataset<Row> totalBuyers = spark.sql(
                "SELECT COUNT(DISTINCT userId) as totalBuyerCount " +
                        "FROM user_purchases"
        );

        long repeatBuyerCount = repeatBuyers.collectAsList().get(0).getLong(0);
        long totalBuyerCount = totalBuyers.collectAsList().get(0).getLong(0);

        // Guard against an empty dataset: 0/0 would print "NaN%".
        double repurchaseRate = totalBuyerCount == 0
                ? 0.0
                : (double) repeatBuyerCount / totalBuyerCount * 100;

        System.out.println("基本复购率 (至少购买2次的用户比例): " + String.format("%.2f%%", repurchaseRate));
    }

    /**
     * Buckets users into low/mid/high frequency groups by their approximate
     * monthly purchase frequency and prints the distribution.
     *
     * <p>NOTE(review): the {@code HAVING DATEDIFF(...) > 0} filter drops users
     * whose purchases all fall on a single day, even if they bought several
     * times — confirm this exclusion is intended.
     *
     * @param spark session with the {@code user_purchases} view registered
     */
    private static void calculateFrequencyBasedRepurchase(SparkSession spark) {
        // Per-user order count, active span in days, and a rough orders/month
        // rate (count scaled to a 30-day window over the active span).
        Dataset<Row> purchaseFrequency = spark.sql(
                "SELECT userId, " +
                        "       COUNT(orderId) as purchaseCount, " +
                        "       DATEDIFF(MAX(purchaseTime), MIN(purchaseTime)) as days, " +
                        "       COUNT(orderId) / (DATEDIFF(MAX(purchaseTime), MIN(purchaseTime)) + 1) * 30 as monthlyFrequency " +
                        "FROM user_purchases " +
                        "GROUP BY userId " +
                        "HAVING DATEDIFF(MAX(purchaseTime), MIN(purchaseTime)) > 0"
        );

        // Register as a temp view so the bucketing query below can reference it.
        purchaseFrequency.createOrReplaceTempView("purchaseFrequency");

        // Count users per frequency bucket. The CASE expression is repeated in
        // GROUP BY because SQL cannot reference the SELECT alias there.
        Dataset<Row> frequencyDistribution = spark.sql(
                "SELECT " +
                        "       CASE " +
                        "           WHEN monthlyFrequency < 1 THEN '低频用户 (<1次/月)' " +
                        "           WHEN monthlyFrequency < 2 THEN '中频用户 (1-2次/月)' " +
                        "           ELSE '高频用户 (>=2次/月)' " +
                        "       END as frequencyGroup, " +
                        "       COUNT(userId) as userCount " +
                        "FROM purchaseFrequency " +
                        "GROUP BY CASE " +
                        "           WHEN monthlyFrequency < 1 THEN '低频用户 (<1次/月)' " +
                        "           WHEN monthlyFrequency < 2 THEN '中频用户 (1-2次/月)' " +
                        "           ELSE '高频用户 (>=2次/月)' " +
                        "       END"
        );

        System.out.println("\n购买频率分布:");
        frequencyDistribution.show();
    }

    /**
     * Computes an RFM (recency / frequency / monetary) score per user and
     * prints the resulting customer-segment distribution.
     *
     * <p>NOTE(review): monetary is the AVERAGE order amount here, not the
     * total spend that classic RFM uses — confirm this is the intended
     * definition. The segment thresholds compare the three digits as one
     * integer (e.g. 432 &gt;= 414), which is a coarse approximation of
     * digit-wise RFM segmentation; kept as-is to preserve behavior.
     *
     * @param spark session with the {@code user_purchases} view registered
     */
    private static void calculateRFMScore(SparkSession spark) {
        // Raw R/F/M values per user. DATEDIFF against CURRENT_DATE gives days
        // since the most recent purchase.
        Dataset<Row> rfmData = spark.sql(
                "SELECT userId, " +
                        "       DATEDIFF(CURRENT_DATE, MAX(purchaseTime)) as recency, " +
                        "       COUNT(orderId) as frequency, " +
                        "       AVG(amount) as monetary " +
                        "FROM user_purchases " +
                        "GROUP BY userId"
        );

        // Score each user. Converting via Number tolerates whatever numeric
        // type schema inference produced (the original instanceof chains threw
        // ClassCastException for anything but Long/Integer and Double/Float —
        // e.g. AVG over a decimal column yields BigDecimal).
        JavaRDD<Row> rfmScores = rfmData.javaRDD().map(row -> {
            int userId = ((Number) row.getAs("userId")).intValue();
            int recency = ((Number) row.getAs("recency")).intValue();
            int frequency = ((Number) row.getAs("frequency")).intValue();
            double monetary = ((Number) row.getAs("monetary")).doubleValue();

            int rScore = calculateRScore(recency);
            int fScore = calculateFScore(frequency);
            int mScore = calculateMScore(monetary);
            // Pack the three 1-5 scores into one three-digit number, e.g. 545.
            int rfmScore = rScore * 100 + fScore * 10 + mScore;

            return RowFactory.create(userId, rScore, fScore, mScore, rfmScore);
        });

        // Schema for the scored rows.
        List<StructField> fields = Arrays.asList(
                DataTypes.createStructField("userId", DataTypes.IntegerType, false),
                DataTypes.createStructField("rScore", DataTypes.IntegerType, false),
                DataTypes.createStructField("fScore", DataTypes.IntegerType, false),
                DataTypes.createStructField("mScore", DataTypes.IntegerType, false),
                DataTypes.createStructField("rfmScore", DataTypes.IntegerType, false)
        );
        StructType schema = DataTypes.createStructType(fields);

        Dataset<Row> rfmScoreDF = spark.createDataFrame(rfmScores, schema);
        rfmScoreDF.createOrReplaceTempView("rfm_scores");

        // Map packed scores to customer segments and count users per segment.
        Dataset<Row> rfmDistribution = spark.sql(
                "SELECT " +
                        "       CASE " +
                        "           WHEN rfmScore >= 444 THEN '重要价值客户' " +
                        "           WHEN rfmScore >= 441 THEN '重要发展客户' " +
                        "           WHEN rfmScore >= 414 THEN '重要保持客户' " +
                        "           WHEN rfmScore >= 411 THEN '重要挽留客户' " +
                        "           WHEN rfmScore >= 144 THEN '一般价值客户' " +
                        "           WHEN rfmScore >= 141 THEN '一般发展客户' " +
                        "           WHEN rfmScore >= 114 THEN '一般保持客户' " +
                        "           ELSE '一般挽留客户' " +
                        "       END as customerType, " +
                        "       COUNT(userId) as userCount " +
                        "FROM rfm_scores " +
                        "GROUP BY CASE " +
                        "           WHEN rfmScore >= 444 THEN '重要价值客户' " +
                        "           WHEN rfmScore >= 441 THEN '重要发展客户' " +
                        "           WHEN rfmScore >= 414 THEN '重要保持客户' " +
                        "           WHEN rfmScore >= 411 THEN '重要挽留客户' " +
                        "           WHEN rfmScore >= 144 THEN '一般价值客户' " +
                        "           WHEN rfmScore >= 141 THEN '一般发展客户' " +
                        "           WHEN rfmScore >= 114 THEN '一般保持客户' " +
                        "           ELSE '一般挽留客户' " +
                        "       END"
        );
        System.out.println("\nRFM客户分类:");
        rfmDistribution.show(false);
    }

    /**
     * Recency score on a 1-5 scale: fewer days since the last purchase scores
     * higher.
     *
     * @param recency days since the user's most recent purchase
     * @return score in [1, 5]
     */
    private static int calculateRScore(int recency) {
        if (recency <= 7) return 5;
        if (recency <= 15) return 4;
        if (recency <= 30) return 3;
        if (recency <= 60) return 2;
        return 1;
    }

    /**
     * Frequency score on a 1-5 scale: more orders scores higher.
     *
     * @param frequency total number of orders
     * @return score in [1, 5]
     */
    private static int calculateFScore(int frequency) {
        if (frequency >= 10) return 5;
        if (frequency >= 7) return 4;
        if (frequency >= 4) return 3;
        if (frequency >= 2) return 2;
        return 1;
    }

    /**
     * Monetary score on a 1-5 scale: higher spend scores higher.
     *
     * @param monetary the user's average order amount (see note on
     *     {@link #calculateRFMScore})
     * @return score in [1, 5]
     */
    private static int calculateMScore(double monetary) {
        if (monetary >= 1000) return 5;
        if (monetary >= 500) return 4;
        if (monetary >= 200) return 3;
        if (monetary >= 100) return 2;
        return 1;
    }

    /**
     * Computes and prints the repurchase rate per product category, ordered by
     * rate descending.
     *
     * @param spark session with the {@code user_purchases} view registered
     */
    private static void calculateCategoryRepurchase(SparkSession spark) {
        // Distinct order count per (category, user) pair. Assumes the input
        // has a 'category' column.
        Dataset<Row> categoryData = spark.sql(
                "SELECT category, userId, COUNT(DISTINCT orderId) as purchaseCount " +
                        "FROM user_purchases " +
                        "GROUP BY category, userId"
        );

        categoryData.createOrReplaceTempView("category_purchases");

        // Per category: repeat buyers (>= 2 orders in that category), total
        // buyers, and their ratio. CASE yields NULL for non-repeat buyers, and
        // COUNT skips NULLs; Spark SQL '/' is floating-point division.
        Dataset<Row> categoryRepurchase = spark.sql(
                "SELECT category, " +
                        "       COUNT(CASE WHEN purchaseCount >= 2 THEN userId END) as repeatBuyers, " +
                        "       COUNT(userId) as totalBuyers, " +
                        "       COUNT(CASE WHEN purchaseCount >= 2 THEN userId END) / COUNT(userId) * 100 as repurchaseRate " +
                        "FROM category_purchases " +
                        "GROUP BY category " +
                        "ORDER BY repurchaseRate DESC"
        );

        System.out.println("\n各类目复购率:");
        categoryRepurchase.show(false);
    }
}