package com.bbmall.ads;

import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import static org.apache.spark.sql.functions.*;
import java.util.Arrays;

/**
 * Builds the ADS-layer (application data service) member reporting tables from
 * the DWS aggregates for a given business date.
 *
 * <p>Each {@code processXxx} method reads one or more {@code dws.*} tables
 * filtered to the partition {@code dt}, derives display-ready columns, and
 * overwrites the corresponding {@code ads.*} table via {@code insertInto}.
 * All dates are expected in {@code yyyy-MM-dd} form (see
 * {@link #processMemberTrendDisplay(String)} which parses year/month by
 * substring — TODO confirm upstream always supplies that format).
 */
public class ADSMemberProcessor {

    /** Shared session used for every table read/write; never closed here. */
    private final SparkSession spark;

    public ADSMemberProcessor(SparkSession spark) {
        this.spark = spark;
    }

    /**
     * Null-safe scalar read: first value of {@code column} from {@code ds} as a double.
     *
     * <p>Replaces bare {@code ds.select(c).first().getDouble(0)}, which throws
     * {@code NoSuchElementException} on an empty dataset and
     * {@code ClassCastException} when the column is DecimalType.
     *
     * @param ds       source dataset (may be empty)
     * @param column   numeric column to read
     * @param fallback value returned when there is no row or the value is null
     */
    private static double firstDouble(Dataset<Row> ds, String column, double fallback) {
        java.util.List<Row> rows = ds.select(column).takeAsList(1);
        if (rows.isEmpty() || rows.get(0).isNullAt(0)) {
            return fallback;
        }
        // Widen via Number so DoubleType, LongType and DecimalType all work.
        return ((Number) rows.get(0).get(0)).doubleValue();
    }

    /**
     * Null-safe scalar read: first value of {@code column} from {@code ds} as a long.
     * Same empty/null/Decimal handling as {@link #firstDouble}.
     */
    private static long firstLong(Dataset<Row> ds, String column, long fallback) {
        java.util.List<Row> rows = ds.select(column).takeAsList(1);
        if (rows.isEmpty() || rows.get(0).isNullAt(0)) {
            return fallback;
        }
        return ((Number) rows.get(0).get(0)).longValue();
    }

    /**
     * Builds the member core-metrics table (ads.ads_member_core_metrics).
     *
     * <p>Joins the daily basic stats with the conversion rate of the latest
     * month on record (cross join — requires spark.sql.crossJoin.enabled,
     * set in {@link #main}). The four *_change columns are placeholders
     * hard-coded to 0.0; presumably populated by a later comparison job —
     * TODO confirm.
     *
     * @param dt partition date (yyyy-MM-dd)
     */
    public void processMemberCoreMetrics(String dt) {
        Dataset<Row> basicStats = spark.table("dws.dws_member_basic_stats")
                .filter(col("dt").equalTo(dt));

        // Latest month on record; limit(1) after a global sort is cheap here
        // because the monthly trend table is tiny (one row per month).
        Dataset<Row> monthlyTrend = spark.table("dws.dws_member_monthly_trend")
                .orderBy(col("stat_year").desc(), col("stat_month").desc())
                .limit(1);

        // NOTE(review): if the trend table is empty the cross join produces
        // zero output rows and the target table is overwritten to empty.
        Dataset<Row> coreMetrics = basicStats
                .crossJoin(monthlyTrend.select("new_member_conversion_rate"))
                .select(
                        col("stat_date"),
                        col("total_members"),
                        col("active_members"),
                        col("activity_rate"),
                        col("avg_consumption"),
                        col("repurchase_rate"),
                        col("new_member_conversion_rate"),
                        lit(0.0).as("total_members_change"),
                        lit(0.0).as("active_members_change"),
                        lit(0.0).as("avg_consumption_change"),
                        lit(0.0).as("repurchase_rate_change"),
                        current_timestamp().as("create_time")
                );

        coreMetrics.write()
                .mode("overwrite")
                .insertInto("ads.ads_member_core_metrics");
    }

    /**
     * Builds the member level distribution table (ads.ads_member_level_display),
     * adding each level's percentage of the overall member count.
     *
     * @param dt partition date (yyyy-MM-dd)
     */
    public void processMemberLevelDisplay(String dt) {
        Dataset<Row> levelDist = spark.table("dws.dws_member_level_dist")
                .filter(col("dt").equalTo(dt));

        // sum() yields null on empty input, and getLong(0) throws on
        // DecimalType — read through the null-safe helper instead.
        long totalMembers = firstLong(
                levelDist.agg(sum("member_count").as("total_cnt")), "total_cnt", 0L);

        Dataset<Row> levelDisplay = levelDist
                // Guard the zero-member case explicitly rather than relying on
                // divide-by-zero producing nulls in the output.
                .withColumn("member_percentage",
                        totalMembers > 0
                                ? round(col("member_count").multiply(100).divide(totalMembers), 2)
                                : lit(0.0))
                .select(
                        col("stat_date"),
                        col("member_level"),
                        col("member_count"),
                        col("member_percentage"),
                        col("total_consumption"),
                        col("avg_consumption"),
                        current_timestamp().as("create_time")
                );

        levelDisplay.write()
                .mode("overwrite")
                .insertInto("ads.ads_member_level_display");
    }

    /**
     * Builds the monthly trend table (ads.ads_member_trend_display) with
     * month-over-month change percentages for consumption and new members.
     *
     * <p>Previously this method called {@code .first()} on the current and
     * previous month datasets unconditionally, which threw
     * {@code NoSuchElementException} whenever either month had no row (e.g.
     * the very first month of operation). Missing months now fall back to 0,
     * yielding a 0.0 change percentage.
     *
     * @param dt partition date (yyyy-MM-dd); year/month are parsed positionally
     */
    public void processMemberTrendDisplay(String dt) {
        int year = Integer.parseInt(dt.substring(0, 4));
        int month = Integer.parseInt(dt.substring(5, 7));

        Dataset<Row> monthlyTrend = spark.table("dws.dws_member_monthly_trend")
                .filter(col("stat_year").equalTo(year).and(col("stat_month").equalTo(month)));

        // Roll January back to December of the prior year.
        int prevMonth = month - 1;
        int prevYear = year;
        if (prevMonth == 0) {
            prevMonth = 12;
            prevYear = year - 1;
        }

        Dataset<Row> prevMonthTrend = spark.table("dws.dws_member_monthly_trend")
                .filter(col("stat_year").equalTo(prevYear).and(col("stat_month").equalTo(prevMonth)));

        // Null/empty-safe scalar reads; change % defaults to 0.0 when there is
        // no previous-month baseline to compare against.
        double currentConsumption = firstDouble(monthlyTrend, "consumption_amount", 0.0);
        double prevConsumption = firstDouble(prevMonthTrend, "consumption_amount", 0.0);
        double consumptionChange = prevConsumption > 0
                ? (currentConsumption - prevConsumption) * 100 / prevConsumption : 0.0;

        long currentNewMembers = firstLong(monthlyTrend, "new_members", 0L);
        long prevNewMembers = firstLong(prevMonthTrend, "new_members", 0L);
        double newMembersChange = prevNewMembers > 0
                ? (currentNewMembers - prevNewMembers) * 100.0 / prevNewMembers : 0.0;

        Dataset<Row> trendDisplay = monthlyTrend
                .withColumn("period_type", lit("MONTH"))
                // Spark division by a zero order count yields null, which is
                // acceptable for a display column.
                .withColumn("avg_order_amount",
                        round(col("consumption_amount").divide(col("consumption_orders")), 2))
                .withColumn("consumption_amount_change", lit(consumptionChange))
                .withColumn("new_members_change", lit(newMembersChange))
                .select(
                        col("stat_year"), col("stat_month"), col("stat_year_month"),
                        col("period_type"), col("consumption_amount"), col("consumption_orders"),
                        col("avg_order_amount"), col("total_members"), col("new_members"),
                        col("active_members"), col("new_member_conversion_rate"),
                        col("consumption_amount_change"), col("new_members_change"),
                        current_timestamp().as("create_time")
                );

        trendDisplay.write()
                .mode("overwrite")
                .insertInto("ads.ads_member_trend_display");
    }

    /**
     * Builds the top-5 high-value member table (ads.ads_member_top5_display)
     * using plain dataset joins.
     *
     * <p>Enriches the DWS top-5 rows with the member's registration date (from
     * the member dimension) and lifetime order stats up to {@code dt} (from
     * the sales fact). Order stats are left-joined so members with no sales
     * rows still appear, with order_count coalesced to 0.
     *
     * @param dt partition date (yyyy-MM-dd)
     */
    public void processMemberTop5Display(String dt) {
        Dataset<Row> top5 = spark.table("dws.dws_member_top5")
                .filter(col("dt").equalTo(dt));

        // IDs of the top-5 members, used to prune both lookups below.
        Dataset<Row> top5MemberIds = top5.select("member_id");

        // Registration date from the member dimension (same-day partition).
        Dataset<Row> memberInfo = spark.table("dwd.dwd_dim_member")
                .filter(col("dt").equalTo(dt))
                .join(top5MemberIds, "member_id")
                .select("member_id", "register_date");

        // Order count and most recent order date per member, up to dt.
        Dataset<Row> memberOrderStats = spark.table("dwd.dwd_fact_sales")
                .filter(col("sale_date").leq(dt))
                .join(top5MemberIds, "member_id")
                .groupBy("member_id")
                .agg(
                        countDistinct("transaction_id").as("order_count"),
                        max("sale_date").as("last_order_date")
                );

        // Step-wise joins keep the column lineage unambiguous.
        Dataset<Row> step1 = top5.join(memberInfo, "member_id");

        // Left outer join so a top-5 member with no fact rows is kept.
        Dataset<Row> top5Display = step1.join(
                        memberOrderStats,
                        step1.col("member_id").equalTo(memberOrderStats.col("member_id")),
                        "left_outer"
                )
                .drop(memberOrderStats.col("member_id")) // drop the duplicated join key
                // datediff(end, start): days between registration and dt.
                .withColumn("member_since_days",
                        datediff(lit(dt), col("register_date")))
                .select(
                        col("stat_date"),
                        col("rank_num"),
                        col("member_id"),
                        col("member_name"),
                        col("member_level"),
                        col("total_consumption"),
                        coalesce(col("order_count"), lit(0)).as("order_count"),
                        col("last_order_date"),
                        col("member_since_days"),
                        current_timestamp().as("create_time")
                );

        top5Display.write()
                .mode("overwrite")
                .insertInto("ads.ads_member_top5_display");
    }

    /**
     * Fallback implementation of the top-5 table that collects the member IDs
     * to the driver and filters with {@code isin} instead of joining on a
     * dataset of IDs. Used by {@link #processAllADSTables(String)} only when
     * {@link #processMemberTop5Display(String)} throws.
     *
     * <p>NOTE(review): {@code .as(Encoders.STRING())} assumes member_id is a
     * string column — confirm against the dws.dws_member_top5 schema.
     *
     * @param dt partition date (yyyy-MM-dd)
     */
    public void processMemberTop5DisplayAlternative(String dt) {
        Dataset<Row> top5 = spark.table("dws.dws_member_top5")
                .filter(col("dt").equalTo(dt));

        // Collect the (at most 5) member IDs to the driver.
        java.util.List<String> memberIdList = top5.select("member_id")
                .distinct()
                .as(org.apache.spark.sql.Encoders.STRING())
                .collectAsList();

        String[] memberIds = memberIdList.toArray(new String[0]);

        // Pre-filter both sources with isin() instead of a join on IDs.
        Dataset<Row> memberInfo = spark.table("dwd.dwd_dim_member")
                .filter(col("dt").equalTo(dt))
                .filter(col("member_id").isin(memberIds))
                .select("member_id", "register_date");

        Dataset<Row> memberOrderStats = spark.table("dwd.dwd_fact_sales")
                .filter(col("sale_date").leq(dt))
                .filter(col("member_id").isin(memberIds))
                .groupBy("member_id")
                .agg(
                        countDistinct("transaction_id").as("order_count"),
                        max("sale_date").as("last_order_date")
                );

        Dataset<Row> result1 = top5.join(memberInfo, "member_id");
        // Left join keeps members with no sales fact rows.
        Dataset<Row> result2 = result1.join(
                memberOrderStats,
                result1.col("member_id").equalTo(memberOrderStats.col("member_id")),
                "left"
        );

        Dataset<Row> top5Display = result2
                .withColumn("member_since_days",
                        datediff(lit(dt), col("register_date")))
                // Qualified col refs disambiguate the duplicated member_id.
                .select(
                        result1.col("stat_date"),
                        result1.col("rank_num"),
                        result1.col("member_id"),
                        result1.col("member_name"),
                        result1.col("member_level"),
                        result1.col("total_consumption"),
                        coalesce(memberOrderStats.col("order_count"), lit(0)).as("order_count"),
                        memberOrderStats.col("last_order_date"),
                        col("member_since_days"),
                        current_timestamp().as("create_time")
                );

        top5Display.write()
                .mode("overwrite")
                .insertInto("ads.ads_member_top5_display");
    }

    /**
     * Builds the member preference table (ads.ads_member_preference_display),
     * ranking preference values by consumption amount within each type.
     *
     * @param dt partition date (yyyy-MM-dd)
     */
    public void processMemberPreferenceDisplay(String dt) {
        Dataset<Row> preference = spark.table("dws.dws_member_preference")
                .filter(col("dt").equalTo(dt));

        // Rank within each preference_type, highest consumption first.
        WindowSpec preferenceWindow = Window
                .partitionBy("preference_type")
                .orderBy(col("consumption_amount").desc());

        Dataset<Row> preferenceDisplay = preference
                // Division by a zero member_count yields null, acceptable here.
                .withColumn("avg_consumption_per_member",
                        round(col("consumption_amount").divide(col("member_count")), 2))
                .withColumn("total_consumption_percentage", col("preference_rate"))
                .withColumn("preference_rank", row_number().over(preferenceWindow))
                .select(
                        col("stat_date"), col("preference_type"), col("preference_value"),
                        col("member_count"), col("consumption_amount"), col("order_count"),
                        col("preference_rate"), col("avg_consumption_per_member"),
                        col("total_consumption_percentage"), col("preference_rank"),
                        current_timestamp().as("create_time")
                );

        preferenceDisplay.write()
                .mode("overwrite")
                .insertInto("ads.ads_member_preference_display");
    }

    /**
     * Runs every ADS table build for {@code processDate}, in dependency-free
     * order, logging progress to stdout. The top-5 build falls back to the
     * driver-side alternative when the join-based version fails.
     *
     * @param processDate partition date (yyyy-MM-dd)
     */
    public void processAllADSTables(String processDate) {
        System.out.println("开始处理ADS层数据，日期: " + processDate);

        processMemberCoreMetrics(processDate);
        System.out.println("✓ 会员核心指标表处理完成");

        processMemberLevelDisplay(processDate);
        System.out.println("✓ 会员等级分布展示表处理完成");

        processMemberTrendDisplay(processDate);
        System.out.println("✓ 会员月度趋势展示表处理完成");

        // Try the join-based build first; fall back to the isin() variant.
        try {
            processMemberTop5Display(processDate);
            System.out.println("✓ 高价值会员TOP5展示表处理完成");
        } catch (Exception e) {
            System.out.println("方法1失败: " + e.getMessage());
            System.out.println("尝试替代方法...");
            processMemberTop5DisplayAlternative(processDate);
            System.out.println("✓ 高价值会员TOP5展示表处理完成（替代方法）");
        }

        processMemberPreferenceDisplay(processDate);
        System.out.println("✓ 会员偏好分析展示表处理完成");

        System.out.println("ADS层所有表处理完成!");
    }

    /**
     * Entry point: creates a Hive-enabled session (cross joins explicitly
     * enabled for {@link #processMemberCoreMetrics}) and processes a
     * hard-coded date. NOTE(review): consider taking the date from args[0].
     */
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("ADS Member Processing")
                .enableHiveSupport()
                .config("spark.sql.crossJoin.enabled", "true")
                .getOrCreate();

        spark.sparkContext().setLogLevel("WARN");

        String processDate = "2025-10-13";
        ADSMemberProcessor processor = new ADSMemberProcessor(spark);
        processor.processAllADSTables(processDate);

        spark.stop();
    }
}