package com.yuaer.demo.springsparkjobhandlerdemo.runner;

import com.yuaer.demo.springsparkjobhandlerdemo.DecisionService;
import com.yuaer.demo.springsparkjobhandlerdemo.executor.DagExecutor;
import com.yuaer.demo.springsparkjobhandlerdemo.executor.GlobalContext;
import com.yuaer.demo.springsparkjobhandlerdemo.model.MktActivityDecision;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.ApplicationArguments;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

@Slf4j
@Component
public class ActivityStandardRunner extends ScriptRunner {

    /** Hive table that every DAG node's result dataset is appended to. */
    private static final String RESULT_TABLE = "customer_reach_info";

    /** Node id whose result is previewed after the run; matches the DAG's final node. */
    private static final String PREVIEW_NODE_ID = "n2";

    @Autowired
    private DecisionService decisionService;

    public ActivityStandardRunner() {
        super("ActivityStandard");
    }

    /**
     * Executes the marketing-activity DAG identified by the activity number
     * passed as the second non-option command-line argument, writes each
     * node's result dataset to {@value #RESULT_TABLE}, and previews the
     * final node's output.
     *
     * @param args application arguments; non-option arg at index 1 is the activity number
     * @throws IllegalAccessException   declared by the {@code ScriptRunner} contract
     * @throws IllegalArgumentException if the activity number argument is missing
     */
    @Override
    public void runTask(ApplicationArguments args) throws IllegalAccessException {
        // Prepare activity data: the activity number comes in as the second
        // non-option argument; validate instead of risking IndexOutOfBoundsException.
        List<String> nonOptionArgs = args.getNonOptionArgs();
        if (nonOptionArgs == null || nonOptionArgs.size() < 2) {
            throw new IllegalArgumentException(
                    "Missing activity number: expected it as the second non-option argument");
        }
        String moveNo = nonOptionArgs.get(1);
        // Fetch the DAG node definitions for this activity.
        MktActivityDecision activityDecision = decisionService.getActivitiesToBeExecute(moveNo);
        log.info("ActivityStandardRunner moveNo:{}, nodes:{}", moveNo, activityDecision);

        // Initialize the SparkSession.
        // TODO: these config values could be pulled from Nacos instead of being hard-coded.
        SparkSession spark = SparkSession.builder()
                .appName("ActivityStandard_" + moveNo)
                .enableHiveSupport()
                .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
                .config("hive.metastore.uris", "thrift://namenode-host:9083")
                .config("spark.sql.sources.partitionOverwriteMode", "dynamic")
                .config("spark.sql.shuffle.partitions", "2")
                .config("spark.executor.memory", "2g")
                .config("spark.executor.cores", "2")
                .config("spark.driver.memory", "2g")
                .master("local[*]")
                .getOrCreate();

        try {
            // Global configuration: parameters and a small amount of cached data only.
            Map<String, String> globalConfig = new HashMap<>();
            globalConfig.put("move.type", activityDecision.getType());

            GlobalContext context = new GlobalContext(spark, globalConfig);

            DagExecutor executor = new DagExecutor(context, activityDecision.getNodes());
            Map<String, Dataset<Row>> resultMap = executor.execute();

            // TODO: consider unioning all datasets first and performing a single write.
            for (Dataset<Row> value : resultMap.values()) {
                // Append explicitly: the default SaveMode (ErrorIfExists) would throw
                // TableAlreadyExistsException on the second dataset written to the table.
                value.write().mode(SaveMode.Append).saveAsTable(RESULT_TABLE);
            }

            // Preview the final node's result; guard against the node id being absent
            // (the executor's node ids come from external decision data).
            Dataset<Row> finalResult = resultMap.get(PREVIEW_NODE_ID);
            if (finalResult != null) {
                finalResult.show();
            } else {
                log.warn("No result dataset registered under node id '{}'; skipping preview",
                        PREVIEW_NODE_ID);
            }
        } finally {
            // Release Spark resources even if a DAG node or write fails.
            spark.stop();
        }
    }
}
