package com.swsc.ai.tasks;

import com.swsc.ai.config.SparkSqlConf;
import com.swsc.ai.enums.TFEnum;
import com.swsc.ai.enums.TaskModeEnum;
import com.swsc.ai.taskfactory.Task;
import com.swsc.ai.util.HDFSUtil;
import com.swsc.ai.util.SparkUtil;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.sql.SQLException;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Map;

public abstract class TemplateTask implements Task {

    /**
     * Template-method entry point: resolves common task configuration from
     * {@code configMap}, determines the output path and the Spark session,
     * then dispatches to the subclass hooks according to the configured mode.
     *
     * @param configMap task configuration; recognised keys are projectName,
     *                  taskName, subTaskName, dt (yyyyMMdd), className, mode,
     *                  isTest, tableName and partColumn — each falls back to a
     *                  default when absent
     * @throws SQLException propagated from {@link #processData}
     */
    @Override
    public void run(Map<String, String> configMap) throws SQLException {
        String projectName = configMap.getOrDefault("projectName", "test");
        String taskName = configMap.getOrDefault("taskName", "test");
        String subTaskName = configMap.getOrDefault("subTaskName", "test");
        // Default the partition date to "today" in yyyyMMdd when not supplied.
        String taskDate = configMap.getOrDefault("dt", LocalDate.now().format(DateTimeFormatter.ofPattern("yyyyMMdd")));

        String className = configMap.getOrDefault("className", "test");
        String mode = configMap.getOrDefault("mode", "test");
        String isTest = configMap.getOrDefault("isTest", TFEnum.FALSE.getName());

        String tableName = configMap.getOrDefault("tableName", "defaultTable");
        String partColumn = configMap.getOrDefault("partColumn", "defaultColumn");

        // Test runs are redirected to a dedicated test output location.
        String outputPath = HDFSUtil.getOutHDFSPath(projectName, taskName, subTaskName, taskDate);
        if (TFEnum.TRUE.equals(TFEnum.fromTypeName(isTest))) {
            outputPath = HDFSUtil.getTestOutHDFSPath(className, subTaskName, taskDate);
        }
        SparkSession session = SparkUtil.getSession(isTest, className);

        // Resolve the mode once instead of re-parsing it in every branch.
        TaskModeEnum taskMode = TaskModeEnum.fromTypeName(mode);
        if (TaskModeEnum.RDD.equals(taskMode)) {
            String inputPath = genInputPath(tableName);
            processData(session, outputPath, inputPath);
        } else if (TaskModeEnum.SQL.equals(taskMode)) {
            String condition = genWhereCondition(taskDate);
            String selectColumns = genSelectColumns();
            Dataset<Row> df = SparkSqlConf.getDataByPartition(session, tableName, partColumn, selectColumns, condition);
            // Register the data under the task's class name so processData
            // implementations can query it via Spark SQL.
            df.createOrReplaceTempView(className);
            processData(session, outputPath, className, taskDate, projectName, taskName, subTaskName, isTest);
        }
        // NOTE(review): any other mode — including the "test" default used
        // when no mode is configured — is a silent no-op. Confirm this is
        // intentional, or fail fast on an unrecognised mode.
    }

    // Determines the input path in RDD mode.
    public abstract String genInputPath(String dataSource);

    // SQL mode (direct MySQL access): defines the columns to select.
    protected abstract String genSelectColumns();

    // SQL mode (direct MySQL access): defines the query filter condition;
    // the returned clause must start with "where".
    public abstract String genWhereCondition(String taskDate);

    // Data-processing logic supplied by the subclass; supports both the RDD
    // and the SQL mode (inputParams differ per mode — see run()).
    public abstract void processData(SparkSession session, String outputPath, String... inputParams) throws SQLException;
}
