package avicit.bdp.dds.server.worker.task;

import avicit.bdp.common.utils.SpringApplicationContext;
import avicit.bdp.dds.dispatch.enums.TaskType;
import avicit.bdp.dds.dispatch.task.algorithm.AlgorithmParameters;
import avicit.bdp.core.util.json.JSONUtils;
import avicit.bdp.dds.common.utils.EnumUtils;
import avicit.bdp.dds.dao.entity.AlgoNodes;
import avicit.bdp.dds.dao.entity.TaskNodes;
import avicit.bdp.dds.server.entity.TaskExecutionContext;
import avicit.bdp.dds.server.worker.task.algorithm.AlgorithmPythonTask;
import avicit.bdp.dds.server.worker.task.algorithm.AlgorithmTask;
import avicit.bdp.dds.server.worker.task.collect.MetadataCollectTask;
import avicit.bdp.dds.server.worker.task.datax.DataxTask;
import avicit.bdp.dds.server.worker.task.datax.DataxIncrementTask;
import avicit.bdp.dds.server.worker.task.flink.FlinkTask;
import avicit.bdp.dds.server.worker.task.ft.FTTask;
import avicit.bdp.dds.server.worker.task.http.HttpTask;
import avicit.bdp.dds.server.worker.task.java.JavaTask;
import avicit.bdp.dds.server.worker.task.monitor.MonitorDirectoryTask;
import avicit.bdp.dds.server.worker.task.mr.MapReduceTask;
import avicit.bdp.dds.server.worker.task.processdure.ProcedureTask;
import avicit.bdp.dds.server.worker.task.python.PythonTask;
import avicit.bdp.dds.server.worker.task.shell.ShellTask;
import avicit.bdp.dds.server.worker.task.spark.SparkTask;
import avicit.bdp.dds.server.worker.task.sparkflow.SparkFlowTask;
import avicit.bdp.dds.server.worker.task.sql.SqlTask;
import avicit.bdp.dds.server.worker.task.sqoop.SqoopTask;
import avicit.bdp.dds.server.worker.task.standard.DataElementEvaluateTask;
import avicit.bdp.dds.server.worker.task.table.TableTask;
import avicit.bdp.dds.service.process.ProcessService;
import org.slf4j.Logger;

/** task manaster */
/** Factory that maps a {@link TaskType} to its concrete {@link AbstractTask} implementation. */
public class TaskManager {

  /** Program type value indicating an algorithm node executed via Python. */
  private static final int PYTHON_PROGRAM_TYPE = 5;

  /** Utility class — not instantiable. */
  private TaskManager() {}

  /**
   * Creates a new task instance for the given execution context.
   *
   * @param taskExecutionContext taskExecutionContext carrying the task type and parameters
   * @param logger logger passed to the created task
   * @return the concrete AbstractTask, or {@code null} if algorithm parameters cannot be parsed
   * @throws IllegalArgumentException if the task type is unknown or unsupported
   */
  public static AbstractTask newTask(TaskExecutionContext taskExecutionContext, Logger logger)
      throws IllegalArgumentException {
    // Needed to resolve algorithm-package program types for algorithm/spark-sql tasks.
    ProcessService processService = SpringApplicationContext.getBean(ProcessService.class);

    TaskType taskType = EnumUtils.getEnum(TaskType.class, taskExecutionContext.getTaskType());
    if (taskType == null) {
      // EnumUtils.getEnum returns null for unknown names; fail with the documented
      // IllegalArgumentException instead of an NPE on the switch below.
      logger.error("un support task type: {}", taskExecutionContext.getTaskType());
      throw new IllegalArgumentException("not support task type");
    }

    switch (taskType) {
      case SHELL:
        return new ShellTask(taskExecutionContext, logger);
      case FTTASK:
        return new FTTask(taskExecutionContext, logger);
      case PROCEDURE:
        return new ProcedureTask(taskExecutionContext, logger);
      case SQL:
        return new SqlTask(taskExecutionContext, logger);
      case MR:
        return new MapReduceTask(taskExecutionContext, logger);
      case SPARK:
        return new SparkTask(taskExecutionContext, logger);
      case FLINK:
        return new FlinkTask(taskExecutionContext, logger);
      case PYTHON:
        return new PythonTask(taskExecutionContext, logger);
      case HTTP:
        return new HttpTask(taskExecutionContext, logger);
      case DATAX:
        return new DataxTask(taskExecutionContext, logger);
      case DATAXINCREMENT:
        return new DataxIncrementTask(taskExecutionContext, logger);
      case SQOOP:
        return new SqoopTask(taskExecutionContext, logger);
      case JAVA:
        return new JavaTask(taskExecutionContext, logger);
      case MODEL:
        return new TableTask(taskExecutionContext, logger);
      case MONITORDIRECTORY:
        return new MonitorDirectoryTask(taskExecutionContext, logger);
      case SPARKFLOW:
        return new SparkFlowTask(taskExecutionContext, logger);
      case METADATA_COLLECT:
        return new MetadataCollectTask(taskExecutionContext, logger);
      case DATA_ELEMENT_EVALUATE:
        return new DataElementEvaluateTask(taskExecutionContext, logger);
      case ALGORITHM:
        // Non-custom algorithms fall back to a TaskNodes lookup for the program type.
        return newAlgorithmTask(taskExecutionContext, logger, processService, true);
      case SPARKSQL:
        // Spark-SQL tasks only consult the custom algorithm library; no TaskNodes fallback.
        // NOTE: previously a missing `return` here made a null parse result fall through
        // to the default branch and throw "not support task type" for a supported type.
        return newAlgorithmTask(taskExecutionContext, logger, processService, false);
      default:
        logger.error("un support task type: {}", taskExecutionContext.getTaskType());
        throw new IllegalArgumentException("not support task type");
    }
  }

  /**
   * Resolves the program type of an algorithm-package task and creates the matching task.
   *
   * <p>Handles the different algorithm-package kinds in the custom algorithm library: program type
   * {@value #PYTHON_PROGRAM_TYPE} is executed by a Python node, everything else by a Spark node.
   *
   * @param taskExecutionContext taskExecutionContext whose params hold the algorithm settings
   * @param logger logger passed to the created task
   * @param processService service used to look up algorithm / task node definitions
   * @param lookupTaskNodes whether non-library algorithms resolve their program type via TaskNodes
   * @return the created task, or {@code null} if the task params cannot be parsed
   */
  private static AbstractTask newAlgorithmTask(
      TaskExecutionContext taskExecutionContext,
      Logger logger,
      ProcessService processService,
      boolean lookupTaskNodes) {
    AlgorithmParameters algorithmParameters =
        JSONUtils.parseObject(taskExecutionContext.getTaskParams(), AlgorithmParameters.class);
    if (algorithmParameters == null) {
      return null;
    }
    String nodeId = algorithmParameters.getNodeId();

    Integer programType = 0;
    if (AlgorithmParameters.CUSTOM_ALGORITHM_LIBRARY.equals(algorithmParameters.getType())) {
      // Algorithm from the custom algorithm library.
      AlgoNodes algoNodes = processService.getTaskAlgoNodesById(nodeId);
      programType = algoNodes.getProgramType();
    } else if (lookupTaskNodes) {
      TaskNodes taskNodes = processService.getTaskNodesById(nodeId);
      programType = taskNodes.getProgramType();
    }

    return programType != null && PYTHON_PROGRAM_TYPE == programType
        ? new AlgorithmPythonTask(taskExecutionContext, logger) // Python execution node
        : new AlgorithmTask(taskExecutionContext, logger); // Spark execution node
  }
}
