import re

class EtlUtil(object):
    """Shared constants and static helpers for the ETL scheduling layer.

    Holds the driver/reader/writer class names used to assemble ETL job
    configs, the numeric source/sink type codes, the engine (operator)
    names, and two helpers: a SQL table extractor and a resource-metrics
    formatter.
    """

    CLASS_NAME = "1etl.core.DriverApplication"
    COMPLEX_CLASS_NAME = "1etl.core.DriverApplicationComplex"
    # reader
    HIVE_READER_TYPE = "1etl.hive.HiveReader"
    JDBC_READER_TYPE = "1etl.jdbc.MySqlReader"
    # writer
    HIVE_WRITER_TYPE = "1etl.hive.HiveWriter"
    JDBC_WRITER_TYPE = "1etl.jdbc.MySqlWriter"

    HIVE_WRITER_PATH = "/apps/hive/warehouse/"

    # numeric codes identifying data-source kinds
    SOURCE_TYPE_KAFKA = 1
    SOURCE_TYPE_HIVE = 2
    SOURCE_TYPE_JDBC = 3
    SOURCE_TYPE_MONGODB = 4
    SOURCE_TYPE_ES = 5

    # numeric codes identifying data-sink kinds
    SINK_TYPE_KAFKA = 1
    SINK_TYPE_HIVE = 2
    SINK_TYPE_JDBC = 3
    SINK_TYPE_ES = 4
    SINK_TYPE_HDFSTEXTFILE = 5

    # task engine / operator names
    SHELL = 'SHELL'
    SPARK = "SPARK"
    FLINK = "FLINK"
    LOGSTASH = "LOGSTASH"
    DATAFACTORY = "DATAFACTORY"

    @staticmethod
    def sql_cluster_decoder(sql):
        """Extract ``{cluster: [table, ...]}`` from FROM/JOIN clauses of *sql*.

        The first dotted qualifier (``db.`` in ``db.table``) is treated as
        the cluster key; unqualified tables are grouped under the empty
        string. Duplicate references are kept (no de-duplication), matching
        historic behavior.

        :param sql: SQL text to scan (case-sensitive ``from``/``FROM``,
            ``join``/``JOIN`` keywords only).
        :return: dict mapping cluster name -> list of table names.
        """
        # Trailing space guarantees the "\s+" after the table name can match
        # even when the statement ends right at the table.
        sql = sql + " "
        result = {}
        # Raw strings for regex readability; groups: 3 = leading "db."
        # qualifier (optional), 5 = table name.
        regex_arr = [r"(\s+|\*?)(from|FROM)\s+(\w+\.)?(\w+\.)*(\w+)(\)*)\s+(.|\s)*?\w*",
                     r"(\s+)(join|JOIN)\s+(\w+\.)?(\w+\.)*(\w+)\s+(.|\s)*?(on|ON)"]
        for regex in regex_arr:
            for mat in re.findall(regex, sql):
                table = mat[4]    # group 5: table name
                cluster = mat[2]  # group 3: "db." qualifier, '' when absent
                if cluster:
                    cluster = cluster[:-1]  # drop the trailing '.'
                result.setdefault(cluster, []).append(table)
        return result

    @staticmethod
    def format_metrics(operator, task, project, workflow):
        """Build a resource-usage metrics dict for *task*.

        For SPARK/DATAFACTORY tasks, cpu/memory are derived from the
        executor/driver settings (memory strings carry a one-character unit
        suffix, e.g. ``"4g"``, which is stripped before conversion); for
        FLINK tasks, from the jobmanager/taskmanager settings. Any other
        operator leaves cpu and memory at 0.

        :param operator: object exposing engine-specific resource attributes
            (``_executor_cores`` etc. for Spark, ``_jobmanager_cpu`` etc.
            for Flink).
        :param task: object with ``operator``, ``name``, ``project_id``,
            ``type`` attributes.
        :param project: object with ``project_name``.
        :param workflow: object with ``id`` and ``name``, or falsy when the
            task is not attached to a workflow.
        :return: metrics dict with cpu, memory, task/project/workflow info.
        """
        metrics = {
            "cpu": 0,
            "memory": 0,
            "task_name": "",
            "project_id": 0,
            "project_name": "",
            "workflow_id": 0,
            "workflow_name": "",
            "type": ""
        }
        cpu = 0
        memory = 0
        if task.operator == EtlUtil.SPARK or task.operator == EtlUtil.DATAFACTORY:
            executor_core = int(operator._executor_cores)
            # "[:-1]" strips the one-character unit suffix (e.g. "4g" -> 4).
            executor_memory = int(operator._executor_memory[:-1])
            # BUGFIX: previously sliced by len(executor_memory_str), which
            # left the unit suffix in place (ValueError) whenever driver and
            # executor memory strings differed in length.
            driver_memory = int(operator._driver_memory[:-1])
            num_executor = operator._num_executors
            driver_cpu = 1
            cpu = driver_cpu + executor_core
            # +1 per container accounts for overhead memory.
            memory = driver_memory + 1 + num_executor * (executor_memory + 1)

        elif task.operator == EtlUtil.FLINK:
            jobmanager_core = int(operator._jobmanager_cpu)
            taskmanager_core = int(operator._taskmanager_cpu)
            taskmanager_memory = int(operator._taskmanager_memory[:-1])
            jobmanager_memory = int(operator._jobmanager_memory[:-1])
            slots = operator._slots
            parallelism = operator._parallelism
            # NOTE(review): true division — yields a float on Python 3 and
            # does not round up partial taskmanagers; confirm whether
            # ceil(parallelism / slots) was intended.
            num_taskmanager = parallelism / slots
            cpu = jobmanager_core + num_taskmanager * taskmanager_core
            memory = jobmanager_memory + num_taskmanager * taskmanager_memory
        metrics['cpu'] = cpu
        metrics['memory'] = memory
        metrics['task_name'] = task.name
        metrics['project_id'] = task.project_id
        metrics['project_name'] = project.project_name
        if workflow:
            metrics['workflow_id'] = workflow.id
            metrics['workflow_name'] = workflow.name
        metrics['type'] = task.type
        return metrics








