import json
import logging
import random
import time
from datetime import datetime
from multiprocessing import Process

import requests
from requests.auth import HTTPBasicAuth

from airflow.patsnap.util.common_util import CommonUtil
from airflow.patsnap.util.es_util import EsUtil
from airflow.patsnap.util.etl_util import EtlUtil
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.kube_config import KubeConfig
from airflow.kubernetes.kubernetes_helper_functions import create_pod_id
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.models import TaskInstance, DagPickle
from airflow.models.idata_datasource import Datasource
from airflow.models.idata_env import Env
from airflow.models.idata_project import Project
from airflow.models.idata_workflow import WorkFlow
from airflow.models.idata_task import Task, OperatorType
from airflow.models.idata_task_file import TaskFile
from airflow.models.idata_workflow_task import WorkFlowTask
from airflow.utils import dates
from airflow.utils.session import provide_session
from airflow.utils.state import State

log = logging.getLogger(__name__)


def start_executor(ti, pickle_id):
    """Queue a single task instance on the default executor and block until done.

    :param ti: the TaskInstance to run
    :param pickle_id: id of the pickled DAG the executor should load
    """
    runner = ExecutorLoader.get_default_executor()
    runner.start()
    runner.queue_task_instance(
        ti,
        pickle_id=pickle_id,
        ignore_all_deps=True,
        ignore_depends_on_past=True,
        ignore_task_deps=True,
        ignore_ti_state=True,
    )
    # Drive heartbeats until the executor reports nothing running.
    while True:
        runner.heartbeat()
        if not runner.running:
            break
        time.sleep(5)
    runner.end()


def run_in_process(task: Task):
    """Run a database-defined task immediately in a child process.

    The operator's DAG is pickled so the executor process can load it.

    :param task: task definition row
    :return: JSON representation of the created TaskInstance
    """
    op = task.create_operator()
    dag_pickle = DagPickle(op.dag)
    dag_pickle.save()
    log.info('pickle the DAG pickle_id {}'.format(dag_pickle.id))

    ti = TaskInstance(op, dates.sh_now())
    ti.refresh_from_db()
    ti.init_run_context()

    worker = Process(target=start_executor, args=(ti, dag_pickle.id))
    worker.start()

    return ti.to_json()


@provide_session
def run_by_scheduler(task: Task, execution_date: int, session=None):
    """Create a SCHEDULED task instance for the scheduler to pick up.

    :param task: task definition row
    :param execution_date: optional execution date in epoch milliseconds
    :return: JSON representation of the scheduled TaskInstance
    """
    op = task.create_operator()
    dag_pickle = DagPickle(op.dag)
    dag_pickle.save()
    log.info('pickle the DAG pickle_id {}'.format(dag_pickle.id))
    # A small random jitter keeps repeated triggers from colliding on
    # the same execution_date.
    exec_date = (dates.from_milliseconds(execution_date + random.randint(0, 1000))
                 if execution_date else dates.sh_now())
    ti = TaskInstance(op, exec_date, external_trigger=1)
    ti.state = State.SCHEDULED
    session.add(ti)
    return ti.to_json()


def list_namespaced_pod(namespace='idata', app=None, status=None):
    """List pods in *namespace*, optionally filtered by app label and phase.

    :param namespace: kubernetes namespace to query
    :param app: if given, keep only pods whose ``app`` label equals this value
    :param status: if given, keep only pods whose ``status.phase`` equals this
    :return: dict with ``size`` (count) and ``items`` (trimmed pod dicts)
    """
    kube_client = get_kube_client()
    pods = kube_client.list_namespaced_pod(namespace).to_dict()
    ans = []
    for pod in pods['items']:
        item = {
            'annotations': pod['metadata']['annotations'],
            'labels': pod['metadata']['labels'],
            'name': pod['metadata']['name'],
            'namespace': pod['metadata']['namespace'],
            'status': pod['status'],
        }
        # Drop verbose sub-structures; pop with a default so pods missing
        # these keys no longer raise KeyError.
        item['status'].pop('conditions', None)
        item['status'].pop('container_statuses', None)
        if status and item['status'].get('phase') != status:
            continue
        # labels may be None or lack 'app' for system pods; .get avoids KeyError
        if app and (item['labels'] or {}).get('app') != app:
            continue
        ans.append(item)
    return {
        'size': len(ans),
        'items': ans
    }


def run_in_pod(task: Task):
    """Run a database-defined task in a dedicated Kubernetes pod.

    :param task: task definition row
    :return: JSON representation of the (not yet persisted) TaskInstance
    """
    op = task.create_operator()
    dag_pickle = DagPickle(op.dag)
    dag_pickle.save()
    log.info('pickle the DAG pickle_id {}'.format(dag_pickle.id))

    exec_date = dates.sh_now()
    kube_config = KubeConfig()
    kube_client = get_kube_client()

    command = [
        'airflow', 'tasks', 'run',
        op.dag_id, op.task_id, exec_date.isoformat(),
        '--local', '--ignore-depends-on-past',
    ]

    base_worker_pod = PodGenerator.deserialize_model_file(kube_config.pod_template_file)
    pod = PodGenerator.construct_pod(
        namespace=kube_config.kube_namespace,
        scheduler_job_id='0',
        pod_id=create_pod_id(op.dag_id, op.task_id),
        dag_id=op.dag_id,
        task_id=op.task_id,
        kube_image=kube_config.kube_image,
        try_number=1,
        date=exec_date,
        args=command,
        pod_override_object=None,
        base_worker_pod=base_worker_pod,
    )

    log.info("Kubernetes running for command %s", command)

    PodLauncher(kube_client=kube_client).run_pod_async(
        pod, **kube_config.kube_client_request_args)

    return TaskInstance(op, exec_date).to_json()


@provide_session
def insert_task(task, session=None):
    """Insert a new task after validating project, queue and name uniqueness.

    DATAFACTORY tasks get a default parameter template seeded; LOGSTASH
    tasks are forced to streaming type.

    :raises AirflowException: on missing project/pool or duplicate name
    """
    if not task.project_id:
        raise AirflowException('project_id 不能为空')
    project = Project.get_project(task.project_id)
    if not project.pool:
        raise AirflowException('项目队列不能为空')
    task.pool = project.pool
    if task.operator == 'LOGSTASH':
        task.type = 1

    duplicate = (session.query(Task)
                 .filter(Task.project_id == task.project_id, Task.name == task.name)
                 .first())
    if duplicate is not None:
        raise AirflowException('项目中作业名称重复')
    env = Env.get_default_env()
    if task.operator == OperatorType.DATAFACTORY:
        # Seed a default data-factory parameter template for the editor.
        task.param = {
            "envId": env.id,
            "pyspark": "",
            "version": "2.3",
            "sparkArgs": {
                "sparkConf": "",
                "driverCores": "1",
                "driverMemory": "1g",
                "numExecutors": 1,
                "executorCores": "1",
                "executorMemory": "1g"
            },
            "date_format": "",
            "filterModel": {

            },
            "isStreaming": task.type == 1,
            "readerModel": {

            },
            "writerModel": {

            },
            "isMetricsMonitor": False
        }
    task.insert()
    if task.files:
        for path in task.files.split(','):
            TaskFile(task_id=task.id, file_path=path).insert(session=session)


@provide_session
def insert_clone_task(task, session=None):
    """Insert a cloned task, rejecting duplicate names within the project.

    :raises AirflowException: when the project already has a task of this name
    """
    duplicate = (session.query(Task)
                 .filter(Task.project_id == task.project_id, Task.name == task.name)
                 .first())
    if duplicate is not None:
        raise AirflowException('项目中作业名称重复')
    task.insert()

@provide_session
def update_task(task, session=None):
    """Normalize the task's lineage (blood) info, then persist the update."""
    format_blood(task).update(session=session)


@provide_session
def task_validate(task_id, data, session=None):
    """Overlay *data* onto the stored task and return its content error.

    A payload touching only ``name`` or only ``folder_id`` needs no content
    validation, so ``None`` is returned immediately.

    :param task_id: id of the task to validate
    :param data: dict of fields to overlay before validating
    :return: the task's content error message, or None when valid / skipped
    """
    if len(data) == 1 and ('name' in data or 'folder_id' in data):
        return None
    task = Task.get_task(task_id, session=session)
    for key, value in data.items():
        if key == 'name' and value:
            # BUG FIX: the old code had a trailing comma ("task.name = data[key],")
            # which assigned a one-element tuple instead of the string.
            task.name = value
        elif key == 'folder_id' and value:
            task.folder_id = value
        elif key == 'content' and value:
            # DATAFACTORY content arrives as a dict; store it serialized.
            if isinstance(value, dict) and task.operator == OperatorType.DATAFACTORY:
                value = json.dumps(value)
            task.content = value
        elif key == 'param' and value:
            task.param = value
        elif key == 'files' and value:
            task.files = value
        elif key == 'owner' and value:
            task.owner = value
        elif key == 'email_alert':
            task.email_alert = value
        elif key == 'email':
            task.email = value
        elif key == 'retries' and value:
            task.retries = value
        elif key == 'type':
            task.type = value
        elif key == 'version' and value:
            task.version = value
        elif key == 'execution_timeout':
            task.execution_timeout = value
    return task.error_content()


@provide_session
def patch_task(task_id, data, session=None):
    """Partially update a task row and propagate a rename to its workflows.

    Only keys present in *data* are written. An empty ``files`` value
    explicitly clears the column; other falsy values are skipped for the
    keys guarded by truthiness checks.
    """
    fields = {}
    for key, value in data.items():
        if key == 'name' and value:
            # BUG FIX: the old code had a trailing comma ("data[key],") which
            # stored a one-element tuple as the task name.
            fields[Task.name] = value
        elif key == 'folder_id' and value:
            fields[Task.folder_id] = value
        elif key == 'content' and value:
            fields[Task.content] = value
        elif key == 'param' and value:
            fields[Task.param] = value
        elif key == 'files':
            fields[Task.files] = value if value else None
        elif key == 'owner' and value:
            fields[Task.owner] = value
        elif key == 'email_alert':
            fields[Task.email_alert] = value
        elif key == 'email':
            fields[Task.email] = value
        elif key == 'retries':
            fields[Task.retries] = value
        elif key == 'type':
            fields[Task.type] = value
        elif key == 'version' and value:
            fields[Task.version] = value
        elif key == 'execution_timeout':
            fields[Task.execution_timeout] = value
    if len(fields) > 0:
        session.query(Task).filter(Task.id == task_id).update(fields)
        # Propagate the new name into every workflow graph embedding this task.
        task_name = data.get('name', None)
        for wt in session.query(WorkFlowTask).filter(WorkFlowTask.task_id == task_id).all():
            wf = session.query(WorkFlow).filter(WorkFlow.id == wt.workflow_id).first()
            if wf is None:
                # BUG FIX: the old code dereferenced wf.content before its
                # None-check, raising AttributeError on dangling links.
                continue
            if task_name:
                nodes = wf.content.get('nodes', None)
                if nodes:
                    for node in nodes:
                        nt_id = node.get('taskId', None)
                        if nt_id and nt_id == task_id:
                            node['name'] = task_name
            if wf.content_error() is None:
                wf.update_content(content=wf.content, session=session)
                wf.sync_dag(session=session)


@provide_session
def delete_task(task_id, session=None):
    """Delete a task unless a workflow still references it.

    :return: an error message when deletion is blocked, otherwise None
    """
    if WorkFlowTask.query(task_id, session=session):
        return '该任务已经被 Workflow 使用， 无法删除'
    session.query(Task).filter(Task.id == task_id).delete()


@provide_session
def get_history_instance(task_id: int, session=None):
    """Return the latest 10 runs of ``task-<task_id>`` as JSON dicts."""
    instances = (session.query(TaskInstance)
                 .filter(TaskInstance.task_id == 'task-{}'.format(task_id))
                 .order_by(TaskInstance.start_date.desc())
                 .limit(10)
                 .all())
    # Read the proxy endpoints once; set_proxy_url rewrites tracking URLs.
    proxy_url = conf.get('yarn', 'proxy_url')
    app_url = conf.get('yarn', 'app_url')
    spark_run_proxy_url = conf.get('yarn', 'spark_app_run_proxy_url')
    flink_run_proxy_url = conf.get('yarn', 'flink_app_run_proxy_url')
    app_end_proxy_url = conf.get('yarn', 'app_end_proxy_url')
    history = []
    for ti in instances:
        item = ti.to_json()
        CommonUtil.set_proxy_url(item, spark_run_proxy_url, flink_run_proxy_url,
                                 app_end_proxy_url, app_url, proxy_url)
        history.append(item)
    return history


@provide_session
def get_task_options(project_id: int, operator: str, session=None):
    """Dropdown options of tasks for workflow linking; streaming tasks excluded.

    :param project_id: project to search in
    :param operator: operator type to match
    :return: list of ``{'id', 'name'}`` dicts
    """
    rows = session.query(Task.id, Task.name).filter(
        Task.project_id == project_id,
        Task.operator == operator,
        Task.type == 0,
    ).all()
    return [{'id': task_id, 'name': task_name} for task_id, task_name in rows]


@provide_session
def tasks_of_workflow_instance(dag_id: str, execution_date, session=None):
    """List a workflow run's task instances (oldest first) as JSON dicts."""
    instances = (session.query(TaskInstance)
                 .filter(TaskInstance.dag_id == dag_id,
                         TaskInstance.execution_date == execution_date)
                 .order_by(TaskInstance.start_date.asc())
                 .all())
    # Read the proxy endpoints once; set_proxy_url rewrites tracking URLs.
    proxy_url = conf.get('yarn', 'proxy_url')
    app_url = conf.get('yarn', 'app_url')
    spark_run_proxy_url = conf.get('yarn', 'spark_app_run_proxy_url')
    flink_run_proxy_url = conf.get('yarn', 'flink_app_run_proxy_url')
    app_end_proxy_url = conf.get('yarn', 'app_end_proxy_url')
    data = []
    for ti in instances:
        item = ti.to_json()
        CommonUtil.set_proxy_url(item, spark_run_proxy_url, flink_run_proxy_url,
                                 app_end_proxy_url, app_url, proxy_url)
        data.append(item)
    return data

@provide_session
def get_task_log(dag_id, task_id, session=None):
    """Fetch the latest run's log lines for a task from Elasticsearch.

    Flink tasks are matched by jobmanager daemon host, everything else by
    pod name. The newest 200 lines are returned in chronological order.

    :return: list of raw log lines; [] when no instance or the query fails
    """
    ti = (session.query(TaskInstance)
          .filter(TaskInstance.dag_id == dag_id, TaskInstance.task_id == task_id)
          .order_by(TaskInstance.start_date.desc())
          .first())
    if ti is None:
        # BUG FIX: the old code dereferenced .hostname on a missing instance,
        # raising AttributeError when the task had never run.
        return []
    hostname = ti.hostname

    if ti.operator == 'FLINK' or ti.operator == 'FlinkSubmitOperator':
        log_filter = [
            {"term": {"fields.daemon": hostname}},
            {"term": {"fields.component": "jobmanager"}},
        ]
    else:
        log_filter = {"term": {"fields.podname": hostname}}

    pay_load = {
        "_source": ["raw"],
        "query": {"bool": {"filter": log_filter}},
        "sort": [{"date": {"order": "desc"}}],
        "size": 200,
    }

    es_url = conf.get('elastic', 'url')
    es_user = conf.get('elastic', 'user')
    es_password = conf.get('elastic', 'password')
    index = 'all-logs-*'
    url = '{}/{}/_search?ignore_unavailable=true&allow_no_indices=true'.format(es_url, index)
    resp = requests.post(url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
    if resp.status_code != 200:
        log.info("The return of ES is error with code:  %s", resp.status_code)
        return []
    data = resp.json()
    lines = [it['_source']['raw'] for it in data['hits']['hits']]
    # ES returned newest-first; present the log oldest-first.
    lines.reverse()
    return lines


def _es_task_log_filter(operator, hostname):
    """Build the ES bool-filter terms selecting one task run's log stream."""
    if operator == 'FLINK' or operator == 'FlinkSubmitOperator':
        return [
            {"term": {"fields.component": "jobmanager"}},
            {"term": {"fields.daemon": hostname}},
        ]
    return [{"term": {"fields.podname": hostname}}]


def download_task_log(hostname, operator, start):
    """Stream a task's log from Elasticsearch as utf-8 encoded chunks.

    When abs(start) exceeds the ES from+size window (10000) a scroll search
    in ascending order is used, yielding one chunk per page; otherwise a
    single newest-first query is issued and reversed to chronological order.

    :param hostname: pod name (or Flink daemon host) that produced the log
    :param operator: operator type; Flink logs are filtered to the jobmanager
    :param start: negative offset from the end; abs(start) lines are wanted
    """
    size = abs(start)
    es_url = conf.get('elastic', 'url')
    es_user = conf.get('elastic', 'user')
    es_password = conf.get('elastic', 'password')
    auth = HTTPBasicAuth(es_user, es_password)
    index = 'all-logs-*'
    filters = _es_task_log_filter(operator, hostname)
    if size > 10000:
        pay_load = {
            "_source": ["raw", "row_time"],
            "query": {"bool": {"filter": filters}},
            "sort": [{"row_time": {"order": "asc"}}],
            "size": 1000,
        }
        url = '{}/{}/_search?scroll=1m'.format(es_url, index)
        resp = requests.post(url, json=pay_load, auth=auth)
        if resp.status_code == 200 and resp.json() and resp.json().get('_scroll_id'):
            data = resp.json()
            scroll_id = data['_scroll_id']
            items = [it['_source']['raw'] for it in data['hits']['hits']]
            yield ('\n'.join(items) + '\n').encode()
            # Keep pulling scroll pages until ES returns an empty page or errors.
            while True:
                scroll_payload = {"scroll": "1m", "scroll_id": scroll_id}
                resp = requests.post('{}/_search/scroll'.format(es_url),
                                     json=scroll_payload, auth=auth)
                if resp.status_code != 200:
                    break
                items = [it['_source']['raw'] for it in resp.json()['hits']['hits']]
                if not items:
                    break
                yield ('\n'.join(items) + '\n').encode()
    else:
        pay_load = {
            "_source": ["raw", "row_time"],
            "query": {"bool": {"filter": filters}},
            "sort": [{"row_time": {"order": "desc"}}],
            "size": size,
        }
        url = '{}/{}/_search?ignore_unavailable=true&allow_no_indices=true'.format(es_url, index)
        resp = requests.post(url, json=pay_load, auth=auth)
        if resp.status_code == 200:
            items = [it['_source']['raw'] for it in resp.json()['hits']['hits']]
            # Newest-first result; reverse to chronological order.
            items.reverse()
            yield ('\n'.join(items) + '\n').encode()
        else:
            log.info("The return of ES is error with code:  %s", resp.status_code)

def download_executor_log(spark_app, spark_exec_id, start):
    """Stream one Spark executor's log from Elasticsearch as encoded chunks.

    Small requests (abs(start) <= 10000, within the ES from+size window) run a
    single newest-first query whose hits are reversed to chronological order;
    larger requests use a scroll search in ascending order, yielding one chunk
    per scroll page.

    :param spark_app: value matched against ``fields.spark_app``
    :param spark_exec_id: value matched against ``fields.spark_exec_id``
    :param start: offset; abs(start) is the number of lines wanted
        (presumably a negative tail offset — confirm with callers)
    """
    size = abs(start)
    es_url = conf.get('elastic', 'url')
    es_user = conf.get('elastic', 'user')
    es_password = conf.get('elastic', 'password')
    index = 'all-logs-*'
    if size <= 10000:
        # Single query: newest `size` lines, then reversed below.
        pay_load = {
            "_source": ["raw", "row_time"],
            "query": {
                "bool": {
                    "filter": [
                        {
                            "term": {
                                "fields.spark_app": spark_app
                            }
                        },
                        {
                            "term": {
                                "fields.spark_exec_id": spark_exec_id
                            }
                        }
                    ]
                }
            },
            "sort": [
                {
                    "row_time": {
                        "order": "desc"
                    }
                }
            ],
            "size": size
        }

        url = '{}/{}/_search?ignore_unavailable=true&allow_no_indices=true'.format(es_url, index)
        resp = requests.post(url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
        if resp.status_code == 200:
            data = resp.json()
            items = [it['_source']['raw'] for it in data['hits']['hits']]
            # ES returned newest-first; present the log oldest-first.
            items.reverse()
            chunk = '\n'.join(items) + '\n'
            yield chunk.encode()
    else:
        # Beyond the from+size window: use a scroll search in ascending
        # row_time order, 1000 hits per page.
        pay_load = {
            "_source": ["raw", "row_time"],
            "query": {
                "bool": {
                    "filter": [
                        {
                            "term": {
                                "fields.spark_app": spark_app
                            }
                        },
                        {
                            "term": {
                                "fields.spark_exec_id": spark_exec_id
                            }
                        }
                    ]
                }
            },
            "sort": [
                {
                    "row_time": {
                        "order": "asc"
                    }
                }
            ],
            "size": 1000
        }
        url = '{}/{}/_search?scroll=1m'.format(es_url, index)
        resp = requests.post(url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
        if resp.status_code == 200 and resp.json() and resp.json().get('_scroll_id'):
            data = resp.json()
            scroll_id = data['_scroll_id']
            items = [it['_source']['raw'] for it in data['hits']['hits']]
            chunk = '\n'.join(items) + '\n'
            yield chunk.encode()
            # Keep pulling scroll pages until ES returns no hits or errors.
            while True:
                pay_load = {
                    "scroll": "1m",
                    "scroll_id": scroll_id
                }
                url = '{}/_search/scroll'.format(es_url)
                resp = requests.post(url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
                if resp.status_code == 200:
                    data = resp.json()
                    items = [it['_source']['raw'] for it in data['hits']['hits']]
                    if items and len(items) > 0:
                        chunk = '\n'.join(items) + '\n'
                        yield chunk.encode()
                    else:
                        break
                else:
                    break



def _blood_db_tables(sql):
    """Flatten the cluster-decoded SQL tables into a 'db:table,' string."""
    args_sql = EtlUtil.sql_cluster_decoder(sql)
    return ''.join(k + ":" + t + "," for k in args_sql.keys() for t in args_sql[k])


def format_blood(task):
    """Enrich a DATAFACTORY task's param with datasource lineage (blood) info.

    Resolves the datasource ids in the source/sink models to concrete
    ip/port/name plus database-table lists, stores the enriched param on
    ``task.blood`` and returns the task for chaining. Non-DATAFACTORY
    tasks pass through untouched.
    """
    if task.operator != OperatorType.DATAFACTORY:
        return task
    task_param = task.param
    source_model = task_param['sourceModel']
    sink_model = task_param['sinkModel']

    if source_model['type'] == EtlUtil.SOURCE_TYPE_HIVE:
        hive_model = source_model['hiveModel']
        datasource = Datasource.get_datasource(hive_model['datasourceId'])
        db_table = _blood_db_tables(hive_model['sql'])
        hive_model['ip'] = datasource.ip
        hive_model['port'] = datasource.port
        hive_model['name'] = datasource.name
        # BUG FIX: old code assigned datasource.name here, discarding the
        # parsed db:table list it had just computed.
        hive_model['databaseAndTables'] = db_table
        source_model['hiveModel'] = hive_model

    elif source_model['type'] == EtlUtil.SOURCE_TYPE_JDBC:
        jdbc_model = source_model['jdbcModel']
        datasource = Datasource.get_datasource(jdbc_model['datasourceId'])
        db_table = _blood_db_tables(jdbc_model['sql'])
        jdbc_model['ip'] = datasource.ip
        jdbc_model['port'] = datasource.port
        jdbc_model['name'] = datasource.name
        jdbc_model['databaseAndTables'] = db_table
        # BUG FIX: old code wrote the jdbc model back under 'hiveModel'.
        source_model['jdbcModel'] = jdbc_model

    if sink_model['type'] == EtlUtil.SINK_TYPE_HIVE:
        hive_model = sink_model['hiveModel']
        datasource = Datasource.get_datasource(hive_model['datasourceId'])
        writer_path = EtlUtil.HIVE_WRITER_PATH + hive_model['database'] + ".db/" + hive_model['table']
        hive_model['ip'] = datasource.ip
        hive_model['port'] = datasource.port
        hive_model['name'] = datasource.name
        hive_model['hdfsPath'] = writer_path
        sink_model['hiveModel'] = hive_model

    elif sink_model['type'] == EtlUtil.SINK_TYPE_JDBC:
        jdbc_model = sink_model['jdbcModel']
        datasource = Datasource.get_datasource(jdbc_model['datasourceId'])
        jdbc_model['ip'] = datasource.ip
        jdbc_model['port'] = datasource.port
        jdbc_model['name'] = datasource.name
        # BUG FIX: old code wrote the sink jdbc model into
        # source_model['hiveModel'].
        sink_model['jdbcModel'] = jdbc_model

    task_param['sourceModel'] = source_model
    task_param['sinkModel'] = sink_model
    task.blood = task_param
    return task


@provide_session
def get_by_start_date(project_id, start_time, end_time, session=None):
    """Count a project's task instances per state inside a time window.

    :param start_time: window start in epoch milliseconds
    :param end_time: window end in epoch milliseconds
    :return: dict mapping state name -> count (None states counted as "none")
    """
    rs = {
        "scheduled": 0,
        "none": 0,
        "queued": 0,
        "running": 0,
        "success": 0,
        "shutdown": 0,
        "failed": 0,
        "up_for_retry": 0,
        "up_for_reschedule": 0,
        "upstream_failed": 0,
        "skipped": 0,
        "sensing": 0
    }
    tis = TaskInstance.get_by_start_date(project_id, dates.from_milliseconds(start_time),
                                         dates.from_milliseconds(end_time),
                                         session)
    for ti in (tis or []):
        key = "none" if ti.state is None else ti.state
        # .get keeps states outside the preset list (e.g. states added by a
        # newer Airflow) from raising KeyError — they are counted too.
        rs[key] = rs.get(key, 0) + 1
    return rs


def get_task_metrics(start_time, end_time, type, project_id):
    """Per-minute max cpu/memory/count aggregation from the task metrics index.

    :param start_time: range start (same unit as the indexed ``date`` field)
    :param end_time: range end
    :param type: optional ``type`` term filter (name kept for caller compat)
    :param project_id: optional ``project_id`` term filter
    :return: list of {'date','memory','cpu','count'} buckets; [] on failure
    """
    pay_load = {
        "query": {
            "bool": {
                "filter": {
                    "range": {
                        "date": {
                            "gte": start_time,
                            "lte": end_time
                        }
                    }
                },
                "must": []
            }
        },
        "size": 0,
        "sort": [
            {
                "date": {
                    "order": "asc"
                }
            }
        ],
        "aggs": {
            "dataGroup": {
                "date_histogram": {
                    "field": "date",
                    "interval": "minute",
                    "extended_bounds": {
                        "min": start_time,
                        "max": end_time
                    }
                },
                "aggs": {
                    "cpu": {
                        "max": {
                            "field": "cpu"
                        }
                    },
                    "memory": {
                        "max": {
                            "field": "memory"
                        }
                    },
                    "count": {
                        "max": {
                            "field": "count"
                        }
                    }
                }
            }
        }
    }

    if type:
        pay_load["query"]["bool"]["must"].append({
            "term": {
                "type": type
            }
        })
    if project_id:
        pay_load["query"]["bool"]["must"].append({
            "term": {
                "project_id": project_id
            }
        })

    es_url = conf.get('elastic', 'url')
    es_user = conf.get('elastic', 'user')
    es_password = conf.get('elastic', 'password')
    index = 'idata_task_metrics*'
    url = '{}/{}/_search'.format(es_url, index)
    resp = requests.post(url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
    if resp.status_code != 200 or not resp.json():
        # BUG FIX: old code fell off the end and returned None on ES errors;
        # return [] like every other failure path.
        return []
    datas = resp.json()
    buckets = ((datas.get("aggregations") or {}).get("dataGroup") or {}).get("buckets")
    if not buckets:
        return []
    rs = []
    for data in buckets:
        r = {
            "date": data.get("key"),
            "memory": 0,
            "cpu": 0,
            "count": 0
        }
        for metric in ("memory", "cpu", "count"):
            value = (data.get(metric) or {}).get("value")
            if value is not None:
                r[metric] = value
        rs.append(r)
    return rs


def _stream_task_json(task, session, proxy_urls):
    """Serialize one streaming task with its latest instance attached.

    ``proxy_urls`` is the (spark_run, flink_run, app_end, app, proxy)
    tuple read from config once per request.
    """
    spark_run_proxy_url, flink_run_proxy_url, app_end_proxy_url, app_url, proxy_url = proxy_urls
    _task_instance = None
    task_instance = TaskInstance.get_by_id(task.id, session)
    if task_instance:
        _task_instance = task_instance.to_json()
        CommonUtil.set_proxy_url(_task_instance, spark_run_proxy_url, flink_run_proxy_url,
                                 app_end_proxy_url, app_url, proxy_url)
        start_date = _task_instance.get("start_date", None)
        duration = _task_instance.get("duration", None)
        if duration is None:
            # Still-running instances have no duration yet; derive it from now.
            if start_date:
                duration = (dates.to_milliseconds(datetime.now()) - start_date) / 1000
            else:
                duration = 0
            _task_instance['duration'] = duration
    return task.to_json(task_instance=_task_instance)


@provide_session
def get_stream_tasks(project_id, name, operator, page, session=None):
    """List streaming tasks with their latest instance; ``page == 0`` disables paging.

    :return: a plain list when page == 0, otherwise the paginated result JSON
    """
    proxy_urls = (
        conf.get('yarn', 'spark_app_run_proxy_url'),
        conf.get('yarn', 'flink_app_run_proxy_url'),
        conf.get('yarn', 'app_end_proxy_url'),
        conf.get('yarn', 'app_url'),
        conf.get('yarn', 'proxy_url'),
    )
    if page == 0:
        tasks = Task.get_stream(project_id=project_id, name=name, operator=operator, session=session)
        return [_stream_task_json(task, session, proxy_urls) for task in tasks]
    result = Task.get_stream(project_id=project_id, page=page, name=name, operator=operator, session=session)
    result.items = [_stream_task_json(task, session, proxy_urls) for task in result.items]
    return result.to_json()


def _parse_flume_hit(hit):
    """Map one ES hit onto the flat flume metric record the UI expects."""
    r = {
        "date": 0,
        "count": 0,
        "size": 0,
        "delay": 0,
        "name": ''
    }
    data = hit.get("_source", None)
    if data:
        r['date'] = data.get("timestamp", '')
        r['count'] = data.get("flume_Import_eventAcceptedCount", '')
        r['size'] = data.get("flume_Import_eventBytes", '')
        r['delay'] = data.get("flume_Import_delay", '')
        r['name'] = data.get("flume_Import_componentName", '')
    return r


def get_datafactory_metrics(start_time, end_time, t_id, t_name):
    """Scroll all flume import metrics for one datafactory component.

    The component name is ``<t_name>_<t_id>``; the time range is converted
    from epoch milliseconds to local ISO timestamps for the ES range filter.

    :return: list of metric records; [] when the query fails
    """
    name = t_name + "_" + str(t_id)
    s_t_s = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(start_time / 1000))
    e_t_s = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(end_time / 1000))
    pay_load = {
        "query": {
            "bool": {
                "filter": {
                    "range": {
                        "timestamp": {
                            "gte": s_t_s,
                            "lte": e_t_s
                        }
                    }
                },
                "must": [
                    {
                        "term": {
                            "flume_Import_componentName": name
                        }
                    }
                ]
            }
        },
        "size": 1000,
        "sort": [
            {
                "date": {
                    "order": "asc"
                }
            }
        ]
    }

    es_url = conf.get('elastic', 'url')
    es_user = conf.get('elastic', 'user')
    es_password = conf.get('elastic', 'password')
    auth = HTTPBasicAuth(es_user, es_password)
    index = 'idata_datafactory_metrics-2*'
    url = '{}/{}/_search?scroll=1m'.format(es_url, index)
    resp = requests.post(url, json=pay_load, auth=auth)
    rs = []
    if resp.status_code != 200 or not resp.json():
        # BUG FIX: old code returned None implicitly on ES errors;
        # return [] like the other empty-result paths.
        return rs
    datas = resp.json()
    items = datas.get("hits", None)
    scroll_id = datas.get("_scroll_id", None)
    if items is None:
        return rs
    hits = items.get("hits", None)
    if not hits:
        return rs
    rs.extend(_parse_flume_hit(hit) for hit in hits)
    # Drain the remaining scroll pages until ES returns no hits or errors.
    scroll_payload = {
        "scroll": "1m",
        "scroll_id": scroll_id
    }
    scroll_url = '{}/_search/scroll'.format(es_url)
    while True:
        resp = requests.post(scroll_url, json=scroll_payload, auth=auth)
        if resp.status_code != 200 or not resp.json():
            break
        hits = (resp.json().get("hits") or {}).get("hits")
        if not hits:
            break
        rs.extend(_parse_flume_hit(hit) for hit in hits)
    return rs
