import logging
import time
from multiprocessing import Process

import requests
from requests.auth import HTTPBasicAuth

from airflow.patsnap.util.etl_util import EtlUtil
from airflow.configuration import conf
from airflow.executors.executor_loader import ExecutorLoader
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.kube_config import KubeConfig
from airflow.kubernetes.kubernetes_helper_functions import create_pod_id
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.models import TaskInstance, DagPickle
from airflow.models.idata_datasource import Datasource
from airflow.models.idata_task import Task
from airflow.models.idata_task_file import TaskFile
from airflow.utils import dates
from airflow.utils.session import provide_session
from airflow.utils.state import State

log = logging.getLogger(__name__)


def start_executor(ti, pickle_id):
    """Run a single task instance to completion on the default executor.

    Queues the task instance with all dependency checks disabled, then
    heartbeats the executor every 5 seconds until it reports no running
    tasks, and finally shuts the executor down.

    :param ti: the TaskInstance to execute
    :param pickle_id: id of the DagPickle the worker should load the DAG from
    """
    executor = ExecutorLoader.get_default_executor()
    executor.start()
    executor.queue_task_instance(
        ti,
        pickle_id=pickle_id,
        ignore_all_deps=True,
        ignore_depends_on_past=True,
        ignore_task_deps=True,
        ignore_ti_state=True
    )
    # Poll until the executor drains; each heartbeat advances task state.
    while True:
        executor.heartbeat()
        if not executor.running:
            break
        time.sleep(5)

    executor.end()


def run_in_process(task: Task):
    """Run a database-defined task immediately in a local child process.

    Pickles the task's DAG so the executor can load it, prepares a task
    instance, and hands execution off to ``start_executor`` in a
    separate process (fire-and-forget).

    :param task: task definition loaded from the database
    :return: JSON representation of the created task instance
    """
    op = task.create_operator()
    dag_pickle = DagPickle(op.dag)
    dag_pickle.save()
    log.info('pickle the DAG pickle_id {}'.format(dag_pickle.id))

    ti = TaskInstance(op, dates.sh_now())
    ti.refresh_from_db()
    ti.init_run_context()

    # Detach execution into its own process so this call returns at once.
    worker = Process(target=start_executor, args=(ti, dag_pickle.id))
    worker.start()

    return ti.to_json()


@provide_session
def run_by_scheduler(task: Task, session=None):
    """Persist a SCHEDULED task instance for the scheduler to pick up.

    :param task: task definition loaded from the database
    :param session: SQLAlchemy session injected by ``provide_session``
    :return: JSON representation of the queued task instance
    """
    op = task.create_operator()
    dag_pickle = DagPickle(op.dag)
    dag_pickle.save()
    log.info('pickle the DAG pickle_id {}'.format(dag_pickle.id))
    instance = TaskInstance(op, dates.sh_now(), external_trigger=1)
    instance.state = State.SCHEDULED
    session.add(instance)
    return instance.to_json()


def run_in_pod(task: Task):
    """Run a database-defined task asynchronously in a Kubernetes pod.

    Pickles the task's DAG, builds a worker pod from the configured
    template, and launches it without waiting for completion.

    :param task: task definition loaded from the database
    :return: JSON representation of the task instance for the launched run
    """
    op = task.create_operator()
    dag_pickle = DagPickle(op.dag)
    dag_pickle.save()
    log.info('pickle the DAG pickle_id {}'.format(dag_pickle.id))

    execution_date = dates.sh_now()
    kube_config = KubeConfig()

    # CLI command the worker pod will execute.
    command = [
        'airflow', 'tasks', 'run',
        op.dag_id, op.task_id, execution_date.isoformat(),
        '--local', '--ignore-depends-on-past',
    ]

    template = PodGenerator.deserialize_model_file(kube_config.pod_template_file)
    pod = PodGenerator.construct_pod(
        namespace=kube_config.kube_namespace,
        scheduler_job_id='0',
        pod_id=create_pod_id(op.dag_id, op.task_id),
        dag_id=op.dag_id,
        task_id=op.task_id,
        kube_image=kube_config.kube_image,
        try_number=1,
        date=execution_date,
        args=command,
        pod_override_object=None,
        base_worker_pod=template,
        task_type=op,
        bash_bind_machine=kube_config.bash_bind_machine,
    )

    log.info("Kubernetes running for command %s", command)

    launcher = PodLauncher(kube_client=get_kube_client())
    launcher.run_pod_async(pod, **kube_config.kube_client_request_args)

    return TaskInstance(op, execution_date).to_json()


@provide_session
def insert_task(task, session=None):
    """Insert a task (with formatted lineage) and its associated file rows.

    :param task: task to persist; ``task.files`` is a comma-separated path list
    :param session: SQLAlchemy session injected by ``provide_session``
    """
    # Normalize lineage ("blood") info before persisting.
    task = format_blood(task)
    # Pass the session explicitly so the task row and its TaskFile rows
    # share one transaction (consistent with update_task/add_task_file).
    task.insert(session=session)
    if task.files:
        for path in task.files.split(','):
            TaskFile(task_id=task.id, file_path=path).insert(session=session)


@provide_session
def update_task(task, session=None):
    """Update a task after normalizing its lineage ("blood") info.

    :param task: task to update
    :param session: SQLAlchemy session injected by ``provide_session``
    """
    normalized = format_blood(task)
    normalized.update(session=session)


@provide_session
def add_task_file(task_id, file_path, session=None):
    """Record a new file for a task and append it to the task's file list.

    :param task_id: id of the owning task
    :param file_path: path of the file to attach
    :param session: SQLAlchemy session injected by ``provide_session``
    """
    TaskFile(task_id=task_id, file_path=file_path).insert(session=session)
    task = Task.get_task(task_id, session=session)
    # `files` is kept denormalized as a comma-separated string on the row.
    if task.files:
        task.files = ','.join(task.files.split(',') + [file_path])
    else:
        task.files = file_path
    task.update(session=session)


@provide_session
def delete_task_file(task_id, file_path, session=None):
    """Remove a file record from a task and drop it from the task's file list.

    :param task_id: id of the owning task
    :param file_path: path of the file to detach
    :param session: SQLAlchemy session injected by ``provide_session``
    """
    TaskFile(task_id=task_id, file_path=file_path).delete(session=session)
    task = Task.get_task(task_id, session=session)
    if task.files:
        paths = task.files.split(',')
        # Guard: list.remove raises ValueError when the path is absent,
        # e.g. if the denormalized column drifted from the TaskFile rows.
        if file_path in paths:
            paths.remove(file_path)
        task.files = ','.join(paths) if paths else None
    task.update(session=session)


def get_task_log(hostname):
    """Fetch the last 200 log messages for a pod from Elasticsearch.

    :param hostname: pod name, matched against the ``fields.podname`` term
    :return: newline-joined messages in chronological order, or ``''``
             when the query fails
    """
    pay_load = {
        "_source": ["message"],
        "query": {
            "bool": {
                "filter": {
                    "term": {
                        "fields.podname": hostname
                    }
                }
            }
        },
        # Newest-first so the 200-doc window covers the tail of the log.
        "sort": [
            {
                "@timestamp": {
                    "order": "desc"
                }
            }
        ],
        "size": 200
    }

    rs = ''
    es_url = conf.get('elastic', 'url')
    es_user = conf.get('elastic', 'user')
    es_password = conf.get('elastic', 'password')
    index = 'idata-task-log-*'
    url = '{}/{}/_search?ignore_unavailable=true&allow_no_indices=true'.format(es_url, index)
    resp = requests.post(url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
    if resp.status_code == 200:
        data = resp.json()
        items = [it['_source']['message'] for it in data['hits']['hits']]
        # Hits arrived newest-first; flip back to chronological order.
        items.reverse()
        rs = '\n'.join(items) + '\n'
    else:
        # A failed query is an error condition, not informational.
        log.error("The return of ES is error with code:  %s", resp.status_code)

    return rs


def download_task_log(hostname):
    """Stream the full pod log from Elasticsearch via the scroll API.

    Yields utf-8 encoded chunks of newline-joined messages in
    chronological order; yields nothing when the initial query fails.

    :param hostname: pod name, matched against the ``fields.podname`` term
    """
    pay_load = {
        "_source": ["message"],
        "query": {
            "bool": {
                "filter": {
                    "term": {
                        "fields.podname": hostname
                    }
                }
            }
        },
        "sort": [
            {
                "@timestamp": {
                    "order": "asc"
                }
            }
        ],
        "size": 1000
    }

    es_url = conf.get('elastic', 'url')
    es_user = conf.get('elastic', 'user')
    es_password = conf.get('elastic', 'password')
    index = 'idata-task-log-*'
    url = '{}/{}/_search?scroll=1m'.format(es_url, index)
    resp = requests.post(url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
    if resp.status_code != 200:
        return
    # Parse the body once instead of three times.
    data = resp.json()
    if not data or not data.get('_scroll_id'):
        return

    scroll_id = data['_scroll_id']
    items = [it['_source']['message'] for it in data['hits']['hits']]
    chunk = '\n'.join(items) + '\n'
    yield chunk.encode()

    scroll_url = '{}/_search/scroll'.format(es_url)
    while True:
        pay_load = {
            "scroll": "1m",
            "scroll_id": scroll_id
        }
        resp = requests.post(scroll_url, json=pay_load, auth=HTTPBasicAuth(es_user, es_password))
        if resp.status_code != 200:
            break
        data = resp.json()
        items = [it['_source']['message'] for it in data['hits']['hits']]
        if not items:
            break
        chunk = '\n'.join(items) + '\n'
        yield chunk.encode()
    # NOTE(review): the scroll context is left to expire on its own (1m);
    # an explicit DELETE /_search/scroll would free ES resources sooner.


def _apply_datasource(model):
    """Copy ip/port/name from the model's datasource onto the model dict."""
    datasource = Datasource.get_datasource(model['datasourceId'])
    model['ip'] = datasource.ip
    model['port'] = datasource.port
    model['name'] = datasource.name
    return datasource


def _join_db_tables(sql):
    """Flatten ``EtlUtil.sql_cluster_decoder`` output ({db: [tables]}) into
    a ``db:table,`` concatenated string (trailing comma preserved, matching
    the original format)."""
    args_sql = EtlUtil.sql_cluster_decoder(sql)
    parts = []
    for db, tables in args_sql.items():
        for table in tables:
            parts.append(db + ":" + table + ",")
    return ''.join(parts)


def format_blood(task):
    """Enrich the task's source/sink models with datasource and lineage
    ("blood") details and store the result on ``task.blood``.

    :param task: task whose ``param`` holds ``sourceModel``/``sinkModel`` dicts
    :return: the same task, with ``task.blood`` populated
    """
    task_param = task.param
    source_model = task_param['sourceModel']
    sink_model = task_param['sinkModel']

    if source_model['type'] == EtlUtil.SOURCE_TYPE_HIVE:
        hive_model = source_model['hiveModel']
        _apply_datasource(hive_model)
        # BUG FIX: previously assigned datasource.name here, discarding the
        # parsed db:table list (the jdbc branch shows the intended value).
        hive_model['databaseAndTables'] = _join_db_tables(hive_model['sql'])
        source_model['hiveModel'] = hive_model

    elif source_model['type'] == EtlUtil.SOURCE_TYPE_JDBC:
        jdbc_model = source_model['jdbcModel']
        _apply_datasource(jdbc_model)
        jdbc_model['databaseAndTables'] = _join_db_tables(jdbc_model['sql'])
        # BUG FIX: was stored under 'hiveModel', clobbering the hive entry.
        source_model['jdbcModel'] = jdbc_model

    if sink_model['type'] == EtlUtil.SINK_TYPE_HIVE:
        hive_model = sink_model['hiveModel']
        _apply_datasource(hive_model)
        hive_model['hdfsPath'] = (EtlUtil.HIVE_WRITER_PATH + hive_model['database']
                                  + ".db/" + hive_model['table'])
        sink_model['hiveModel'] = hive_model

    elif sink_model['type'] == EtlUtil.SINK_TYPE_JDBC:
        jdbc_model = sink_model['jdbcModel']
        _apply_datasource(jdbc_model)
        # BUG FIX: was written to source_model['hiveModel'], leaving the
        # sink model un-updated and corrupting the source model.
        sink_model['jdbcModel'] = jdbc_model

    task_param['sourceModel'] = source_model
    task_param['sinkModel'] = sink_model
    task.blood = task_param
    return task