import logging
import time
from datetime import datetime

from kubernetes import client
from kubernetes.client.rest import ApiException
from sqlalchemy.orm.session import Session

from airflow.models import TaskInstance, DagRun
from airflow.models.dag import DagModel
from airflow.exceptions import AirflowException
from airflow.jobs.backfill_job import BackfillJob
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.kube_config import KubeConfig
from airflow.kubernetes.kubernetes_helper_functions import create_pod_id
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.utils import dates
from airflow.utils.session import provide_session
from airflow.utils.state import State
from airflow.utils import timezone

log = logging.getLogger(__name__)


def run_in_pod(dag_id, start_date: datetime, end_date: datetime, is_synchronous, synchronous_num):
    """Launch a Kubernetes pod that runs ``airflow dags backfill`` for *dag_id*.

    The pod is built from the configured pod template and started
    asynchronously; this function returns as soon as the pod is submitted.

    :param dag_id: id of the DAG to backfill
    :param start_date: backfill window start (passed as ``-s`` in ISO format)
    :param end_date: backfill window end (passed as ``-e`` in ISO format)
    :param is_synchronous: when truthy, append ``--synchronous synchronous_num``
        to the backfill command
    :param synchronous_num: value for ``--synchronous``; only used when
        *is_synchronous* is truthy
    """
    kube_config = KubeConfig()
    kube_client = get_kube_client()

    # Build the command once; the two variants only differ by the
    # optional --synchronous argument pair.
    command = ['airflow', 'dags', 'backfill', dag_id,
               '-s', start_date.isoformat(), '-e', end_date.isoformat()]
    if is_synchronous:
        command += ['--synchronous', str(synchronous_num)]
    command += ['--reset-dagruns', '--local']

    base_worker_pod = PodGenerator.deserialize_model_file(kube_config.pod_template_file)

    pod = PodGenerator.construct_pod(
        namespace=kube_config.kube_namespace,
        scheduler_job_id='0',
        pod_id=create_pod_id(dag_id, 'backfill'),
        dag_id=dag_id,
        task_id='backfill',
        kube_image=kube_config.kube_image,
        try_number=1,
        date=start_date,
        args=command,
        pod_override_object=None,
        base_worker_pod=base_worker_pod,
    )

    log.info("Kubernetes running for command %s", command)

    # Fire-and-forget: the pod's completion is tracked elsewhere
    # (via the BackfillJob row it creates).
    launcher = PodLauncher(kube_client=kube_client)
    launcher.run_pod_async(pod, **kube_config.kube_client_request_args)


def delete_pod(pod_id: str):
    """Delete the pod *pod_id* in the configured namespace.

    A 404 from the API server is treated as success (the pod is already
    gone); any other ``ApiException`` is re-raised.

    :param pod_id: name of the pod to delete
    :return: the API delete response, or ``None`` when the pod was
        already deleted (404) or the call failed before completing.
    """
    # NOTE: the original annotation claimed ``-> None`` but the delete
    # response is returned; the annotation was dropped to match behavior.
    delete_resp = None
    try:
        kube_client = get_kube_client()
        kube_config = KubeConfig()
        log.info("Deleting pod %s in namespace %s", pod_id, kube_config.kube_namespace)
        delete_resp = kube_client.delete_namespaced_pod(
            pod_id,
            kube_config.kube_namespace,
            body=client.V1DeleteOptions(grace_period_seconds=5),
            **kube_config.kube_client_request_args,
        )

        log.info("Deleting pod delete_resp: %s ", delete_resp)
    except ApiException as e:
        log.error("delete_pod exception: %s ", e)
        # If the pod is already deleted, ignore the error.
        if e.status != 404:
            raise

    return delete_resp


@provide_session
def check(dag_id: str, session: Session = None):
    """Validate that *dag_id* may start a new backfill.

    Raises ``AirflowException`` when the DAG is unknown, when it is
    paused, or when a backfill job for it is already running.
    """
    dag_model = DagModel.get_dagmodel(dag_id)
    if dag_model is None:
        raise AirflowException('未找到工作流， 请确认已正确保存')
    if dag_model.is_paused:
        raise AirflowException('工作流未开启时间调度， 不能执行 backfill')

    running_job = (
        session.query(BackfillJob)
        .filter(BackfillJob.dag_id == dag_id, BackfillJob.state == State.RUNNING)
        .first()
    )
    if running_job:
        raise AirflowException('该工作流已经有 Backfill 任务在运行')


@provide_session
def kill_pod(job_id: int, session: Session = None):
    """Delete the pod backing the BackfillJob identified by *job_id*.

    Raises ``AirflowException`` when no such job row exists.
    """
    backfill_job = (
        session.query(BackfillJob)
        .filter(BackfillJob.id == job_id)
        .first()
    )
    if backfill_job is None:
        raise AirflowException('job_id not found')
    # BackfillJob.hostname holds the pod name for pod-launched backfills.
    delete_pod(backfill_job.hostname)


@provide_session
def set_task_failed(job_id: int, session: Session = None):
    """Mark leftover task instances of a finished backfill job as FAILED.

    Polls the BackfillJob row up to 3 times (3s apart, after an initial
    3s grace period). Once the job reaches a finished state, every
    unfinished TaskInstance belonging to a FAILED DagRun started after the
    job began is forced to FAILED so nothing stays stuck in a
    running/queued state.

    :param job_id: id of the BackfillJob to inspect
    """
    time.sleep(3)
    for _ in range(3):
        job = session.query(BackfillJob).filter(BackfillJob.id == job_id).first()
        if job is None:
            # Guard: original code would raise AttributeError on a missing row.
            log.error("set_task_failed: BackfillJob %s not found", job_id)
            return
        if job.state in State.finished:
            failed_runs = (
                session.query(DagRun)
                .filter(
                    DagRun.dag_id == job.dag_id,
                    DagRun.start_date >= job.start_date,
                    DagRun.state == State.FAILED,
                )
                .all()
            )
            for dag_run in failed_runs:
                unfinished_tis = (
                    session.query(TaskInstance)
                    .filter(
                        TaskInstance.dag_id == job.dag_id,
                        TaskInstance.execution_date == dag_run.execution_date,
                        TaskInstance.state.notin_(list(State.finished)),
                    )
                    .all()
                )
                for ti in unfinished_tis:
                    ti.state = State.FAILED
                    session.merge(ti)
            return
        time.sleep(3)
    log.error("set_task_failed: job %s did not finish after 3 checks", job_id)


@provide_session
def set_backfill_job_failed(job_id: int, session: Session = None):
    """Force a BackfillJob row into the FAILED state.

    Sleeps 5 seconds first — presumably to give a just-killed pod time
    to write its final state (TODO confirm with caller) — then marks the
    job FAILED with the current UTC time as its end date.

    :param job_id: id of the BackfillJob to fail
    """
    time.sleep(5)
    job = session.query(BackfillJob).filter(BackfillJob.id == job_id).first()
    if job is None:
        # Guard: original code would raise AttributeError on a missing row.
        log.error("set_backfill_job_failed: BackfillJob %s not found", job_id)
        return
    # Fix: the old message said "after sleep 3" while the code sleeps 5.
    log.info("after sleep 5, set_backfill_job_failed job.state: %s", job.state)
    job.state = State.FAILED
    job.end_date = timezone.utcnow()
    session.merge(job)


@provide_session
def query_job(dag_id: str, session: Session = None):
    """Return the 10 most recent backfill jobs for *dag_id*.

    Each job is rendered as a dict with id, dag_id, state, hostname and
    millisecond start/end timestamps, newest first.
    """
    recent_jobs = (
        session.query(BackfillJob)
        .filter(BackfillJob.dag_id == dag_id)
        .order_by(BackfillJob.start_date.desc())
        .limit(10)
        .all()
    )
    summaries = []
    for job in recent_jobs:
        summaries.append({
            'id': job.id,
            'dag_id': dag_id,
            'state': job.state,
            'hostname': job.hostname,
            'start_date': dates.to_milliseconds(job.start_date),
            'end_date': dates.to_milliseconds(job.end_date),
        })
    return summaries

