import copy
import json
import random

import numpy
from flask import Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from sqlalchemy import (
    Boolean,
    Column,
    Enum,
    ForeignKey,
    Integer,
    String,
    Text,
)
from sqlalchemy.orm import relationship

from myapp.app import app, db
from myapp.models.base import MyappModelBase
from myapp.models.helpers import AuditMixinNullable
from myapp.models.model_job import Task
from myapp.third.k8s import py_k8s

# Shared SQLAlchemy metadata object from flask_appbuilder's declarative base.
metadata = Model.metadata
# Application configuration; read below for cluster CRD info ('CRD_INFO').
conf = app.config


class ReserveTemplate(Model, AuditMixinNullable, MyappModelBase):
    """Stored pipeline template.

    Persists the DAG definition (``dag_json``), the front-end editor layout
    (``expand``), the schedule and runtime settings of a pipeline, and
    provides helpers that keep those JSON columns consistent with the
    ``Task`` rows belonging to this pipeline.
    """

    __tablename__ = 'reserve_template'
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False, unique=True)
    describe = Column(String(200), nullable=False)
    project_id = Column(Integer, ForeignKey('project.id'), default=5, nullable=True)  # foreign key to project
    project = relationship('Project', foreign_keys=[project_id])
    dag_json = Column(Text, nullable=False, default='{}')
    namespace = Column(String(100), default='pipeline')
    global_env = Column(String(500), default='')
    schedule_type = Column(Enum('once', 'crontab'), nullable=False, default='once')
    cron_time = Column(String(100))  # crontab schedule expression
    cronjob_start_time = Column(String(300), default='')
    pipeline_file = Column(Text(655360), default='')
    pipeline_argo_id = Column(String(100))
    version_id = Column(String(100))
    run_id = Column(String(100))
    node_selector = Column(String(100), default='cpu=true,train=true')
    image_pull_policy = Column(Enum('Always', 'IfNotPresent'), nullable=False, default='Always')
    parallelism = Column(Integer, nullable=False, default=1)  # max tasks of one pipeline running in parallel
    alert_status = Column(
        String(100), default='Pending,Running,Succeeded,Failed,Terminated'
    )  # statuses that trigger alerts: Pending,Running,Succeeded,Failed,Unknown,Waiting,Terminated
    alert_user = Column(String(300), default='')
    expand = Column(Text(65536), default='[]')
    depends_on_past = Column(Boolean, default=False)
    max_active_runs = Column(Integer, nullable=False, default=3)  # max pipeline instances running at the same time
    expired_limit = Column(
        Integer, nullable=False, default=1
    )  # number of expired instances to keep; when set it takes precedence over max_active_runs
    parameter = Column(Text(65536), default='{}')

    def __repr__(self):
        return self.name

    @property
    def pipeline_url(self):
        """HTML link to this pipeline's detail page, labelled with `describe`."""
        pipeline_url = '/pipeline_modelview/web/' + str(self.id)
        return Markup(f'<a target=_blank href="{pipeline_url}">{self.describe}</a>')

    @property
    def run_pipeline(self):
        """HTML link that triggers a run of this pipeline."""
        pipeline_run_url = '/pipeline_modelview/run_pipeline/' + str(self.id)
        return Markup(f'<a target=_blank href="{pipeline_run_url}">run</a>')

    @property
    def log(self):
        """HTML link to the run log, or plain text when no run exists yet."""
        if self.run_id:
            pipeline_url = '/pipeline_modelview/web/log/%s' % self.id
            return Markup(f'<a target=_blank href="{pipeline_url}">日志</a>')
        else:
            return Markup('日志')

    @property
    def pod(self):
        """HTML link to this pipeline's pod page."""
        url = '/pipeline_modelview/web/pod/%s' % self.id
        return Markup(f'<a target=_blank href="{url}">pod</a>')

    @renders('dag_json')
    def dag_json_html(self):
        """Render ``dag_json`` as preformatted HTML for list/detail views."""
        dag_json = self.dag_json or '{}'
        return Markup('<pre><code>' + dag_json + '</code></pre>')

    @renders('expand')
    def expand_html(self):
        """Render ``expand`` as preformatted HTML."""
        return Markup('<pre><code>' + self.expand + '</code></pre>')

    @renders('parameter')
    def parameter_html(self):
        """Render ``parameter`` as preformatted HTML."""
        return Markup('<pre><code>' + self.parameter + '</code></pre>')

    @renders('pipeline_file')
    def pipeline_file_html(self):
        """Render ``pipeline_file`` as preformatted HTML."""
        pipeline_file = self.pipeline_file or ''
        return Markup('<pre><code>' + pipeline_file + '</code></pre>')

    def get_tasks(self, dbsession=db.session):
        """Return every Task row whose ``pipeline_id`` is this pipeline's id."""
        return dbsession.query(Task).filter_by(pipeline_id=self.id).all()

    # @pysnooper.snoop()
    def delete_old_task(self, dbsession=db.session):
        """Delete Task rows that no longer appear in the front-end ``expand``.

        Best effort: any error is printed and the method returns silently.
        Fixes vs. the previous version: deletes through the *passed*
        ``dbsession`` (it used to query with ``dbsession`` but delete via the
        global ``db.session``), commits once instead of per row, and tolerates
        ``expand`` ids that arrive as ints (``int.isdecimal`` does not exist,
        which previously aborted all deletions via the broad except).
        """
        try:
            expand_tasks = json.loads(self.expand) if self.expand else []
            keep_ids = {
                int(expand_task['id'])
                for expand_task in expand_tasks
                if str(expand_task.get('id', '')).isdecimal()
            }
            tasks = dbsession.query(Task).filter_by(pipeline_id=self.id).all()
            stale_tasks = [task for task in tasks if task.id not in keep_ids]
            for task in stale_tasks:
                dbsession.delete(task)
            if stale_tasks:
                dbsession.commit()
        except Exception as e:
            print(e)

    def get_workflow(self):
        """Return the workflow CRDs currently labelled with this pipeline id.

        Best effort: k8s/config errors are printed and an empty list returned.
        """
        back_crds = []
        try:
            k8s_client = py_k8s.K8s(self.project.cluster.get('KUBECONFIG', ''))
            crd_info = conf.get('CRD_INFO', {}).get('workflow', {})
            if crd_info:
                crds = k8s_client.get_crd(
                    group=crd_info['group'],
                    version=crd_info['version'],
                    plural=crd_info['plural'],
                    namespace=self.namespace,
                    label_selector='pipeline-id=%s' % str(self.id),
                )
                for crd in crds:
                    # 'labels' is a JSON string on the returned crd dict;
                    # re-check pipeline-id explicitly on top of the selector.
                    if crd.get('labels', '{}'):
                        labels = json.loads(crd['labels'])
                        if labels.get('pipeline-id', '') == str(self.id):
                            back_crds.append(crd)
        except Exception as e:
            print(e)
        return back_crds

    @property
    def run_instance(self):
        """HTML link to the workflow instance list filtered by this pipeline id."""
        url = r'/workflow_modelview/list/?_flt_2_labels="pipeline-id"%3A+"' + '%s"' % self.id
        # print(url)
        return Markup(f"<a href='{url}'>{self.schedule_type}</a>")  # k8s enforces a length limit

    def fix_dag_json(self, dbsession=db.session):
        """Return a ``dag_json`` string corrected against the real Task rows.

        The stored dag may be stale: tasks can have been added or deleted
        without saving the pipeline. Missing tasks are added, deleted tasks
        are removed (including from every ``upstream`` list).
        """
        if not self.dag_json:
            return '{}'
        dag = json.loads(self.dag_json)
        tasks = self.get_tasks(dbsession)

        # Tasks were added but the pipeline was never saved: build a dag
        # containing every task with no dependencies.
        if not dag:
            if not tasks:
                return '{}'
            dag = {task.name: {} for task in tasks}
            return json.dumps(dag, indent=4, ensure_ascii=False)

        all_task_names = [task.name for task in tasks]

        # Add tasks that are missing from the dag.
        for task in tasks:
            if task.name not in dag:
                dag[task.name] = {}

        # Drop dag entries whose task has been deleted
        # (iterate over a snapshot of the keys while mutating).
        for dag_task_name in list(dag):
            if dag_task_name not in all_task_names:
                del dag[dag_task_name]

        # Remove deleted tasks from every task's upstream list.
        for dag_task_name in dag:
            upstream_tasks = dag[dag_task_name].get('upstream', [])
            dag[dag_task_name]['upstream'] = [
                upstream_task
                for upstream_task in upstream_tasks
                if upstream_task in all_task_names
            ]

        return json.dumps(dag, indent=4, ensure_ascii=False)

    # @pysnooper.snoop()
    def fix_position(self):
        """Shift all node positions so the graph is centred in the editor view.

        Moves the mean of the existing positions to (400, 300) and returns the
        adjusted ``expand`` task list.
        """
        expand_tasks = json.loads(self.expand) if self.expand else []
        if not expand_tasks:
            expand_tasks = []
        xs = []
        ys = []
        for item in expand_tasks:
            if 'position' in item and item['position'].get('x', 0):
                xs.append(int(item['position'].get('x', 0)))
                ys.append(int(item['position'].get('y', 0)))
        x_dist = 400 - numpy.mean(xs) if xs else 0
        y_dist = 300 - numpy.mean(ys) if ys else 0
        for item in expand_tasks:
            if 'position' in item and item['position'].get('x', 0):
                item['position']['x'] = int(item['position']['x']) + x_dist
                item['position']['y'] = int(item['position']['y']) + y_dist

        return expand_tasks

    def fix_expand(self, dbsession=db.session):
        """Rebuild the front-end ``expand`` structure (nodes and edges).

        Keeps the stored position of existing task nodes, drops nodes whose
        task no longer exists, gives new tasks a random position, and rewrites
        every edge from ``dag_json``.
        """
        tasks = {str(task.id): task for task in self.get_tasks(dbsession)}

        expand_tasks = json.loads(self.expand) if self.expand else []
        if not expand_tasks:
            expand_tasks = []

        # Drop nodes whose task no longer exists; drop ALL edges — the
        # relations are rebuilt in full from dag_json below.
        for item in copy.deepcopy(expand_tasks):
            if 'data' in item:
                # node entry
                if item['id'] not in tasks:
                    expand_tasks.remove(item)
            else:
                # edge entry
                expand_tasks.remove(item)

        # Add a node (with a random position) for each task not yet present.
        for task_id in tasks:
            exist = any(
                'data' in item and item['id'] == str(task_id) for item in expand_tasks
            )
            if not exist:
                expand_tasks.append(
                    {
                        'id': str(task_id),
                        'type': 'dataSet',
                        'position': {
                            'x': random.randint(100, 1000),
                            'y': random.randint(100, 1000),
                        },
                        'data': {
                            'name': tasks[task_id].name,
                            'label': tasks[task_id].label,
                        },
                    }
                )

        # Rewrite every edge from the dag definition.
        dag_json = json.loads(self.dag_json)
        for task_name in dag_json:
            for upstream_name in dag_json[task_name].get('upstream', []):
                # next(..., None) instead of [...][0]: dag_json may still
                # reference deleted tasks, which previously raised IndexError.
                upstream_task_id = next(
                    (tid for tid in tasks if tasks[tid].name == upstream_name), None
                )
                task_id = next(
                    (tid for tid in tasks if tasks[tid].name == task_name), None
                )
                if upstream_task_id and task_id:
                    expand_tasks.append(
                        {
                            'source': str(upstream_task_id),
                            'arrowHeadType': 'arrow',
                            'target': str(task_id),
                            'id': self.name
                            + '__edge-%snull-%snull' % (upstream_task_id, task_id),
                        }
                    )
        return expand_tasks

    # @pysnooper.snoop()
    def clone(self):
        """Return an unsaved copy of this template.

        The copy is forced to schedule_type 'once', has no pipeline_file and
        no alert statuses, and '_' in the name is replaced by '-'
        (presumably to satisfy k8s naming rules — confirm with callers).
        """
        return ReserveTemplate(
            name=self.name.replace('_', '-'),
            project_id=self.project_id,
            dag_json=self.dag_json,
            describe=self.describe,
            namespace=self.namespace,
            global_env=self.global_env,
            schedule_type='once',
            cron_time=self.cron_time,
            pipeline_file='',
            pipeline_argo_id=self.pipeline_argo_id,
            node_selector=self.node_selector,
            image_pull_policy=self.image_pull_policy,
            parallelism=self.parallelism,
            alert_status='',
            expand=self.expand,
            parameter=self.parameter,
        )
