import os
from dao.project_url import url
from myadmin.models import Cluster
# from datetime import datetime  # original import, commented out
from django.utils import timezone as datetime  # aliased so existing datetime.now() calls need no mass edit


# Instantiate the project-path helper. NOTE(review): this deliberately rebinds
# the imported name `url` to the instance — presumably a base-directory string
# used in the shell commands and file paths below; confirm against dao.project_url.
url = url()


def get_cluster():
    """Scan Kubernetes namespaces and sync hadoop/spark clusters into the DB.

    Runs ``kubectl get ns`` filtered to namespaces containing 'hadoop' or
    'spark', splits name and status columns into text files under
    ``<url>/static/cluster/``, then upserts a ``Cluster`` row per namespace:
    status 1 for 'Active', 2 otherwise. Clusters not seen in this scan keep
    the status 0 set by ``init_cluster()``.
    """
    init_cluster()  # reset every stored cluster to status 0 before scanning
    # grep keeps only hadoop/spark namespaces (and drops the kubectl header row).
    # NOTE(review): os.system with an interpolated path is shell injection-prone
    # if `url` is ever attacker-controlled — presumably it is a fixed project path.
    os.system(r"kubectl get ns | grep 'hadoop\|spark' > %s/static/cluster/ns_tmp.txt" % url)
    # column 1: namespace names
    os.system("awk '{print $1}' %s/static/cluster/ns_tmp.txt > %s/static/cluster/ns.txt" % (url, url))
    # column 2: namespace status
    os.system("awk '{print $2}' %s/static/cluster/ns_tmp.txt > %s/static/cluster/status.txt" % (url, url))
    with open("{}/static/cluster/ns.txt".format(url), 'r') as n, open("{}/static/cluster/status.txt".format(url), 'r') as s:
        ns_list = n.readlines()
        status_list = s.readlines()
    # (the `with` block closes both files; the explicit close() calls that
    # used to follow were redundant and have been removed)

    # One timestamp for the whole scan so create_at/update_at of a new row match.
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    for ns, status in zip(ns_list, status_list):
        name = ns.rstrip()
        state = 1 if status.rstrip() == 'Active' else 2
        if Cluster.objects.filter(name=name).exists():
            ob = Cluster.objects.get(name=name)
        else:
            ob = Cluster()
            ob.name = name
            ob.create_at = now
        ob.status = state
        ob.update_at = now
        ob.save()


def init_cluster():
    """Mark every cluster as unscanned (status 0) ahead of a rescan.

    Only rows whose status is currently non-zero are touched; each row is
    saved individually via the model's ``save()``.
    """
    stale = Cluster.objects.filter(status__gt=0)
    for record in stale:
        record.status = 0
        record.save()