#! /usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import io
import subprocess
from multiprocessing import Pool, Lock
from os import utime, devnull
import os.path
from time import mktime
from collections import defaultdict

# Deployment plan: each entry maps a weight (lower deploys earlier) to one
# module name.  Modules sharing a weight form one deployment group.
# Previous module list, kept for reference:
# module_weight = [{1: 'k8s'}, {2: 'k8suser'}, {2: 'infra'}, {4: 'apm'}, {4: 'apigw'}, {4: 'nsf'}, {4: 'gtxs'}, {4: 'cicd'}, {3: 'prometheus'}, {4: 'ncs'}, {3: 'platform'}, {4: 'paas'}]
module_weight = [{1: 'k8sctrl'}, {2: 'k8suser'}, {2: 'infra'}, {3: 'platform1'}, {4: 'apm'}, {4: 'gtxs'}, {4: 'ncs'}, {4: 'nsf1'}, {4: 'apigw1'}, {5: 'prometheus1'}]

# Group module names by weight: {weight: [module, ...]}.
dd = defaultdict(list)
for task in module_weight:
    for key, value in task.items():
        dd[key].append(value)

# Sort explicitly by weight.  The old `dict(dd)` relied on module_weight
# already being listed in ascending-weight order (insertion order of the
# dict); sorting here makes the deployment order robust to list edits.
sorted_module = dict(sorted(dd.items()))

def create_directory(path):
    """Create *path* (including parents) if it does not already exist.

    The old access()-then-makedirs check had a TOCTOU race: a second
    process could create the directory between the check and the call,
    making makedirs raise.  EAFP: just try, and only re-raise if the
    directory still does not exist afterwards.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Already exists (fine) or a real error such as EACCES (re-raise).
        if not os.path.isdir(path):
            raise

def parallel_run(modules):
    """Start ./<module>/main.sh for every module concurrently and wait
    for all of them to finish.

    Each child's stdout+stderr is streamed to ./log/<module>.log.
    Returns the list of child exit codes in the same order as *modules*
    so callers can detect failures (the original discarded them; callers
    that ignore the return value behave exactly as before).
    """
    create_directory("./log")  # once, not once per module
    child_processes = []
    for module in modules:
        log_path = os.path.join(".", "log", str(module) + ".log")
        with io.open(log_path, mode='wb') as out:
            # Argument list instead of a shell string: no shell is
            # involved, so module names with metacharacters are safe.
            p = subprocess.Popen(
                ["bash", os.path.join(".", str(module), "main.sh")],
                stdout=out, stderr=out)
            # Start this one and immediately return to start another;
            # the child keeps its own handle on the log file.
            child_processes.append(p)

    # Join: block on each child until it exits, collecting exit codes.
    return [cp.wait() for cp in child_processes]

# Deploy each weight group in ascending-weight order; within a group,
# run each module's main.sh sequentially from the module's directory.
pwd = os.getcwd()  # invariant: hoisted out of the loop
for weight, modules in sorted_module.items():
    print("start deploy: " + str(modules) + ", with weight is " + str(weight))
    #parallel_run(modules)
    for module in modules:
        module_dir = os.path.join(pwd, str(module))
        print("run ./main.sh in " + module_dir)
        # subprocess.call with cwd= replaces the old
        # os.system("cd X && ./main.sh") string building: no quoting
        # issues, and call() returns the real exit code rather than
        # os.system's raw wait status.  shell=True is kept in case
        # main.sh relies on being launched through a shell.
        res = subprocess.call("./main.sh", shell=True, cwd=module_dir)
        if res != 0:
            # Say why we are aborting instead of exiting silently.
            sys.stderr.write(
                "deploy failed for module %s (exit code %d)\n" % (module, res))
            sys.exit(1)