import os.path as osp
from tjob.utils import cond_mkdir, save_yaml, load_yaml
import shutil
import subprocess
from .machines import MACHINES
import os

_CACHE = osp.join(osp.expanduser("~"), ".tjob")  # cache directory
_CUR_PATH = osp.abspath(__file__)  # absolute path of this module file

_CONF = osp.join(_CACHE, "conf.yaml")  # shared user config (pai user/password)
_LOCAL_CONF = osp.join(_CACHE, "local.submit.dlc.yaml")  # defaults for local runs

_custom_init = osp.join(_CACHE, "custom.sh")  # optional user-provided init script
_clean_gpu = osp.join(_CACHE, "clean_gpu.sh")  # generated gpu-cleanup script

# used in PAIHandler
_DLC_PARAMS = osp.join(_CACHE, "dlc.params")
_SUBMIT_JOBS = osp.join(_CACHE, "submit.jobs")
# debug mode is toggled via the TJOB_DEBUG environment variable ("true"/"false")
_DEBUG_MODE = os.environ.get("TJOB_DEBUG", "false").lower() == "true"

MACHINE_LIST = [
    "a30",
    "2080",
    "4090",
    "a100",
]  # supported machine list, a30p is a30-private

CUSTOM_FLAG = osp.exists(_custom_init)  # whether the user installed custom.sh
# dlc CLI binary shipped two directories above this file, under exec/
DLC = osp.realpath(osp.join(_CUR_PATH, "../..", "exec/dlc"))
DEFAULT_DOCKER = (
    "reg.deeproute.ai/perception-test/vision-training-framework:cu11_v0.2.5"
)
NEXUS = "https://nexus.deeproute.ai/repository/pypi-group/simple"

ACCESS_KEY_ID = "online"
# NOTE(review): hard-coded credential checked into source — consider loading it
# from the environment or conf.yaml instead.
ACCESS_KEY_SECRET = "TssAkr6qO5iTEWwJVjdA"
ENDPOINT = "pai-proxy.deeproute.cn:32080"
PROTOCOL = "http"
API_PRODUCT_DLC = "dlc"
API_PRODUCT_DSW = "dsw"
API_PRODUCT_FLOW = "flow"

# script written by set_deekeeper(); its presence enables deekeeper integration
DEEKEEPER_SCRIPT_PATH = osp.join(_CACHE, "deekeeper.sh")


def deekeeper_enabled() -> bool:
    """Return True when the deekeeper credential script has been installed."""
    return osp.exists(DEEKEEPER_SCRIPT_PATH)


def get_default_dlc_file(machine):
    """Return the cached default dlc yaml path for *machine*."""
    return osp.join(_CACHE, machine + ".submit.dlc.yaml")


# ensure the cache directory exists before any config file below is written
cond_mkdir(_CACHE)


def dump_pai_key_passwd(user, passwd):
    """Persist the PAI username/password into the shared conf.yaml."""
    meta_data = load_yaml(_CONF) if osp.exists(_CONF) else {}
    meta_data["pai.user"] = user
    meta_data["pai.password"] = passwd
    save_yaml(_CONF, meta_data)


def set_dlc_login(username, password) -> int:
    """Configure the dlc CLI with the given credentials.

    Returns the CLI's exit code (0 on success).  The command is passed as an
    argument list with ``shell=False`` so that passwords containing shell
    metacharacters are handled safely (the old ``shell=True`` string form
    broke on, and could be injected through, special characters).
    """
    cfg_cmd = [
        DLC,
        "config",
        "-e", ENDPOINT,          # reuse module constants instead of
        "--username", username,  # re-hardcoding the endpoint/protocol
        "--password", password,
        "--protocol", PROTOCOL,
        "--region", "region",
    ]
    result = subprocess.run(
        cfg_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    return result.returncode


def set_dlc_params(dlc_params):
    """Write the default per-machine dlc yaml for every supported machine.

    ``dlc_params`` are keyword overrides forwarded to
    ``gen_dlc_default_params``.
    """
    for machine in MACHINE_LIST:
        params = gen_dlc_default_params(machine=machine, **dlc_params)
        # reuse get_default_dlc_file instead of rebuilding the path inline
        save_yaml(get_default_dlc_file(machine), params)


def set_local_params():
    """Write the default local-run yaml into the cache directory."""
    defaults = gen_local_default_params()
    save_yaml(_LOCAL_CONF, defaults)


def gen_local_default_params(name="hello-local", worker_image=None):
    """Build the parameter dict for a local docker run.

    Falls back to ``DEFAULT_DOCKER`` when *worker_image* is empty/None.
    """
    return {
        "name": name,
        "interactive": True,  # add -it
        "worker_image": worker_image or DEFAULT_DOCKER,
    }


def gen_dlc_default_params(
    machine="a30",
    name="hello-pai",
    worker_count=1,
    worker_gpu=1,
    data_sources=None,
    worker_image=None,
):
    """Build the default dlc submit parameters for one machine/cluster.

    Machine-specific fields (gpu type, workspace id) are looked up from
    ``MACHINES``.
    """
    # TODO(yuezhang): machine should rename as cluster.
    spec = MACHINES[machine]
    params = dict(
        name=name,  # job name
        worker_gpu_type=spec.gpu_type,
        worker_count=worker_count,  # number of machines
        worker_gpu=worker_gpu,  # number of gpu
        kind="PyTorchJob",  # pytorch / batch / tf
        priority=4,
        workspace_id=spec.workspace_id,
        worker_image=worker_image or DEFAULT_DOCKER,
        data_sources=data_sources,
        interactive=False,
        # "machine": machine,
    )
    # note: yuezhang removed the `machine` key, but yamls written by an
    # earlier init may still carry it
    return params


def update_dlc_params(base_file: str, **kwargs):
    """Load a machine's dlc yaml and overlay submit-time overrides.

    ``base_file`` is a ``<machine>.submit.dlc.yaml`` path; ``kwargs`` hold
    per-submit overrides (None means "not provided").  Returns the merged
    parameter dict.  Raises ``ValueError`` when the yaml lacks
    ``worker_gpu``.
    """
    base_params = load_yaml(base_file)
    # machine name is the first dot-separated component of the file name,
    # e.g. "a30.submit.dlc.yaml" -> "a30"; basename is portable, unlike
    # splitting on "/"
    machine = osp.basename(base_file).split(".")[0]
    for k, v in kwargs.items():
        # if None wont update — the previous truthiness test (`if v:`) also
        # dropped legitimate falsy overrides such as worker_gpu=0, defeating
        # the cpu-only branch below
        if v is not None:
            base_params[k] = v
    #  update cpu & gpu mem
    if "worker_gpu" not in base_params:
        raise ValueError("worker_gpu should be in base_params")
    _gpu_scale_gpu = max(1, int(base_params["worker_gpu"]))
    # user can set `PAI_SHM=30 PAI_MEM=10 tjob submit xxx` to override
    # memory and shared memory (0 means "unset", fall through to defaults)
    PAI_MEM = int(os.environ.get("PAI_MEM", 0))
    PAI_SHM = int(os.environ.get("PAI_SHM", 0))
    if "worker_cpu" not in base_params:
        base_params["worker_cpu"] = MACHINES[machine].num_cpu_per_gpu * _gpu_scale_gpu
    if "worker_memory" not in base_params:
        base_params["worker_memory"] = (
            PAI_MEM or MACHINES[machine].gpu_mem_per_gpu * _gpu_scale_gpu
        )
    if "worker_shared_memory" not in base_params:
        base_params["worker_shared_memory"] = (
            PAI_SHM or MACHINES[machine].gpu_mem_per_gpu * _gpu_scale_gpu
        )
    # cpu-only job: drop `worker_gpu_type` and re-derive memory from cpu count
    if int(base_params["worker_gpu"]) == 0:
        base_params.pop("worker_gpu_type")
        scale = max(
            1, int(base_params["worker_cpu"]) // MACHINES[machine].num_cpu_per_gpu
        )
        base_params["worker_memory"] = (
            PAI_MEM or MACHINES[machine].gpu_mem_per_gpu * scale
        )
        base_params["worker_shared_memory"] = (
            PAI_SHM or MACHINES[machine].gpu_mem_per_gpu * scale
        )
    return base_params


def set_deekeeper(access, secret, cmd=None):
    """Write the deekeeper (ClearML) credential export script to the cache.

    Skips (and optionally reports via *cmd*) when either credential is
    missing or blank — the previous code performed the same guard twice,
    once for None and once for "".
    """
    if not access or not secret:
        if cmd:
            cmd.blue_text("[Init] deekpeekper not set")
        return

    script = ""
    script += "echo set deekpeer\n"
    script += f"export CLEARML_API_ACCESS_KEY={access}\n"
    script += f"export CLEARML_API_SECRET_KEY={secret}\n"
    script += (
        'export CLEARML_WEB_HOST="https://prod-clearml-webserver.srv.deeproute.cn"\n'
    )
    script += (
        'export CLEARML_API_HOST="https://prod-clearml-apiserver.srv.deeproute.cn"\n'
    )
    script += (
        'export CLEARML_FILES_HOST="https://prod-clearml-fileserver.srv.deeproute.cn"\n'
    )

    with open(DEEKEEPER_SCRIPT_PATH, "w") as rsh:
        rsh.write(script)


def set_clear_gpu():
    """Write a shell script that kills any python process holding a GPU."""
    lines = (
        "echo clear GPU\n",
        "export pypid=$(nvidia-smi | grep 'python' | awk '{ print $3 }') \n",
        "echo $pypid \n",
        "if [ $pypid ]; then echo 'kill $pypid'; kill -9 -- $pypid; fi",
    )
    with open(_clean_gpu, "w") as fh:
        fh.write("".join(lines))


def clear_envs(path):
    """Recursively delete *path* and everything beneath it."""
    shutil.rmtree(path)


def map_dlcParams2opt():
    """Return the dlc-key -> opt-key mapping (only keys whose names differ)."""
    return {
        "worker_count": "num_machine",
        "worker_gpu": "num_gpu",
        "worker_cpu": "num_cpu",
        "worker_image": "image",
    }


# clean gpu: generate the helper script once per fresh cache (import-time
# side effect; rerun set_clear_gpu() manually to refresh an existing script)
if not os.path.exists(_clean_gpu):
    set_clear_gpu()
