import ast
import os

from cleo.helpers import argument, option

from tjob.handler import PAIHandler, LocalHandler
from tjob.handler.cloud_handler import CloudHandler, parse_cloud_job_params
from tjob.initialize.setup_envs import _CACHE, MACHINE_LIST
from tjob.initialize.setup_envs_cloud import (
    Framework,
    Batch,
    get_cloud_access,
    get_cloud_default_conf,
    get_default_namespace,
)
from tjob.meta import JobManger
from tjob.utils import get_curtime
from tjob.utils.io_utils import save_yaml
from tjob.utils.parse_file import CommonParser
from tjob.utils.pipeline_utils import gen_pipeline

from .base_command import BaseCommand


class SubmitCommand(BaseCommand):
    """Submit a job to one of the supported runners: ``pai``, ``local`` or ``cloud``.

    Parameter precedence for every per-job option is:
    command line > python file (``dlc_kwargs`` parsed out of it) > defaults.
    """

    name = "submit"
    description = "Submit your job to runner [pai/local]."
    arguments = [
        argument(
            "files",
            description="Send execute files to different platform, can be a list. "
            + "Use blank to separate. Supported file endswith: [*.py, *.sh, *.bash].\n"
            + "NOTE: *.py can modify the following submit options in PAI "
            + "when not set in command line.",
            multiple=True,
            default=None,
            optional=True,
        )
    ]

    options = [
        option(
            "platform",
            "p",
            flag=False,
            description="Which platform you want to submit? [pai, local, cloud]",
            default="pai",
        ),
        option(
            "resubmit",
            "r",
            flag=True,
            description="Use last time parameters to send PAI",
        ),
        option(
            "image",
            "i",
            flag=False,
            description="Which docker image you want to use in PAI?",
        ),
        option(
            "data_sources",
            "d",
            flag=False,
            description="Which data_source you want to use in PAI?",
        ),
        option(
            "monitor",
            "m",
            default=None,
            flag=False,
            description="Set Feishu's username, will send notification "
            + "when RUNNING/FAILED in PAI.",
        ),
        option(
            "node_names",
            flag=False,
            description="Which nodes you want to use in PAI? "
            + "If set will scheduling on specific nodes. "
            + "Support RegExp, e.g. `eflops[1-9]`"
            + "It will request machine's name with eflops1, eflops2, ... ,eflops9.",
            default=None,
        ),
        option(
            "name",
            flag=False,
            description="Which job name you want to use in PAI?",
            default=None,
        ),
        option(
            "cluster",
            "c",
            flag=False,
            description="Which cluster you want to submit in PAI?\n"
            + "If None, use `a30` as default.\n"
            + "Supported list: ['a30', '4090', 'a100', '2080']",
            default=None,
        ),
        option(
            "num_machine",
            flag=False,
            description="How many machine you want request in PAI?",
            default=None,
        ),
        option(
            "num_gpu",
            flag=False,
            description="How many gpu you want request for each machine in PAI?"
            + "\nMax is 8",
            default=None,
        ),
        option(
            "num_cpu",
            flag=False,
            description="How many cpu you want request for each machine in PAI? "
            + "\nMax is depends on `cluster`.",
            default=None,
        ),
        option(
            "priority",
            flag=False,
            description="Priority of your job in PAI.",
            default=None,
        ),
        option(
            "cloud_cluster",
            flag=False,
            description="target cloud cluster name, only support in cloud",
            default=None,
        ),
        option(
            "params_file",
            flag=False,
            description="params yaml file, only support in cloud",
            default=None,
        ),
        option(
            "jupyter.enabled",
            flag=True,
            description="whether to run jupyter, only support in cloud",
        ),
        option(
            "jupyter.working_dir",
            flag=False,
            description="lab's root working directory, only support in cloud",
            default=None,
        ),
        option(
            "jupyter.port",
            flag=False,
            description="jupyter's service port, only support in cloud",
            default=None,
        ),
        # NOTE(review): described as a boolean but declared with flag=False and a
        # False default, so it takes a value argument — confirm this is intended.
        option(
            "jupyter.auto_install",
            flag=False,
            description="whether to install jupyter lab, only support in cloud",
            default=False,
        ),
        option(
            "framework",
            flag=False,
            description="optional values: batch/pytorch, indicates to submit a normal batch job or a pytorch job. "
                        "Default value is pytorch. Only support in cloud",
            default=None,
        ),
        option(
            "batch.min_success",
            flag=False,
            description="the number of succeeded jobs to finish this job, only support in cloud",
            default=None,
        ),
        option(
            "batch.min_available",
            flag=False,
            description="the number of succeeded jobs to mark this job as Running, only support in cloud",
            default=None,
        ),
        option(
            "batch.restart_policy",
            flag=False,
            description="whether to restart if pods failed, only support in cloud",
            default=None,
        ),
        option(
            "is_dev",
            flag=False,
            description="whether this is a dev job, true or false"
                        "dev jobs will be provided limited resources, only support in cloud",
            default=None,
        ),
    ]

    def handle(self):
        """Parse CLI and file parameters, then dispatch to the chosen platform.

        Raises:
            ValueError: for an unknown PAI cluster, a missing ``is_dev`` on
                cloud, or an invalid ``batch.restart_policy``.
        """
        platform = self.option("platform").lower()
        self.blue_text(f"Submit to {platform} platform")

        # user option in terminal first
        parser = CommonParser(self.argument("files"))  # get file args
        commands = parser.command
        dlc_kwargs = parser.dlc_kwargs
        # for each job's params:  cmd > pyfile > init.params
        cluster = self.option("cluster") or dlc_kwargs.get("cluster")
        num_machine = self.option("num_machine") or dlc_kwargs.get("num_machine")
        num_gpu = self.option("num_gpu") or dlc_kwargs.get("num_gpu")
        num_cpu = self.option("num_cpu") or dlc_kwargs.get("num_cpu")
        priority = self.option("priority") or dlc_kwargs.get("priority")
        worker_image = self.option("image") or dlc_kwargs.get("image")
        name = self.option("name") or dlc_kwargs.get("name")

        if platform == "cloud":
            mem_per_worker = os.environ.get("PAI_MEM", 0)
            params_file = self.option("params_file")
            cloud_cluster = self.option("cloud_cluster")
            shm_size = os.environ.get("PAI_SHM", 0)
            run_jupyter = self.option("jupyter.enabled")
            jupyter_working_dir = self.option("jupyter.working_dir")
            jupyter_service_port = self.option("jupyter.port")
            auto_install_jupyter = self.option("jupyter.auto_install")
            framework = self.option("framework")
            batch_min_success = self.option("batch.min_success")
            batch_min_available = self.option("batch.min_available")
            batch_restart_policy = self.option("batch.restart_policy")
            is_dev = self.option("is_dev")
            if is_dev is None:
                raise ValueError("None parameter 'is_dev', 'is_dev' must be set!")

            valid_policies = ["Never", "OnFailure", "Always"]
            if (
                batch_restart_policy is not None
                and batch_restart_policy not in valid_policies
            ):
                raise ValueError(
                    f"Invalid restart policy [{batch_restart_policy}], "
                    f"valid policies: {valid_policies}. Default value is: Never."
                )

            job_framework = None
            if framework == "batch":
                # With neither value given, fall back to 0 so minSuccess below
                # never receives None.
                if batch_min_success is None and num_machine is None:
                    batch_min_success = 0
                job_framework = Framework(
                    batch=Batch(
                        minSuccess=batch_min_success
                        if batch_min_success is not None
                        else num_machine,
                        minAvailable=batch_min_available
                        if batch_min_available is not None
                        else 1,
                        restartPolicy=batch_restart_policy
                        if batch_restart_policy is not None
                        else "Never",
                    )
                )

            cj_params = parse_cloud_job_params(
                name=name,
                command=commands,
                gpuType=cluster,
                count=num_machine,
                gpuPerWorker=num_gpu,
                cpuPerWorker=num_cpu,
                memPerWorker=mem_per_worker,
                image=worker_image,
                cloudCluster=cloud_cluster,
                shmSize=shm_size,
                priority=priority,
                params_file=params_file,
                jupyterEnabled=run_jupyter,
                jupyterWorkingDir=jupyter_working_dir,
                jupyterServicePort=jupyter_service_port,
                autoInstallJupyter=auto_install_jupyter,
                framework=job_framework,
                isDev=is_dev,
            )
            ch = CloudHandler()
            cj_params_posted = ch.submit(cj_params)
            self.blue_text(f"Submit to {platform} Done")
            # /:workspace/clusters/:cluster/projects/:namespace/applications/training-tasks/:jobName  # noqa
            access = get_cloud_access()
            config = get_cloud_default_conf()
            detail = (
                access["webURL"]
                + f"/{config['cloudWorkspace']}/clusters/{cj_params_posted.cloudCluster}/projects/{get_default_namespace(access)}/applications/training-tasks/{cj_params_posted.name}"  # noqa
            )
            self.raw_text(f"cloud job detail url: <info>{detail}")
            # Persist both the raw params and the rendered job spec for later
            # inspection / resubmission.
            params_path = os.path.join(_CACHE, f"{cj_params_posted.name}.params.yaml")
            save_yaml(params_path, cj_params_posted.model_dump())
            self.raw_text("<w>cloud job params saved: " + params_path)
            cloud_path = os.path.join(_CACHE, f"{cj_params_posted.name}.job.yaml")
            save_yaml(cloud_path, cj_params_posted.to_cloud_job())
            self.raw_text("<w>cloud job yaml saved: " + cloud_path)
            return

        # Avoid clobbering an existing job record: disambiguate with a timestamp.
        if name in JobManger().jobs:
            warn_to_user = (
                f"{name} exists in JobManger. tjob create a new name with timestamp."
            )
            self.red_text(warn_to_user)
            name = name + "." + str(get_curtime())
            self.line(f"<w>Submit name : <comment>{name} <w>")

        data_sources = self.option("data_sources") or dlc_kwargs.get("data_sources")
        resubmit = self.option("resubmit") or dlc_kwargs.get("resubmit")
        node_names = self.option("node_names") or dlc_kwargs.get("node_names")

        if self.option("verbose"):
            self.raw_text("Get dlc kwargs in submit file:" + str(dlc_kwargs) + "\n")
            self.raw_text("Commands: " + str(commands) + "\n")

        if platform == "pai":
            if cluster and cluster not in MACHINE_LIST:
                self.line_error(
                    f"ONLY support cluster: {MACHINE_LIST} But get: {cluster}"
                )
                raise ValueError(f"cluster: [{cluster}] not in PAI")

            # Only a fresh submission yields a handler instance; resubmit goes
            # through a class-level call and leaves `submit` as None.
            submit = None
            if resubmit:
                # resubmit only allow modify name
                if self.option("verbose"):
                    PAIHandler.show_params()
                ret = PAIHandler.resubmit(name=name)
            else:
                # need give key and name
                submit = PAIHandler(
                    cluster=cluster if cluster else "a30",
                    worker_count=num_machine,
                    worker_gpu=num_gpu,
                    worker_cpu=num_cpu,
                    name=name,
                    data_sources=data_sources,
                    worker_image=worker_image,
                    priority=priority,
                    node_names=node_names,
                )
                submit.info.command = commands
                ret = submit.submit(commands)
                self.line_debug(submit.dlc_params)

            # Scrape the job id / detail URL out of the dlc CLI's table output.
            job_id = None
            msg = ""
            for line in ret.stdout:
                msg += str(line)
                if "http" in str(line):
                    self.blue_text(
                        "<w>job url: <w> <info>"
                        + str(line).strip().split("|")[-2].strip()
                    )
                    job_id = str(line).strip().split("|")[1].strip()
                    self.blue_text("<w>job id : <w> " + job_id)

            if job_id is None:
                self.red_text(f"[Submit Failed] {msg}")
                return

            if submit is not None:
                # Record the new job in the local manager. Skipped on resubmit
                # (no handler instance exists there; previously this path raised
                # NameError on `submit`).
                submit_info = submit.info
                submit_info.id = job_id
                submit_info.dlc_params = dict(submit.dlc_params)

                name = submit.dlc_params["name"]
                JobManger().add(submit_info)

                if self.option("verbose"):
                    self.raw_text(f"add {name}: {submit_info} in JobManger")

            self.green_text("[Submit PAI] Done")
            monitor_user = self.option("monitor") or dlc_kwargs.get("monitor")

            if monitor_user:
                self.red_text(
                    f"[Monitor] Send message to Feishu Username: {monitor_user}"
                )
                from tjob.monitor import PaiMonitor

                PaiMonitor(
                    job_id, monitor_user, query_interval_time=10, max_round=5
                ).monitor()

        elif platform == "local":
            submit = LocalHandler(worker_image=worker_image, name=name)
            self.line_debug(submit._cmd)
            name = submit.info.name

            if self.option("verbose"):
                self.raw_text(f"add {name}: {submit.info} in JobManger")
            submit.info.command = commands
            JobManger().add(submit.info)
            ret = submit.submit(commands)

            self.blue_text(f" Job ID: {JobManger().fetch_id(name)}")
            self.green_text("[Submit Local] Done")


class SubmitPipeCommand(BaseCommand):
    """Submit a multi-job pipeline to PAI.

    Compared to ``submit``, the user cannot specify dlc settings on the
    command line here — only inside the job's ``*.py`` / ``*.yaml`` files.
    """

    name = "submit pipeline"
    description = "Submit your pipeline to PAI."

    arguments = [
        argument(
            "files",
            description="The JOB files in pipeline. Supported file endswith: "
            + "[*.py, *.sh, *.bash].",
            multiple=True,
        )
    ]

    options = [
        option(
            "deps",
            "d",  # bare letter, consistent with the other commands' shortcuts
            description="JOB deps in pipeline. The format should be: "
            + "[[], [0], [1]]. It means job_1 require job_0 finish "
            + "and job_2 require job_1 finish",
            flag=False,
            default=None,
        ),
        option(
            "name",
            flag=False,
            description="Which pipeline name you want to use in PAI?",
            default=None,
        ),
    ]

    def handle(self):
        """Parse each job file, build the pipeline YAML, and submit it to PAI."""
        parser_list = [CommonParser(file) for file in self.argument("files")]
        deps = None
        if self.option("deps"):
            # literal_eval accepts exactly the documented literal format
            # (e.g. "[[], [0], [1]]") and — unlike eval — cannot execute
            # arbitrary code passed on the command line.
            deps = ast.literal_eval(self.option("deps"))
        pipe_yaml = gen_pipeline(parser_list, deps)
        ret = PAIHandler.pipeline_submit(
            pipeline_yaml=pipe_yaml, name=self.option("name")
        )
        for line in ret.stdout:
            self.blue_text(line.decode("utf-8").strip())
