# Standard library imports
from datetime import datetime
import json
import logging
import os
import pathlib
import time
import uuid

# Third-party library imports
from kubernetes import client
import yaml

# Local application imports
from myapp.biz.pipeline.dag import change_to_multi_node_yaml, dag_to_pipeline, is_npu, run_pipeline
from myapp.config import PIPELINE_NAMESPACE
from myapp.configs.training import ENGINE_CONFIG
from myapp.const.response import BizError
from myapp.models.model_auto_learning import AutoLearning
from myapp.models.model_job import Pipeline, Task, Workflow
from myapp.models.user_attributes import UserAttribute
from myapp.third.k8s.pv_pvc import create_pv, create_pvc
from myapp.third.k8s.py_karmada import (
    get_k8s_client_by_region,
    get_propagation_yaml,
    propagation_job,
    propagation_job_without_namespace,
)
from myapp.utils.device_resource import DeviceResource
from myapp.utils.resource import get_resource, is_multi_node
from myapp.utils.storage import storage_mgr


# Module-level logger for this file.
# NOTE(review): several functions below call the root ``logging`` module
# directly instead of this logger — consider unifying on ``log``.
log = logging.getLogger(__name__)



def render_params(inputs, outputs, parameters, job_name, username):
    """
    Build the command-line argument string for a job.

    Args:
        inputs (list): Input dicts, each with 'name' and 'path'.
        outputs (list): Output dicts, each with 'name' and 'local_dir'.
        parameters (list): Parameter dicts, each with 'name' and 'value'.
        job_name (str): The name of the job.
        username (str): The username associated with the job.

    Returns:
        str: Space-separated ``--name="value"`` arguments.
    """
    args = []
    # Inputs are referenced under the shared publish-data mount.
    for item in inputs:
        args.append(f'--{item["name"]}="/mnt/publish-data/{item["path"]}"')
    # Outputs live under the user's job folder; each directory is created
    # up-front so the training process can write into it immediately.
    job_root = f'{username}/jobs/{job_name}'
    for item in outputs:
        target = os.path.join(job_root, item['local_dir'])
        # Normalize to a trailing slash so the path is treated as a directory.
        if not target.endswith('/'):
            target += '/'
        make_dir(target)
        args.append(f'--{item["name"]}="/mnt/publish-data/{target}"')
    # Plain key/value parameters are passed through verbatim.
    for item in parameters:
        args.append(f'--{item["name"]}="{item["value"]}"')
    return ' '.join(args)



def get_command(command, boot_file):
    """
    Resolve the command a task should execute.

    A boot file, when supplied, takes precedence and is run with the
    python interpreter; otherwise the raw command is used unchanged.

    Args:
        command (str): The base command string.
        boot_file (str): The path to a boot file, if any.

    Returns:
        str: The command to be executed.
    """
    return f'python {boot_file}' if boot_file else command



def make_dir(path):
    """
    Create a directory under the shared publish-data mount, best-effort.

    Tries the local filesystem first; if that fails (e.g. the mount is
    not available on this host), falls back to the storage manager.
    Failures of the fallback are logged rather than raised, so callers
    treat directory creation as best-effort.

    Args:
        path (str): Directory path relative to /mnt/publish-data.
    """
    try:
        # Create the directory (and any missing parents) on the local mount;
        # an already-existing directory is not an error.
        pathlib.Path(f'/mnt/publish-data/{path}').mkdir(parents=True, exist_ok=True)
    except Exception:
        try:
            # Local mount unavailable — delegate creation to storage_mgr.
            storage_mgr.create_dir(path)
        except Exception as e:
            # Fix: use the module logger instead of the root ``logging``
            # module so the failure is attributed to this module's logger
            # configuration (consistent with the rest of the file).
            log.error(e)



def get_image_url(engine_name, engine_version):
    """
    Look up the container image URL for an engine name and version.

    Args:
        engine_name (str): The name of the engine (a key in ENGINE_CONFIG).
        engine_version (str): The version of the engine.

    Returns:
        str or None: The image URL if found, otherwise None.
    """
    # Fix: fall back to an empty list so an unknown engine name yields
    # None instead of raising ``TypeError: 'NoneType' is not iterable``
    # (the caller in start_training expects None and raises BizError).
    engine_items = ENGINE_CONFIG.get(engine_name) or []
    # Scan the engine's entries for a matching version.
    for item in engine_items:
        if item.get('version') == engine_version:
            return item.get('image_url')
    # No matching version (or unknown engine name).
    return None



def start_training(req, db_session):
    """
    Initiates a training pipeline based on the provided request.

    This function handles the creation and configuration of a training pipeline,
    including setting up the image, command, resources, and Kubernetes objects
    like Pipeline, Task, and Workflow. The statement order matters: the session
    is flushed to obtain generated ids, committed before namespace
    initialization, and only then is the workflow submitted to the cluster.

    Args:
        req (dict): A dictionary containing the request parameters for the training job.
        db_session: The database session object.

    Returns:
        int: The ID of the created pipeline.

    Raises:
        BizError: If a preset framework does not exist.
    """
    # Extract user and task information from the request.
    user_id = req.get('user_id', 1)
    task_id = req.get('task_id')
    username = req.get('username')

    # Process configuration parameters from the request.
    image_url = req['image_url']
    # If image_url is not provided, resolve it from the preset engine config.
    if not image_url:
        image_url = get_image_url(req['engine_name'], req['engine_version'])
        if not image_url:
            # Message: "preset framework does not exist".
            raise BizError(1000, '预置框架不存在')
    # Determine the command to run, prioritizing a boot file.
    command = get_command(req['command'], req['boot_file'])
    # Optional extra arguments appended after the rendered parameters.
    trailing_command = req.get('trailing_command')
    # Extract inputs, outputs, and parameters.
    inputs = req['inputs']
    outputs = req['outputs']
    parameters = req['parameters']
    # Get resource configuration based on flavor ID.
    resource = get_resource(req['flavor_id'])
    # Auto-stop setting; presumably in hours (used as hours below) — TODO confirm.
    auto_stop = req['auto_stop']
    # Get algorithm details.
    algorithm = req.get('algorithm', {})
    # Normalize the job name into a k8s-compatible pipeline name.
    pipeline_name = f"{req['job_name']}".replace('_', '-').lower()
    # Get the node selector and force scheduling onto training nodes.
    node_selector = req.get('node_selector', {})
    node_selector['train'] = 'true'

    # Convert node selector dictionary to a list of "key=value" strings.
    list_node_selector = []
    for key, value in node_selector.items():
        list_node_selector.append(f'{key}={value}')

    # Create a new Pipeline object.
    pipeline = Pipeline()
    pipeline.created_by_fk = user_id
    pipeline.name = pipeline_name
    pipeline.describe = req['name']
    # NOTE(review): hard-coded project id — confirm 5 is the intended training project.
    pipeline.project_id = 5
    pipeline.display_name = ''
    pipeline.namespace = f'{PIPELINE_NAMESPACE}-{user_id}'
    # Add the pipeline to the database session.
    db_session.add(pipeline)
    # Flush the session so pipeline.id is assigned by the database.
    db_session.flush()
    # Prefix the pipeline name with its id to make it unique.
    pipeline_name = f'c{pipeline.id}-{pipeline.name}'
    pipeline.name = pipeline_name
    pipeline.region = req['region']
    # Construct the full command string (rendered inputs/outputs/params appended).
    cmd = command + ' ' + render_params(inputs, outputs, parameters, req['job_name'], username)
    if trailing_command:
        cmd = cmd + ' ' + trailing_command
    # JSON-encode the command so it can be embedded verbatim in task.args below.
    cmd = json.dumps(cmd)
    logging.info(cmd)

    # Parse the GPU resource spec (type/num/mem) from its string form.
    resource_gpu = resource.get('gpu')
    dr = DeviceResource.from_str(resource_gpu)

    # Create a new Task object.
    task = Task()
    task.created_by_fk = user_id
    task.name = f'task-{int(time.time())}'
    # NOTE(review): hard-coded job template id — confirm template 1 is correct.
    task.job_template_id = 1
    task.pipeline_id = pipeline.id
    task.label = f"{req['kind']}-{req['name']}"
    task.node_selector = ','.join(list_node_selector)
    # Fixed volume layout: workspace, archives, shared publish-data and shm.
    task.volume_mount = (
        'kubeflow-user-workspace(pvc):/mnt,kubeflow-archives(pvc):/archives,'
        'kubeflow-publish-data-pvc(pvc):/mnt/publish-data,(memory)10G:/dev/shm'
    )
    task.resource_cpu = resource.get('cpu')
    task.resource_gpu = json.dumps(
        {
            'type': dr.typ,
            'num': dr.num,
            'mem': dr.mem,
            'node_num': resource.get('node_num', 1),
            'node_selector': resource.get('node_selector'),
        }
    )
    task.resource_memory = resource.get('mem')
    # Convert the auto-stop value (hours) to seconds.
    task.timeout = auto_stop * 3600
    task.region = req['region']

    # Task arguments as a JSON fragment; cmd is already JSON-encoded above.
    task.args = f"""{{
    "images": "{image_url}",
    "workdir": "/mnt/publish-data/{req['code_path']}",
    "command": {cmd}
    }}"""
    task.expand = ''
    # Record the node count in the expand field when multi-node.
    node_count = req['node_count']
    if node_count and int(node_count) > 1:
        task.expand = '{"node_count":%s}' % node_count
    # Add the task to the database session.
    db_session.add(task)
    # Flush the session so task.id is assigned by the database.
    db_session.flush()
    logging.info(f'task: {task.id}')
    # Store the task node layout in the Pipeline's expand field (UI graph data).
    pipeline.expand = (
        f'[{{"id": "{task.id}", "type": "dataSet", "position": {{"x": 400.0, "y": 300.0}}, "data":'
        f' {{"name": "{task.name}", "label": "{task.label}"}}}}]'
    )
    pipeline.dag_json = '{}'

    # Create a new Workflow object with a unique CRD name.
    crd_name = pipeline_name + uuid.uuid4().hex[:5]
    status = 'Pending'
    snapshot = {'dag_json': {}, 'pipeline_expand': ''}
    info_json = {'pipeline_name': pipeline.name, 'describe': pipeline.describe, 'has_push': ''}
    namespace = f'{PIPELINE_NAMESPACE}-{user_id}'
    now = datetime.now()
    workflow = Workflow(
        name=crd_name,
        namespace=namespace,
        status=status,
        display_status=status,
        display_name=pipeline.describe,
        algorithm=json.dumps(algorithm, indent=4, ensure_ascii=False),
        info_json=json.dumps(info_json, indent=4, ensure_ascii=False),
        status_more='{}',
        snapshot=json.dumps(snapshot, indent=4, ensure_ascii=False),
        username=username,
        pipeline_id=pipeline.id,
        create_time=now.strftime('%Y-%m-%d %H:%M:%S'),
        region=req['region'],
    )
    # Add the workflow to the database session.
    db_session.add(workflow)
    # Flush the session so workflow.id is assigned by the database.
    db_session.flush()

    # Synthesize the workflow pipeline file (Argo workflow yaml).
    pipeline.pipeline_file = dag_to_pipeline(
        pipeline,
        db_session,
        task_type=req['task_type'],
        kind=req['kind'],
        envs=req['environments'],
        crd_name=crd_name,
    )
    # Load the pipeline JSON from the generated file.
    pipeline_json = yaml.safe_load(pipeline.pipeline_file)
    # Pin the metadata name to the CRD name and drop generateName.
    pipeline_json['metadata']['name'] = crd_name
    pipeline_json['metadata'].pop('generateName', None)

    # GPU environment, multi-node multi-GPU training: uses argo + volcano,
    # so the workflow yaml must be converted (not needed for NPU tasks).
    if is_multi_node(resource_cfg=resource) is True and is_npu(task=task) is False:
        pipeline_json = change_to_multi_node_yaml(
            pipeline_json=pipeline_json, resource_cfg=resource
        )

    # Log message: "submitting workflow".
    logging.info(f'提交任务流: {pipeline.name}')
    pipeline.pipeline_argo_id = crd_name

    # Update AutoLearning task with training job ID if task_id is provided.
    if task_id:
        db_session.query(AutoLearning).filter_by(id=int(task_id)).update(
            {'training_job_id': workflow.id}
        )

    # Commit all changes to the database before touching the cluster.
    db_session.commit()
    # Initialize the Kubernetes namespace (no-op if already initialized).
    init_namespace(user_id, db_session, region=req['region'])

    # Launch the Pipeline: clean up stale tasks, then submit the workflow.
    pipeline.delete_old_task(db_session)
    run_pipeline(
        cluster=pipeline.project.cluster,
        workflow_json=pipeline_json,
        namespace=namespace,
        region=req['region'],
    )

    # Return the pipeline ID.
    return pipeline.id



def restart_training(workflow, dbsession):
    """
    Restart an existing training workflow.

    Folds the previous run's duration into the workflow history, resets
    the workflow to 'Pending', then re-submits the stored pipeline
    definition under the original workflow name.

    Args:
        workflow (Workflow): The workflow object to restart.
        dbsession: The database session object.

    Raises:
        Exception: If the pipeline associated with the workflow does not exist.
    """
    # Accumulate the last run's duration and reset the status fields.
    workflow.history_duration += workflow.duration_time
    workflow.status = 'Pending'
    workflow.display_status = 'Pending'
    workflow.foreign_key = ''
    dbsession.commit()

    # The pipeline row holds the rendered workflow yaml to resubmit.
    pipeline = dbsession.query(Pipeline).filter_by(id=workflow.pipeline_id).first()
    if not pipeline:
        # Message: "pipeline does not exist".
        raise Exception(f'pipeline不存在，pipeline_id:{workflow.pipeline_id}')

    # Reuse the original workflow name as the CRD name; drop generateName
    # so the resubmission targets the same resource name.
    workflow_spec = yaml.safe_load(pipeline.pipeline_file)
    workflow_spec['metadata']['name'] = workflow.name
    workflow_spec['metadata'].pop('generateName', None)

    # Clean up stale tasks, then submit the workflow again.
    pipeline.delete_old_task(dbsession)
    run_pipeline(pipeline.project.cluster, workflow_spec, namespace=workflow.namespace)



def init_namespace(user_id, db_session, region='default'):
    """
    Initializes the Kubernetes namespace and related resources for a given user.

    This includes creating the namespace, Persistent Volumes (PVs),
    Persistent Volume Claims (PVCs), Secrets, Service Accounts, and Role Bindings.
    It also handles Karmada propagation for non-default regions. Idempotent
    per (user_id, region): returns early once marked initialized.

    Args:
        user_id (int): The ID of the user.
        db_session: The database session object.
        region (str, optional): The region for Kubernetes client. Defaults to 'default'.

    Raises:
        Exception: If user attributes do not exist.
        client.exceptions.ApiException: For Kubernetes API errors.
    """
    # Retrieve user attributes from the database.
    user_attr = (
        db_session.query(UserAttribute)
        .filter(UserAttribute.user_id == user_id)
        .filter(UserAttribute.region == region)
        .first()
    )
    # Raise an exception if user attributes are not found ("user attr does not exist").
    if user_attr is None:
        raise Exception(f'用户attr不存在, user_id: {user_id}')
    # Already initialized for this region: nothing to do.
    if user_attr.pipeline_is_initialized == 1:
        return

    # Initialize Kubernetes client for the specified region.
    k8s_client = get_k8s_client_by_region(region=region)

    # Construct the per-user namespace name.
    namespace = f'{PIPELINE_NAMESPACE}-{user_id}'
    # Create the Kubernetes namespace.
    k8s_client.create_namespace(namespace)

    # Create Persistent Volumes (PVs) backing the fixed volume layout
    # used by training tasks (archives, publish-data, workspace).
    create_pv(
        'pipeline-kubeflow-archives', user_id, '/data/k8s/kubeflow/pipeline/archives', k8s_client
    )
    create_pv(
        'pipeline-kubeflow-publish-data',
        user_id,
        '/data/k8s/kubeflow/pipeline/publish-data',
        k8s_client,
    )
    create_pv(
        'pipeline-kubeflow-user-workspace',
        user_id,
        '/data/k8s/kubeflow/pipeline/workspace',
        k8s_client,
    )

    # Create Persistent Volume Claims (PVCs) bound to the PVs above.
    create_pvc('kubeflow-archives', namespace, 'pipeline-kubeflow-archives', k8s_client)
    create_pvc(
        'kubeflow-publish-data-pvc', namespace, 'pipeline-kubeflow-publish-data', k8s_client
    )
    create_pvc(
        'kubeflow-user-workspace', namespace, 'pipeline-kubeflow-user-workspace', k8s_client
    )

    # Handle PV and PVC propagation for non-default regions.
    if region != 'default':
        # PV names to propagate; create_pv presumably suffixes names
        # with the user_id — confirm against its implementation.
        pv_name_list = [
            f'pipeline-kubeflow-archives-{user_id}',
            f'pipeline-kubeflow-publish-data-{user_id}',
            f'pipeline-kubeflow-user-workspace-{user_id}',
        ]
        # Propagate each PV (cluster-scoped, so no namespace).
        for pv_name in pv_name_list:
            propagation_job_without_namespace(
                region=region,
                k8s_client=k8s_client,
                name=pv_name,
                suffix=region,
                api_version='v1',
                kind='PersistentVolume',
            )

        # PVC names to propagate (namespaced resources).
        pvc_name_list = [
            'kubeflow-archives',
            'kubeflow-publish-data-pvc',
            'kubeflow-user-workspace',
        ]
        # Propagate each PVC.
        for pvc_name in pvc_name_list:
            propagation_job(
                region=region,
                k8s_client=k8s_client,
                name=pvc_name,
                suffix=region,
                namespace=namespace,
                api_version='v1',
                kind='PersistentVolumeClaim',
            )

    # Create a new Kubernetes Secret holding the MinIO credentials.
    secret = client.V1Secret(
        api_version='v1',
        kind='Secret',
        metadata=client.V1ObjectMeta(
            name='my-minio-cred',  # name of the Secret
            namespace=namespace,  # namespace where the Secret is created
        ),
        data={
            'accesskey': 'bWluaW8=',  # base64 of "minio"
            'secretkey': 'bWluaW8yMDIy',  # base64 of "minio2022"
        },
        type='Opaque',  # Secret type
    )
    try:
        # Attempt to create the namespaced secret.
        k8s_client.v1.create_namespaced_secret(namespace, body=secret)
    except client.exceptions.ApiException as e:
        # 409 Conflict means the secret already exists — log and continue;
        # any other API error is re-raised.
        if e.status == 409:
            log.info(
                f'Secret {secret.metadata.name} already exists '
                f'in namespace {secret.metadata.namespace}'
            )
        else:
            raise e

    # Handle secret propagation for non-default regions.
    if region != 'default':
        try:
            # Build the Karmada PropagationPolicy yaml for the secret.
            propagation_yaml = get_propagation_yaml(
                cluster=region,
                name='my-minio-cred',
                namespace=namespace,
                api_version='v1',
                kind='Secret',
                suffix=region,
            )
            logging.info(f'[propagation_yaml] {propagation_yaml}')
            # Create the PropagationPolicy custom resource.
            k8s_client.create_crd(
                group='policy.karmada.io',
                version='v1alpha1',
                plural='propagationpolicies',
                namespace=namespace,
                body=propagation_yaml,
            )
        except client.exceptions.ApiException as e:
            # 409 Conflict: the propagation policy already exists; otherwise re-raise.
            if e.status == 409:
                log.info(f'Secret my-minio-cred already exists in namespace {namespace}')
            else:
                raise e

    # Create a new Kubernetes ServiceAccount for running pipelines.
    service_account = client.V1ServiceAccount(
        api_version='v1',
        kind='ServiceAccount',
        metadata=client.V1ObjectMeta(
            name='pipeline-runner',  # name of the ServiceAccount
            namespace=namespace,
        ),
    )
    try:
        # Attempt to create the namespaced service account.
        k8s_client.v1.create_namespaced_service_account(namespace, body=service_account)
    except client.exceptions.ApiException as e:
        # 409 Conflict: already exists — log and continue; otherwise re-raise.
        if e.status == 409:
            log.info(
                f'ServiceAccount {service_account.metadata.name} already exists in '
                f'namespace {service_account.metadata.namespace}'
            )
        else:
            raise e

    # Handle service account propagation for non-default regions.
    if region != 'default':
        propagation_job(
            region=region,
            k8s_client=k8s_client,
            name='pipeline-runner',
            suffix=region,
            namespace=namespace,
            api_version='v1',
            kind='ServiceAccount',
        )

    # Create a new Kubernetes RoleBinding granting the service account
    # the cluster role. NOTE(review): assumes a ClusterRole named
    # 'pipeline-runner' already exists cluster-wide — confirm.
    role_binding = client.V1RoleBinding(
        api_version='rbac.authorization.k8s.io/v1',
        kind='RoleBinding',
        metadata=client.V1ObjectMeta(name='pipeline-runner-binding', namespace=namespace),
        subjects=[client.V1Subject(name='pipeline-runner', kind='ServiceAccount')],
        role_ref=client.V1RoleRef(
            name='pipeline-runner', kind='ClusterRole', api_group='rbac.authorization.k8s.io'
        ),
    )
    try:
        # Attempt to create the namespaced role binding.
        # NOTE(review): 'rabc_api' is the project client's attribute name (sic).
        k8s_client.rabc_api.create_namespaced_role_binding(namespace, body=role_binding)
    except client.exceptions.ApiException as e:
        # 409 Conflict: already exists — log and continue; otherwise re-raise.
        if e.status == 409:
            log.info(f'RoleBinding {role_binding.metadata.name} already exists')
        else:
            raise e

    # Handle role binding propagation for non-default regions.
    if region != 'default':
        propagation_job(
            region=region,
            k8s_client=k8s_client,
            name='pipeline-runner-binding',
            suffix=region,
            namespace=namespace,
            api_version='rbac.authorization.k8s.io/v1',
            kind='RoleBinding',
        )

    # Mark the pipeline as initialized for this user/region so subsequent
    # calls return early; the extra == 0 filter avoids clobbering a row
    # flipped by a concurrent initializer.
    db_session.query(UserAttribute).filter(
        UserAttribute.user_id == user_id,
        UserAttribute.pipeline_is_initialized == 0,
        UserAttribute.region == region,
    ).update({'pipeline_is_initialized': 1})
    # Commit changes to the database.
    db_session.commit()