#!/usr/bin/env python
# Import standard Python libraries for JSON, logging, and operating system interactions.
import json
import logging
import os

# Import the YAML library for parsing YAML files.
import yaml

# Import core application components from the 'myapp' package.
# This includes the Flask app instance, the AppBuilder object, the database session, and the security manager.
from myapp.app import app, appbuilder, db, security_manager
# Import constants defined within the application.
from myapp.const.base import SharedMntDir
from myapp.const.user import INTERNAL_USER_NAME
# Import database models.
from myapp.models.model_dataset import Dataset
from myapp.models.model_job import Images, Job_Template, Repository
from myapp.models.model_team import Project, Project_User
# Import functions for interacting with third-party services like an annotation/labeling tool.
from myapp.third.annotation.api import create_label_task, import_dataset_to_project
# Import a helper function for managing permissions.
from myapp.third.auth.permission import add_permission_for_role
# Import functions for managing message queue topics.
from myapp.third.mq.admin import create_topic_if_not_exist
from myapp.third.mq.config import SlowTopicName, TopicName
# Import the Nacos client for service discovery and configuration management.
import myapp.third.nacos.client as nacos_cli
# Import utility functions for exception logging and region management.
from myapp.utils.exception import log_exception
from myapp.utils.region import get_regions_info, get_non_default_regions, get_region_key
from myapp.utils.region_storage import StorageMgrFactory

# Create a shorthand reference to the application's configuration.
# NOTE(review): `conf` is not used in this file — presumably imported by
# other modules; confirm before removing.
conf = app.config


# This function acts as an application factory, which is a common pattern in Flask.
# It's used by the Flask CLI to get an instance of the application.
def create_app(script_info=None):
    """Application factory used by the Flask CLI.

    The ``script_info`` argument is supplied by Flask's CLI machinery and
    is intentionally ignored: the pre-built module-level ``app`` instance
    is returned as-is.
    """
    return app


# This decorator registers a function that provides variables to the 'flask shell' context.
# It makes 'app' and 'db' automatically available when running 'flask shell', which is useful for debugging.
@app.shell_context_processor
def make_shell_context():
    """Make ``app`` and ``db`` available automatically in ``flask shell``."""
    return dict(app=app, db=db)


# This decorator registers the 'init' function as a command-line command.
# It can be run with 'flask init'.
# https://dormousehole.readthedocs.io/en/latest/cli.html
@app.cli.command('init')
# @pysnooper.snoop()
def init():
    """Initialize the Myapp application (run with ``flask init``).

    Bootstraps: the built-in internal user, default project groups, job
    templates (with their image records), per-region built-in datasets,
    Kafka topics, Nacos configuration entries and role permissions.
    Every step checks for existing records first, so the command is safe
    to re-run.
    """
    # Create the special internal user required for system operations.
    # Raw SQL is used to force a specific, known primary key (id=0).
    # NOTE(review): the interpolated value is a trusted constant, but a
    # parameterized query would still be safer than an f-string.
    with log_exception:
        db.session.execute(
            f'INSERT INTO kubeflow.ab_user (id, first_name, last_name, username, password,'
            f' active, email, last_login, login_count, fail_login_count, created_on, changed_on,'
            f' created_by_fk, changed_by_fk, org,'
            f' nickname, info, phone, expired_time, status, taichu)'
            f"VALUES (0, '{INTERNAL_USER_NAME}', '{INTERNAL_USER_NAME}',"
            f" '{INTERNAL_USER_NAME}', '', 1, '{INTERNAL_USER_NAME}@qq.com',"
            f" '2024-10-11 14:50:12',"
            f" 701, 0, '2023-03-23 15:59:05', '2023-03-23 15:59:05', 1,"
            f" 1, null, '默认昵称', '', null, 0, 1, '')"
        )
        db.session.commit()
    # Some database setups ignore explicit ids on insert; as a fallback,
    # force the internal user's id to 0 after the fact.
    with log_exception:
        db.session.execute(
            f"UPDATE kubeflow.ab_user SET id = 0 WHERE username = '{INTERNAL_USER_NAME}'"
        )
        db.session.commit()
    # Initialize the default project groups.
    try:
        # update_perms=True makes Flask-AppBuilder detect newly added permissions.
        appbuilder.add_permissions(update_perms=True)
        # Synchronize role/permission definitions in code with the database.
        security_manager.sync_role_definitions()

        def add_project(project_type, name, describe, expand=None):
            """Create a project group (plus its admin creator link) if missing."""
            expand = expand or {}
            # Skip creation if a project with the same name and type exists.
            project = (
                db.session.query(Project).filter_by(name=name).filter_by(type=project_type).first()
            )
            if project is None:
                try:
                    project = Project()
                    project.type = project_type
                    project.name = name
                    project.describe = describe
                    # Extra metadata is stored in 'expand' as a JSON string.
                    project.expand = json.dumps(expand, ensure_ascii=False, indent=4)
                    db.session.add(project)
                    db.session.commit()

                    # Link the new project to the admin user (user_id=1)
                    # with the 'creator' role.
                    project_user = Project_User()
                    project_user.project = project
                    project_user.role = 'creator'
                    project_user.user_id = 1
                    db.session.add(project_user)
                    db.session.commit()
                    print('add project %s' % name)
                except Exception as e:
                    print(e)
                    db.session.rollback()

        # Default organizational project groups.
        add_project('org', '推荐中心', '推荐项目组')
        add_project('org', '多媒体中心', '多媒体项目组')
        add_project('org', '搜索中心', '搜索项目组')
        add_project('org', '广告中心', '广告项目组')
        add_project('org', 'public', '公共项目组')

        # Default projects for job templates, organized by category.
        add_project(
            'job-template',
            '基础命令',
            'python/bash等直接在服务器命令行中执行命令的模板',
            {'index': 1},
        )
        add_project(
            'job-template', '数据导入导出', '集群与用户机器或其他集群之间的数据迁移', {'index': 2}
        )
        add_project(
            'job-template',
            '数据处理',
            '数据的单机或分布式处理任务,ray/spark/hadoop/volcanojob',
            {'index': 3},
        )
        add_project(
            'job-template', '机器学习', '传统机器学习，lr/决策树/gbdt/xgb/fm等', {'index': 4}
        )
        add_project('job-template', '深度学习', '', {'index': 5})
        add_project(
            'job-template',
            '深度学习框架',
            '深度框架训练，tf/pytorch/mxnet/mpi/horovod/kaldi等',
            {'index': 6},
        )
        add_project(
            'job-template', 'tf分布式', 'tf相关的训练，模型校验，离线预测等功能', {'index': 7}
        )
        add_project(
            'job-template',
            'pytorch分布式',
            'pytorch相关的训练，模型校验，离线预测等功能',
            {'index': 8},
        )
        add_project(
            'job-template', 'xgb分布式', 'xgb相关的训练，模型校验，离线预测等功能', {'index': 9}
        )
        add_project('job-template', '模型服务化', '模型服务化部署相关的组件模板', {'index': 10})
        add_project('job-template', '推荐类模板', '推荐领域常用的任务模板', {'index': 11})
        add_project('job-template', '多媒体类模板', '音视频图片文本常用的任务模板', {'index': 12})
        add_project('job-template', '搜索类模板', '向量搜索常用的任务模板', {'index': 13})

    except Exception as e:
        print(e)

    def create_template(
        repository_id,
        project_name,
        image_name,
        image_describe,
        job_template_name,
        type='common',
        job_template_old_names=None,
        job_template_describe='',
        job_template_command='',
        job_template_args=None,
        job_template_volume='',
        job_template_account='',
        job_template_expand=None,
        job_template_env='',
        gitpath='',
    ):
        """Create or update a job template and its backing image record.

        Parameters mirror the Images / Job_Template model fields;
        ``repository_id`` is required, everything else has a usable default.
        Old template names in ``job_template_old_names`` are looked up so a
        renamed template is updated rather than duplicated.
        """
        job_template_old_names = job_template_old_names or []
        # BUGFIX: the original unconditionally did
        # job_template_expand['source'] = 'github', which raised TypeError
        # (and silently rolled back) whenever the default None was used.
        job_template_expand = job_template_expand or {}
        # The repository is a required dependency.
        if not repository_id:
            return
        # Look up the Docker image and the parent job-template project.
        images = db.session.query(Images).filter_by(name=image_name).first()
        project = (
            db.session.query(Project)
            .filter_by(name=project_name)
            .filter_by(type='job-template')
            .first()
        )
        # Create the image record if it does not exist yet.
        if images is None and project:
            try:
                images = Images()
                images.name = image_name
                images.describe = image_describe
                images.created_by_fk = 1
                images.changed_by_fk = 1
                images.project_id = project.id
                images.repository_id = repository_id
                images.gitpath = gitpath
                images.image_version = '1'
                db.session.add(images)
                db.session.commit()
                print('add images %s' % image_name)
            except Exception as e:
                print(e)
                db.session.rollback()

        # Look the template up by its current name, falling back to any
        # old names (backward compatibility with renames).
        job_template = db.session.query(Job_Template).filter_by(name=job_template_name).first()
        if not job_template:
            for old_name in job_template_old_names:
                job_template = db.session.query(Job_Template).filter_by(name=old_name).first()
                if job_template:
                    break

        # BUGFIX: guard against images being None (e.g. its creation above
        # failed); the original evaluated images.id and raised AttributeError.
        if project and images and images.id:
            is_new = job_template is None
            if is_new:
                job_template = Job_Template()
            try:
                # Populate (or refresh) every field from the arguments.
                job_template.name = job_template_name.replace('_', '-')
                job_template.type = type
                job_template.describe = job_template_describe
                job_template.entrypoint = job_template_command
                job_template.volume_mount = job_template_volume
                job_template.accounts = job_template_account
                # Mark the template as coming from the built-in set.
                job_template_expand['source'] = 'github'
                job_template.expand = json.dumps(job_template_expand, indent=4, ensure_ascii=False)
                job_template.created_by_fk = 1
                job_template.changed_by_fk = 1
                job_template.project_id = project.id
                job_template.images_id = images.id
                job_template.version = 'Release'
                job_template.env = job_template_env
                job_template.args = (
                    json.dumps(job_template_args, indent=4, ensure_ascii=False)
                    if job_template_args
                    else '{}'
                )
                if is_new:
                    db.session.add(job_template)
                db.session.commit()
                print('%s job_template %s' % ('add' if is_new else 'update',
                                              job_template_name.replace('_', '-')))
            except Exception as e:
                print(e)
                db.session.rollback()

    # Initialize the repository / image / template records.
    try:
        print('begin init repository')
        # Use the first available repository for all built-in templates.
        repository = db.session.query(Repository).first()
        # If no repository exists, create a placeholder default one.
        if repository is None:
            try:
                repository = Repository()
                repository.name = 'hubsecret'
                repository.server = 'registry.docker-cn.com'
                repository.user = 'yourname'
                repository.password = 'yourpassword'
                repository.hubsecret = 'hubsecret'
                repository.created_by_fk = 1
                repository.changed_by_fk = 1
                db.session.add(repository)
                db.session.commit()
                print('add repository hubsecret')
            except Exception as e:
                print(e)
                db.session.rollback()

        print('begin init job_templates')
        # BUGFIX: use a context manager so the file handle is closed.
        with open('myapp/init-job-template.json') as f:
            job_templates = json.load(f)
        logging.info(
            f'[repository_id] name: {repository.name}, id: {repository.id}, '
            f'server: {repository.server}, user: {repository.user}'
        )
        # Create or update each built-in template against the repository.
        for job_template_name in job_templates:
            job_template = job_templates[job_template_name]
            job_template['repository_id'] = repository.id
            create_template(**job_template)

    except Exception as e:
        logging.error(e)

    # Initialize the built-in datasets for every configured region.
    with log_exception:
        regions = get_regions_info()
        for r in regions:
            create_dataset(r)

    # Ensure the application's Kafka topics exist.
    with log_exception:
        create_topic_if_not_exist(TopicName)
    with log_exception:
        create_topic_if_not_exist(SlowTopicName)

    # Populate the Nacos configuration center with default settings.
    with log_exception:
        for root, _, files in os.walk('myapp/etc/nacos'):
            for file in files:
                # Subdirectory name -> Nacos 'group'; filename -> 'data_id'.
                group = os.path.basename(root)
                data_id = file
                # Never overwrite a config that already exists in Nacos.
                content = nacos_cli.get_config(data_id, group, fmt='txt')
                if content:
                    continue
                with open(os.path.join(root, file)) as f:
                    content = f.read()
                    nacos_cli.set_config(data_id, group, content)
                    logging.info(f'init nacos config {data_id} {group} success')

    # Register permissions declared via custom decorators on view methods.
    for view in appbuilder.baseviews:
        with log_exception:
            for func_name in dir(view):
                # Skip private/magic attributes and non-callables.
                if func_name.startswith('__'):
                    continue
                if not callable(getattr(view, func_name)):
                    continue

                func = getattr(view, func_name)
                # '_urls' (endpoint paths) and '_roles' (roles needing the
                # permission) are set by the project's custom decorators.
                if not hasattr(func, '_urls'):
                    continue
                if not hasattr(func, '_roles'):
                    continue

                roles = func._roles
                if not roles:
                    continue

                urls = func._urls
                if not urls:
                    continue

                # Skip views with no base route.
                route_base = view.route_base
                if route_base is None:
                    continue

                # Full URL path for the permission, e.g. '/api/v1/jobs/list'.
                url_path = f'{route_base}{urls[0][0]}'

                # Action is the single HTTP method, or '*' for several.
                act = '*'
                methods = urls[0][1]
                if len(methods) == 1:
                    act = methods[0]

                # Grant the permission to every role from the decorator.
                for role in roles:
                    with log_exception:
                        add_permission_for_role(role, url_path, act)

# This function creates the initial set of datasets for a given region.
def create_dataset(reg):
    """Create the built-in datasets (and labeling tasks) for one region.

    Args:
        reg: region info dict; only the 'key' entry is read here.

    Idempotent: returns immediately if the region already has any
    internal datasets.
    """
    region_key = reg['key']
    # Only initialize regions that have no internal datasets yet.
    cnt = db.session.query(Dataset).filter_by(source='internal').filter_by(region=region_key).count()
    if cnt > 0:
        print(f'skip init dataset for region:{region_key}')
        return

    # Upload the bundled example data to the region's object storage.
    local_dir = os.path.join(SharedMntDir, 'train_data/llm/examples')
    try:
        print(f'init region minio:{region_key},train_data/llm/examples')
        StorageMgrFactory.region(region_key).upload_dir(
            local_dir,
            'train_data/llm/examples')
    except Exception as e:
        # BUGFIX: the original discarded the exception detail; print it so
        # upload failures are diagnosable. Initialization still continues.
        print(f'初始化{local_dir}失败')
        print(e)

    print(f'begin init dataset of region: {region_key}')
    # Dataset metadata is shipped with the app as a YAML file.
    with open('myapp/init-dataset.yaml', encoding='utf-8') as f:
        data = yaml.safe_load(f)

    for item in data:
        with log_exception:
            # Create the dataset record if it does not yet exist in this region.
            record = db.session.query(Dataset).filter_by(region=region_key).filter_by(name=item['name']).first()
            if not record:
                record = Dataset()
                # Copy every field from the YAML item onto the model.
                for k, v in item.items():
                    setattr(record, k, v)

                # Default ownership and region information.
                record.owner = 'admin'
                record.region = region_key
                record.created_by_fk = 1
                record.changed_by_fk = 1
                db.session.add(record)
                db.session.commit()
                print('add dataset %s' % item.get('name', ''))

            # Create a task in the external labeling tool, import the
            # dataset into it, and remember the task id in 'expand'.
            task_id = create_label_task(item['name'], 'admin')
            import_dataset_to_project(
                task_id, record.id, record.label_type, 'admin', source='', title=item['name']
            )
            record.expand = json.dumps({'label_task_id': int(task_id)})
            db.session.commit()
