# --- START OF FILE app.py ---

# 避免多进程同时启动对系统cpu负载过高
# time.sleep(random.randint(1,10))
# Import standard libraries
import copy
import logging
# Import TimedRotatingFileHandler for log rotation based on time
from logging.handlers import TimedRotatingFileHandler
import os
import sys
import time
import traceback
import uuid

# Import Flask and related extensions
from flask import Flask, Response, abort, g, redirect, request
from flask_appbuilder import SQLA, AppBuilder
from flask_compress import Compress
from flask_migrate import Migrate
from flask_sqlalchemy import get_debug_queries
from flask_talisman import Talisman
from flask_wtf.csrf import CSRFProtect
# Import OpenTelemetry for distributed tracing
from opentelemetry import trace
# Import Prometheus client for metrics
import prometheus_client
# Import psutil to inspect system processes
import psutil
# Import Werkzeug exceptions and middleware
from werkzeug.exceptions import HTTPException
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.routing import BaseConverter
# Import wtforms_json for form data handling
import wtforms_json

# Import custom application modules
from myapp.const.error import CommonErrorCode
from myapp.const.response import BizError
from myapp.security import MyappSecurityManager
from myapp.third.AlertBot import funcs as alert_funcs
from myapp.third.auth.sdk import is_expired
from myapp.tp import Protector
from myapp.tracer import init_tracer
from myapp.utils.core import pessimistic_connection_handling,register_region_filter
from myapp.utils.ctx import get_request_id
from myapp.utils.env import is_private
from myapp.utils.exception import ignore_exception, log_exception
from myapp.utils.log import DBEventLogger, JsonFormatter
from myapp.utils.storage import check_user_storage
from myapp.utils.region import get_region_obj_by_key


# Initialize the wtforms_json extension so WTForms can accept JSON form data.
wtforms_json.init()

# NOTE: this module only creates and configures the Flask app; keep all
# view-level code out of this file.

# Directory that contains this application package (used for migrations below).
APP_DIR = os.path.dirname(__file__)

# Dotted path of the configuration module, overridable via MYAPP_CONFIG.
CONFIG_MODULE = os.environ.get('MYAPP_CONFIG', 'myapp.config')
# Production mode is driven by the STAGE environment variable (default 'prod').
PRODUCTION_MODE = os.getenv('STAGE', 'prod') == 'prod'
# Print the current production mode status at startup.
print(f'production mode: {PRODUCTION_MODE}')

# Create the Flask application instance.
app = Flask(__name__)  # ,static_folder='/mnt',static_url_path='/mnt'

# Endpoint of the OpenTelemetry trace collector.
trace_host = 'clickhouse-streaming-collector.tracing:4317'
# Whether this process was launched as a Gunicorn worker.
is_gunicorn = False
# Detect Gunicorn by inspecting the parent process; any failure here is
# logged and swallowed by the log_exception context manager.
with log_exception:
    # Get the parent process ID.
    ppid = os.getppid()
    # ppid > 0 means a parent process exists.
    if ppid > 0:
        parent_process = psutil.Process(ppid)
        # Gunicorn workers are forked from a master process named 'gunicorn'.
        if parent_process.name().strip() == 'gunicorn':
            is_gunicorn = True

# Initialize the distributed-tracing module for Gunicorn workers only;
# failures are logged and swallowed so tracing never blocks startup.
with log_exception:
    if is_gunicorn:
        init_tracer(app, trace_host)

# Custom URL converter that lets route rules embed raw regular expressions,
# e.g. /path/<regx("[a-z]+"):name>.
class RegexConverter(BaseConverter):
    """URL map converter whose matching pattern is supplied per route."""

    def __init__(self, url_map, *items):
        # Initialize the base converter for this URL map.
        BaseConverter.__init__(self, url_map)
        # The first converter argument is the regex used to match the segment.
        self.regex = items[0]


# Register the custom regex converter under the 'regx' name so routes can
# use <regx("pattern"):param> segments.
app.url_map.converters['regx'] = RegexConverter

# Load the configuration from the module named by MYAPP_CONFIG.
app.config.from_object(CONFIG_MODULE)
# Shorthand for accessing the application configuration.
conf = app.config

# Ensure the configured data directory exists.
if conf.get('DATA_DIR', ''):
    if not os.path.exists(conf['DATA_DIR']):
        os.makedirs(conf['DATA_DIR'], exist_ok=True)

# Register any blueprints listed in the configuration; a failing blueprint
# is logged and skipped rather than aborting startup.
if conf.get('BLUEPRINTS'):
    for bp in conf.get('BLUEPRINTS'):
        with log_exception:
            # Announce which blueprint is being registered.
            print(f"Registering blueprint: '{bp.name}'")
            app.register_blueprint(bp)

# If configured, silence Flask-AppBuilder's logger down to ERROR.
if conf.get('SILENCE_FAB'):
    logging.getLogger('flask_appbuilder').setLevel(logging.ERROR)

# Configure the application logger level based on the debug mode.
if app.debug:
    # Debug mode: verbose logging.
    app.logger.setLevel(logging.DEBUG)  # pylint: disable=no-member
else:
    # Production: stream app logs to stdout at INFO level.
    app.logger.addHandler(logging.StreamHandler(stream=sys.stdout))  # pylint: disable=no-member
    app.logger.setLevel(logging.INFO)  # pylint: disable=no-member

# Initialize the Flask-AppBuilder SQLAlchemy database object.
db = SQLA(app)

# Enable CSRF protection when configured, exempting the listed views.
if conf.get('WTF_CSRF_ENABLED'):
    csrf = CSRFProtect(app)
    # Views exempt from CSRF protection.
    csrf_exempt_list = conf.get('WTF_CSRF_EXEMPT_LIST', [])
    for ex in csrf_exempt_list:
        csrf.exempt(ex)

# Pre-ping connections so stale DB connections are replaced instead of
# surfacing as errors mid-request.
pessimistic_connection_handling(db.engine)

# Initialize Flask-Migrate for database schema migrations.
migrate = Migrate(app, db, directory=APP_DIR + '/migrations')

# Logging configuration
# logging.basicConfig(format=app.config.get("LOG_FORMAT"))
# Set the root logger's level from the configuration; the fallback of 1 is
# the lowest numeric level, i.e. log everything when LOG_LEVEL is unset.
logging.getLogger().setLevel(app.config.get('LOG_LEVEL') if app.config.get('LOG_LEVEL') else 1)

# Choose the basic logging level from the deployment stage.
if PRODUCTION_MODE:
    logging.basicConfig(level=logging.INFO)
else:
    logging.basicConfig(level=logging.DEBUG)

# Rebuild the root logger's handlers: a single stdout handler emitting JSON.
logger = logging.getLogger()
logger_handler = logging.StreamHandler(stream=sys.stdout)
# Structured JSON output for log aggregation.
logger_handler.setFormatter(JsonFormatter(datefmt='%Y-%m-%d %H:%M:%S'))
# Drop any handlers installed by the basicConfig calls above.
logger.handlers = []
logger.addHandler(logger_handler)

# System log output for myapp; used when running under Gunicorn.
# Optionally add a time-based rotating file handler to the root logger.
if conf.get('ENABLE_TIME_ROTATE'):
    logging.getLogger().setLevel(conf.get('TIME_ROTATE_LOG_LEVEL'))
    # File name, rotation schedule, interval and retention come from config.
    handler = TimedRotatingFileHandler(
        conf.get('FILENAME'),
        when=conf.get('ROLLOVER'),
        interval=conf.get('INTERVAL'),
        backupCount=conf.get('BACKUP_COUNT'),
    )
    logging.getLogger().addHandler(handler)

# Enable Cross-Origin Resource Sharing (CORS) when configured.
if conf.get('ENABLE_CORS'):
    # Imported lazily so flask_cors is only required when CORS is enabled.
    from flask_cors import CORS

    CORS(app, **conf.get('CORS_OPTIONS'))

# Behind a reverse proxy: trust X-Forwarded-* headers via ProxyFix.
if conf.get('ENABLE_PROXY_FIX'):
    app.wsgi_app = ProxyFix(app.wsgi_app)

# Check if chunked transfer encoding support is enabled
# Optionally install middleware that supports chunked transfer encoding.
if conf.get('ENABLE_CHUNK_ENCODING'):

    class ChunkedEncodingFix:
        """WSGI middleware that lets Werkzeug read chunked request bodies.

        Setting ``wsgi.input_terminated`` tells werkzeug.wsgi to ignore
        Content-Length and read the input stream until it ends.
        """

        def __init__(self, app):
            # The next WSGI application in the stack.
            self.app = app

        def __call__(self, environ, start_response):
            transfer_encoding = environ.get('HTTP_TRANSFER_ENCODING', '')
            # Mark the stream as terminated so the chunked body is fully read.
            if transfer_encoding.lower() == 'chunked':
                environ['wsgi.input_terminated'] = True
            # Delegate to the wrapped application.
            return self.app(environ, start_response)

    # Wrap the WSGI app with the chunked-encoding middleware.
    app.wsgi_app = ChunkedEncodingFix(app.wsgi_app)

# Wrap the WSGI app with any additional middleware from the configuration.
if conf.get('ADDITIONAL_MIDDLEWARE'):
    for middleware in conf.get('ADDITIONAL_MIDDLEWARE'):
        app.wsgi_app = middleware(app.wsgi_app)

# Security manager class: the configured one, or MyappSecurityManager by default.
custom_sm = conf.get('CUSTOM_SECURITY_MANAGER') or MyappSecurityManager
# A custom security manager must extend MyappSecurityManager, not FAB's.
if not issubclass(custom_sm, MyappSecurityManager):
    raise Exception(
        """Your CUSTOM_SECURITY_MANAGER must now extend MyappSecurityManager,
         not FAB's security manager.
         See [4565] in UPDATING.md"""
    )

# Create the Flask-AppBuilder instance inside an application context.
with app.app_context():
    # db.create_all()  # table creation is handled elsewhere
    appbuilder = AppBuilder(
        app,
        db.session,
        base_template='myapp/base.html',
        # indexview=MyIndexView,  # custom index page
        # Custom authentication via the security manager selected above.
        security_manager_class=custom_sm,
        # Run `myapp init` to update FAB's perms; True keeps permissions in
        # sync automatically so newly added views get their permissions.
        update_perms=True,
    )

# Shorthand for the security manager instance.
security_manager = appbuilder.sm

# Results backend configuration (e.g. for Celery task results).
results_backend = conf.get('RESULTS_BACKEND')

# Event Logger
# Instantiate the event logger, defaulting to DBEventLogger.
event_logger = conf.get('EVENT_LOGGER', DBEventLogger)()

# Flask-Compress
# Enable response compression when configured.
if conf.get('ENABLE_FLASK_COMPRESS'):
    Compress(app)

# Enable Talisman HTTP security headers when configured.
if conf.get('TALISMAN_ENABLED'):
    talisman_config = conf.get('TALISMAN_CONFIG')
    Talisman(app, **talisman_config)

# Hook that provides administrators a handle on the Flask APP
# after initialization
# If a mutator function is configured, call it with the app object.
flask_app_mutator = conf.get('FLASK_APP_MUTATOR')
if flask_app_mutator:
    flask_app_mutator(app)

# OpenTelemetry tracer for the spans created by the request hooks below.
tracer = trace.get_tracer(__name__)


# Before-request hook: per-request bookkeeping (timing, client IP, request id).
@app.before_request
@tracer.start_as_current_span('add_request_id')
def add_request_id():
    """Record start time, resolve the client address and assign a request id.

    Also answers the '/ping' health check directly, short-circuiting the
    remaining before-request hooks.
    """
    # Stamp the start time so apply_http_headers can compute latency later.
    g.start = time.time()

    # Health-check endpoint: reply immediately, no further processing.
    if request.path == '/ping':
        return Response('pong', status=200)

    # Outside production, force verbose root logging per request.
    if not PRODUCTION_MODE:
        logging.root.setLevel(logging.DEBUG)

    # Best-effort: remember the user id (g.user may be absent/anonymous).
    with ignore_exception:
        g.user_id = g.user.id

    # Prefer the first hop of X-Forwarded-For as the real client address.
    g.remote_addr = request.remote_addr
    forwarded_for = request.headers.get('X-Forwarded-For')
    if forwarded_for:
        g.remote_addr = forwarded_for.split(',')[0].strip()

    # Propagate the caller's request id, or mint a fresh UUID when absent.
    with log_exception:
        header_request_id = request.headers.get('x_request_id')
        if header_request_id is None:
            header_request_id = str(uuid.uuid4())
        g.x_request_id = header_request_id


# Paths exempt from the login check (substring match; note check_login below
# defines its own, extended copy of this list).
not_check_login_path = ['/static', '/logout', '/login', '/health', '/register', '/send_sms_code']

# Paths exempt from the license check (substring match).
not_check_license_path = [
    '/static',
    '/logout',
    '/login',
    '/health',
    '/register',
    '/send_sms_code',
    '/license/register',
    '/license/info',
    '/common/envs',
    '/user/info/',
]
# Under Gunicorn, initialize the license protector against the license server.
if is_gunicorn:
    protector = Protector(kill_all_when_failed=False, host='license-server.infra')


# Before-request hook: enforce the license for all non-exempt paths.
@app.before_request
@tracer.start_as_current_span('check_license')
def check_license():
    """Abort with HTTP 427 when the license server rejects this request."""
    # Exempt paths (substring match) skip the license check entirely.
    for exempt in not_check_license_path:
        if exempt in request.path:
            return

    # The whole check can be disabled via an environment variable.
    if os.environ.get('LICENSE_CHECK_ENABLE', 'true') == 'false':
        return

    # Reject the request when the license protector does not accept it.
    if not protector.accept():
        body = '{ "status": 427,"message":"license无效，请输入正确的license"}'
        abort(
            Response(
                body,
                status=427,
                headers={'Content-Type': 'application/json'},
            )
        )

# not_check_region_path = []
# Paths intended to require a region check (currently unused: the path
# filtering below is commented out, so every request resolves a region).
check_region_path = ['/image/search']


# Before-request hook: resolve the request's region and store it on g.
@app.before_request
@tracer.start_as_current_span('check_region')
def check_region():
    """Resolve the 'Region' header into a region object and attach it to g.

    Aborts with HTTP 427 when the header names an unknown region.
    """
    # if any(path in request.path for path in not_check_region_path):
    #     return

    # if not any(path in request.path for path in check_region_path):
    #     return

    # A missing header falls back to the 'default' region, so region_key can
    # never be None here; the former `region_key is None` abort was
    # unreachable dead code and has been removed.
    region_key = request.headers.get('Region', 'default')

    # Look up the region object; an unknown key is rejected.
    region_info = get_region_obj_by_key(region_key)
    if region_info is None:
        abort(
            Response(
                '{ "status": 427,"message":"非法的Region"}',
                status=427,
                headers={'Content-Type': 'application/json'},
            )
        )

    # Make the resolved region available to downstream handlers.
    g.region = region_info


# Before-request hook: authentication / session validation.
@app.before_request
@tracer.start_as_current_span('check_login')
def check_login():
    """Reject unauthenticated external traffic with HTTP 401."""
    # Paths exempt from the login check (substring match against the URL).
    not_check_login_path = [
        '/static',
        '/logout',
        '/login',
        '/health',
        '/register',
        '/send_sms_code',
        '/envs',
        '/auth/callback',
        '/auth/login',
        '/license/info',
    ]

    for exempt in not_check_login_path:
        if exempt in request.path:
            return

    # Reject requests whose Casdoor token has expired.
    authorization_value = request.headers.get('Authorization')
    if is_expired(authorization_value):
        logging.info('expired Authorization token')
        abort(401)

    # External traffic flows through dashboard-frontend, which tags it with
    # t-source=external; anything else (e.g. local development) skips auth.
    if request.headers.get('t-source') != 'external':
        return

    # From here on, a logged-in user with a valid id is required.
    if not hasattr(g, 'user'):
        abort(401)

    if g.user.is_anonymous:
        abort(401)

    if g.user.get_id and not g.user.get_id():
        abort(401)


# Before-request hook: storage-quota enforcement for private deployments.
@app.before_request
@tracer.start_as_current_span('check_storage')
def check_storage():
    """Block resource-creating POST requests when the user's storage is full."""
    # Only private deployments enforce storage quotas.
    if not is_private():
        return

    # POST requests to these paths create storage-consuming resources.
    need_check_storage_path = [
        '/auto_learning/task/add',
        '/dataset_modelview/api/',
        '/notebook/add',
        '/pipeline_modelview_v2/api/',
        '/workflow_modelview/api/training',
        '/training_model_modelview/api/add',
        '/service_modelview/api/add',
    ]

    if request.method != 'POST' or request.path not in need_check_storage_path:
        return

    # Refuse the request when the user has exhausted their storage quota.
    if not check_user_storage(g.user.id):
        logging.error('用户存储不足')
        raise BizError(2000, '存储空间已满，请联系管理员')


# After-request hook: surface slow SQL statements in the log.
@app.after_request
@tracer.start_as_current_span('myapp_after_request')
def myapp_after_request(resp):
    """Log SQL queries slower than 100ms (or every query in debug mode)."""
    slow_threshold = 0.1  # seconds
    for query in get_debug_queries():
        if app.debug or query.duration >= slow_threshold:
            logging.warning(f'slow sql: {query.statement}, cost (ms): {query.duration * 1000}')
    # Pass the response through unchanged.
    return resp


# Service name for Prometheus metric names (dashes are invalid there).
srv_name = os.getenv('SERVICE_NAME', 'unknown_service').replace('-', '_')
# Counter: total number of requests, labeled by endpoint and path.
request_counter = prometheus_client.Counter(
    f'{srv_name}_request_counter', '请求总次数', ['endpoint', 'path']
)
# Counter: failed requests (non-200 status or non-zero business code).
request_failed_counter = prometheus_client.Counter(
    f'{srv_name}_request_failed_counter', '请求失败次数', ['endpoint', 'path']
)
# Histogram: request latency in seconds, labeled by endpoint and path.
request_duration_seconds = prometheus_client.Histogram(
    f'{srv_name}_request_duration_seconds', '请求耗时', ['endpoint', 'path']
)


# After-request hook: apply configured headers, record metrics and access logs.
@app.after_request
@tracer.start_as_current_span('apply_http_headers')
def apply_http_headers(response):
    """Applies the configuration's http headers to all responses.

    As a best-effort side channel (wrapped in log_exception so failures never
    break the response), this hook also:
      * emits a structured access-log record with timing and status fields,
      * updates the Prometheus request counters and duration histogram,
      * logs the request body when the response is an internal error.
    """
    # Add custom HTTP headers from the configuration to the response.
    for k, v in conf.get('HTTP_HEADERS').items():
        response.headers[k] = v

    # response.headers.add("Access-Control-Allow-Origin", "*")
    with log_exception:
        # Total request processing time (g.start is set in add_request_id).
        cost_time = round(time.time() - g.start, 3)
        endpoint = request.endpoint
        status_code = response.status_code
        # Structured fields attached to the access-log record.
        extra = {
            'id': str(uuid.uuid4()),
            'http_method': request.method,
            'endpoint': endpoint,
            'url_path': request.path,
            'url_query': request.query_string.decode('utf-8'),
            'host': request.host,
            'user_agent': '',
            'remote_addr': g.remote_addr if hasattr(g, 'remote_addr') else '',
            'content_type': request.content_type,
            'status_code': status_code,
            'response_code': '0',
            'response_msg': '',
            'cost_time': cost_time,
            'error_msg': '',
        }

        # Error message recorded by the global exception handler, if any.
        if hasattr(g, 'error_msg'):
            extra['error_msg'] = g.error_msg
        if request.user_agent:
            extra['user_agent'] = request.user_agent.string

        # Parse the response body leniently: silent=True yields None for
        # malformed JSON instead of raising, which previously aborted the
        # rest of this logging/metrics section via log_exception.
        resp = response.get_json(silent=True)
        response_code = 0
        # Business-level status/message embedded in the JSON envelope.
        if resp and 'message' in resp:
            extra['response_msg'] = resp.get('message', '')
            response_code = resp.get('status', -1)
        extra['response_code'] = str(response_code)

        # Update Prometheus metrics, skipping health-check style endpoints.
        if (
            request.path not in ['/ping', '/health', '/metrics', '/healthcheck']
            and endpoint is not None
        ):
            labels = {'endpoint': endpoint, 'path': request.path}
            request_counter.labels(**labels).inc()
            request_duration_seconds.labels(**labels).observe(cost_time)
            # Failed = non-200 status, or 200 with a non-zero business code
            # (equivalent to the former two-branch condition).
            if status_code != 200 or response_code != 0:
                request_failed_counter.labels(**labels).inc()

        # Structured access-log record.
        logging.info('请求日志埋点', extra=extra)

        # For internal errors, also log the request body to aid debugging.
        if status_code == 500 or response_code == CommonErrorCode.INTERNAL_ERROR.value:
            debug_extra = copy.deepcopy(extra)
            # silent=True: form posts and bodyless requests yield None rather
            # than raising (Flask >= 2.1 raises 415 for non-JSON bodies).
            request_body = request.get_json(silent=True)
            if not request_body and request.form:
                request_body = request.form.to_dict(flat=False)
            if request_body:
                debug_extra['request_body'] = request_body
            logging.error('异常请求体埋点', extra=debug_extra)

    # Return the final response object.
    return response


# Custom 404 handler.
@app.errorhandler(404)
def page_not_found(e):
    """Redirect any unknown URL to the frontend's 404 page."""
    # The SPA renders the actual not-found view.
    not_found_page = '/taichu#/404'
    return redirect(not_found_page)


# Global exception handler: log, alert, and return a uniform error envelope.
@app.errorhandler(Exception)
@tracer.start_as_current_span('handle_exception')
def handle_exception(e):
    """Translate any unhandled exception into a standardized JSON error body.

    Client-side HTTPExceptions (non-5xx) are returned unchanged; BizError with
    a non-internal code keeps its own payload; everything else becomes a
    generic internal-error response plus an alert notification.
    """
    # Log the exception with its traceback.
    logging.error(f'{repr(e)}\n{traceback.format_exc()}')
    # Let Werkzeug render non-5xx HTTP errors itself. Guard against
    # HTTPException instances whose code is None (the base-class default),
    # which previously made the comparison raise a TypeError.
    if isinstance(e, HTTPException) and e.code is not None:
        if e.code < 500 or e.code >= 600:
            return e

    # Expose the error message to apply_http_headers for access logging.
    if g:
        g.error_msg = repr(e)

    # Business-logic errors carry their own result/status/message payload.
    if isinstance(e, BizError):
        # Roll back the session so a failed transaction cannot poison later
        # use; rollback errors themselves are ignored.
        with ignore_exception:
            db.session.rollback()

        if e.code != CommonErrorCode.INTERNAL_ERROR.value:
            return {'result': e.result, 'status': e.code, 'message': e.message}

    # User-facing message including the request id for support lookups.
    err_msg = f'系统异常，请联系技术支持或提交反馈信息（{get_request_id()}）'
    # In debug mode, append the traceback to the error message.
    if app.debug:
        err_msg = f'{err_msg}\n{traceback.format_exc()}'

    # Notify operators about the unexpected failure.
    alert_funcs.send_error_msg(request, e)
    # Standardized JSON error response.
    return {
        'result': None,
        'status': CommonErrorCode.INTERNAL_ERROR.value,
        'message': err_msg,
    }

# Set werkzeug's log level to ERROR to stop noisy per-request access logs.
# log = logging.getLogger('werkzeug')
# log.setLevel(logging.ERROR)

# When imported by a WSGI server (e.g. Gunicorn) rather than run directly,
# route the Flask app's logs through Gunicorn's error logger.
if __name__ != '__main__':
    gunicorn_logger = logging.getLogger('gunicorn.error')
    # Reuse Gunicorn's handlers so all logs end up in one place.
    app.logger.handlers = gunicorn_logger.handlers
    # Match Gunicorn's configured log level.
    app.logger.setLevel(gunicorn_logger.level)

# Import the application's views and APIs last so their routes register on
# the fully configured app (the import has side effects; keep it at the end).
from myapp import apis, views  # noqa: F401, E402
