import paramiko
import socket
from pathlib import Path
import shutil
from main import Api


class Localhost:
    """Drop-in stand-in for paramiko.SSHClient when the target host is the local machine.

    Callers in this module use the paramiko pattern::

        stdin, stdout, stderr = ssh.exec_command(cmd)
        stdout.read().decode()
        stdout.channel.recv_exit_status()

    so exec_command must return stream objects supporting that interface.
    (Bug fix: the old success path returned raw strings — which broke every
    caller at ``.read()``/``.channel`` — and the exception path returned a
    dict, a third incompatible shape.)
    """

    class _Stream:
        """Minimal file-like object mimicking paramiko's ChannelFile."""

        class _Channel:
            def __init__(self, exit_status: int):
                self._exit_status = exit_status

            def recv_exit_status(self) -> int:
                # Command already finished; just report its exit code.
                return self._exit_status

        def __init__(self, text: str, exit_status: int):
            self._data = text.encode()
            self.channel = self._Channel(exit_status)

        def read(self) -> bytes:
            # paramiko's read() yields bytes, so callers can .decode()
            return self._data

    def exec_command(self, cmd: str):
        """
        Run *cmd* locally through the shell and return a paramiko-like
        (stdin, stdout, stderr) triple.

        :param cmd: shell command string to execute
        :return: (None, stdout_stream, stderr_stream); both streams support
                 .read() -> bytes and .channel.recv_exit_status() -> int
        """
        import subprocess
        try:
            process = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True
            )
            stdout, stderr = process.communicate()
            return None, self._Stream(stdout, process.returncode), self._Stream(stderr, process.returncode)
        except Exception as e:
            # Surface the failure the same way a failed remote command would:
            # empty stdout, message on stderr, non-zero exit status.
            return None, self._Stream('', -1), self._Stream(str(e), -1)

    def close(self):
        """Match paramiko.SSHClient.close(); nothing to release locally."""
        return None


def ssh_connect(settings) -> paramiko.SSHClient | Localhost:
    """
    Open a connection to the server described in *settings*.

    Returns a Localhost stand-in when the target is 127.0.0.1/localhost,
    otherwise a connected paramiko.SSHClient using password or key auth.

    :param settings: dict containing a 'serverConfig' entry
    :raises Exception: with a user-facing message on any connection failure
    """
    if not (settings and 'serverConfig' in settings):
        raise Exception('无法获取服务器信息，请检查服务器配置！')
    cfg = settings['serverConfig']

    # Local targets need no SSH at all — run commands in-process instead.
    if cfg['ip'] in ('localhost', '127.0.0.1'):
        return Localhost()

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    common_args = {
        'hostname': cfg['ip'],
        'port': int(cfg['port']),
        'username': cfg['username'],
        'timeout': 10,
    }
    try:
        if cfg['authType'] == 'password':
            client.connect(password=cfg['password'], **common_args)
        else:  # key-based authentication
            key = paramiko.RSAKey.from_private_key_file(cfg['privateKey'])
            client.connect(pkey=key, **common_args)
        return client
    except paramiko.AuthenticationException:
        raise Exception('认证失败，请检查用户名和密码/密钥')
    except paramiko.SSHException as e:
        raise Exception(f'SSH连接错误: {str(e)}')
    except socket.timeout:
        raise Exception(f'连接超时，请检查网络或服务器是否可达')
    except Exception as e:
        raise Exception(f'未知错误: {str(e)}')


def createUserFolder(settings):
    """
    Create /home/<username> on the server unless it already exists.

    :param settings: configuration dict containing 'serverConfig'
    :return: {'suc': bool, 'msg': str}
    """
    conn = None
    try:
        conn = ssh_connect(settings)
        cfg = settings['serverConfig']
        user = cfg['username']

        # `[ -d path ]` only sets an exit code; chaining with && / || makes it
        # print a marker we can read back instead of inspecting the status.
        _, out, _err = conn.exec_command(
            f'[ -d "/home/{user}" ] && echo "exists" || echo "not exists"')
        if out.read().decode().strip() == "exists":
            return {'suc': True, 'msg': f'用户目录/home/{user}已存在'}

        # Create the directory and hand ownership to the user.
        _, out, err = conn.exec_command(
            f'sudo mkdir -p /home/{user} && sudo chown {user}:{user} /home/{user}')
        if out.channel.recv_exit_status() == 0:
            return {'suc': True, 'msg': f'成功创建用户目录/home/{user}'}
        reason = err.read().decode().strip()
        return {'suc': False, 'msg': f'创建用户目录失败: {reason}'}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息{e}"}
    finally:
        if conn:
            conn.close()


def listNetStat(settings):
    """
    List all network connection states on the server.

    :return: {'suc': True, 'data': list[dict]} on success,
             {'suc': False, 'msg': str} on failure
    """
    conn = None
    try:
        conn = ssh_connect(settings)
        _, out, err = conn.exec_command(
            'netstat -tulnp 2>/dev/null || ss -tulnp 2>/dev/null')

        if out.channel.recv_exit_status() != 0:
            return {'suc': False, 'msg': err.read().decode().strip()}

        raw = out.read().decode().strip()

        entries = []
        # First two lines are table headers — skip them.
        for row in raw.split('\n')[2:]:
            if not row.strip():
                continue
            fields = row.split()
            entries.append({
                'protocol': fields[0],
                'local_address': fields[3],
                'foreign_address': fields[4] if len(fields) > 4 else '',
                'state': fields[5] if len(fields) > 5 else '',
                'pid_program': ' '.join(fields[6:]) if len(fields) > 6 else '',
            })

        return {'suc': True, 'data': entries}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if conn:
            conn.close()


def getRunningContainers(settings, ssh=None) -> dict:
    """
    Fetch the list of Docker containers on the server (including stopped ones).

    :param settings: server configuration, used only when *ssh* is not supplied
    :param ssh: optional existing SSH connection (not closed when supplied)
    :return: {'suc': bool, 'containers': list[dict], 'msg': str}
    """
    opened_here = False
    try:
        if ssh is None:
            ssh = ssh_connect(settings)
            opened_here = True
        # Pipe-separated --format keeps the parsing trivial.
        _, out, _err = ssh.exec_command(
            'docker ps -a --format "{{.ID}}|{{.Names}}|{{.Image}}|{{.Status}}|{{.Ports}}"')

        containers = []
        for record in out.read().decode().splitlines():
            if not record:
                continue
            cid, cname, image, status, ports = record.split('|')
            containers.append({
                'id': cid,
                'name': cname,
                'image': image,
                'status': status,
                'ports': ports,
            })

        return {
            'suc': True,
            'containers': containers,
            'msg': '获取容器列表成功'
        }

    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh and opened_here:
            ssh.close()


def stopContainer(settings, containerId: str) -> dict:
    """
    Stop the given Docker container and then remove it.

    :param settings: server configuration
    :param containerId: ID of the container to stop and remove
    :return: {'suc': bool, 'msg': str}
    """
    conn = None
    try:
        conn = ssh_connect(settings)

        # Step 1: stop the running container.
        _, out, err = conn.exec_command(f'docker stop {containerId}')
        if out.channel.recv_exit_status() != 0:
            return {"suc": False, "msg": f"停止容器失败: {err.read().decode()}"}

        # Step 2: remove the now-stopped container.
        _, out, err = conn.exec_command(f'docker rm {containerId}')
        if out.channel.recv_exit_status() != 0:
            return {"suc": False, "msg": f"移除容器失败: {err.read().decode()}"}
        return {"suc": True, "msg": f"容器 {containerId} 已成功停止并移除"}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if conn:
            conn.close()


def logContainer(api: Api, containerId: str):
    """
    Fetch the logs of a container and echo them to the terminal.

    :param api: Api object used for SSH access and terminal output
    :param containerId: container ID or name
    :return: {'suc': bool, 'logs': str, 'msg': str}
    """
    conn = None
    try:
        conn = api.ssh_connect()

        api.printToTerm(f"正在获取容器 {containerId} 的日志...", 'info')
        # 2>&1 folds stderr into stdout, since docker writes logs to both.
        _, out, err = conn.exec_command(f'docker logs {containerId} 2>&1')

        log_text = out.read().decode().strip()
        err_text = err.read().decode().strip()

        if err_text:
            api.printToTerm(f"获取日志时发生错误: {err_text}", 'error')
            return {'suc': False, 'msg': err_text, 'logs': ''}

        api.openTerminal()
        api.printToTerm(f"成功获取容器 {containerId} 的日志如下：", 'success')
        api.printToTerm(log_text, 'debug')
        return {
            'suc': True,
            'logs': log_text,
            'msg': f'成功获取容器 {containerId} 的日志'
        }

    except Exception as e:
        api.printToTerm(f"获取容器日志异常: {str(e)}", 'error')
        return {
            'suc': False,
            'msg': f'获取容器日志异常: {str(e)}',
            'logs': ''
        }
    finally:
        if conn:
            conn.close()


def restartContainer(api: Api, containerId: str):
    """Restart the given container. Not implemented yet — placeholder only."""
    ...


def uploadFile(settings: dict, localPath: Path | str, remotePath: Path | str, ssh=None) -> dict:
    """
    Upload a single file to the remote server over SFTP.

    :param settings: server configuration dict, used only when *ssh* is not supplied
    :param localPath: local file path
    :param remotePath: remote file path (missing parent directories are created)
    :param ssh: optional existing SSH connection (not closed when supplied)
    :return: {'suc': bool, 'msg': str}
    """
    sftp = None
    need_close = False
    try:
        # Bug fix: the signature accepts str, but the old code called
        # remotePath.parent directly and crashed on plain strings.
        remote = Path(remotePath)

        if ssh is None:
            need_close = True
            ssh = ssh_connect(settings)

        sftp = ssh.open_sftp()
        # Make sure the parent directory exists on the remote side.
        create_remote_folder(remote.parent, None, ssh)

        # Normalize to POSIX separators; overwrites any existing file.
        sftp.put(str(localPath), str(remote).replace('\\', '/'))
        return {"suc": True, "msg": "文件上传成功"}

    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if sftp:
            sftp.close()
        if need_close and ssh:
            ssh.close()


def exist_remote_file(sftp, remote_file_path):
    """Return True when *remote_file_path* exists on the SFTP server."""
    normalized = str(remote_file_path).replace('\\', '/')
    try:
        sftp.stat(normalized)  # raises IOError when the path is absent
    except IOError:
        return False
    return True


def exist_remote_folder(sftp, remote_folder_path: str | Path):
    try:
        remote_folder_path = str(remote_folder_path).replace('\\', '/')
        # 检查文件是否已存在
        sftp.stat(remote_folder_path)
        return True
    except IOError:
        # 文件不存在，无需处理
        return False


def remove_remote_folder(remote_folder_path: Path | str, ssh, sftp=None):
    """
    Remove a directory (and all its contents) on the remote host.

    :param remote_folder_path: remote directory path; Windows separators are normalized
    :param ssh: open SSH connection used to run `rm -rf`
    :param sftp: optional existing SFTP session; opened and closed internally when omitted
    :return: True when the directory is gone (or never existed)
    :raises Exception: when the remote `rm -rf` command fails
    """
    own_sftp = sftp is None
    if own_sftp:
        sftp = ssh.open_sftp()
    try:
        remote_folder_path = str(remote_folder_path).replace('\\', '/')
        if exist_remote_folder(sftp, remote_folder_path):
            # Quote the path so names containing spaces are removed safely.
            stdin, stdout, stderr = ssh.exec_command(f'rm -rf "{remote_folder_path}"')
            exit_status = stdout.channel.recv_exit_status()  # wait for completion
            if exit_status != 0:
                error = stderr.read().decode()
                raise Exception(f"删除远程目录失败: {error}")
        return True
    finally:
        # Bug fix: the old code leaked the internally-opened SFTP session when
        # the folder did not exist or when the rm command failed.
        if own_sftp and sftp:
            sftp.close()


def remove_remote_file(sftp, remote_file_path: Path | str):
    """Delete a remote file over SFTP if it exists; always returns True."""
    target = str(remote_file_path).replace('\\', '/')
    if exist_remote_file(sftp, target):
        sftp.remove(target)
    return True


def create_remote_folder(remote_path: str | Path, settings=None, ssh=None) -> dict:
    """
    在远程服务器上递归创建指定目录的所有层级
    :param settings: 服务器配置
    :param remote_path: 要创建的远程目录路径
    :param ssh: 可选的已有SSH连接
    :return: 返回操作结果字典 {
        'suc': bool,
        'msg': str
    }
    """
    need_close = False
    try:
        if ssh is None:
            ssh = ssh_connect(settings)
            need_close = True

        # 确保路径为Linux格式并去除末尾的/
        remote_path = str(remote_path).replace('\\', '/').rstrip('/')

        # 使用mkdir -p命令递归创建目录
        stdin, stdout, stderr = ssh.exec_command(f'mkdir -p "{remote_path}"')  # 目录存在不会报错
        exit_status = stdout.channel.recv_exit_status()

        if exit_status == 0:
            return {
                'suc': True,
                'msg': f'成功创建目录 {remote_path}'
            }
        else:
            error_msg = stderr.read().decode().strip()
            return {
                'suc': False,
                'msg': f'创建目录失败: {error_msg}'
            }

    except Exception as e:
        return {
            'suc': False,
            'msg': f'创建目录异常: {str(e)}'
        }
    finally:
        if ssh and need_close:
            ssh.close()


def uploadFolder(settings, local_folder: Path, remote_folder: Path, ssh=None):
    """
    Upload everything under *local_folder* to *remote_folder* on the server.

    The folder is zipped locally ('res.zip' in the working directory), uploaded
    as a single file, and unzipped remotely — installing `unzip` on the server
    first when it is missing. The remote folder is wiped beforehand so the
    operation is a replace, not a merge.

    :param settings: server configuration dict
    :param local_folder: local folder path
    :param remote_folder: remote folder path
    :param ssh: optional existing SSH connection, reused to save resources
    :return: {'suc': bool, 'msg': str}
    """
    sftp = None
    need_close = False  # close the connection only when it was opened here
    try:
        if ssh is None:
            need_close = True
            ssh = ssh_connect(settings)
        sftp = ssh.open_sftp()
        # Remove the remote folder first so this is an update, not a skip.
        remove_remote_folder(remote_folder, ssh)

        # Recreate the directory tree level by level.
        # Pure string handling here: Path objects would emit Windows
        # separators when run on Windows against a Linux server.
        remote_path = str(remote_folder).replace('\\', '/')
        current_dir = ''
        for dir_part in remote_path.split('/'):
            if not dir_part:  # skip empty path segments
                continue
            current_dir = f"{current_dir}/{dir_part}" if current_dir else f"/{dir_part}"
            try:
                sftp.stat(current_dir)
            except IOError:
                sftp.mkdir(current_dir)

        # Zip the local folder contents (overwrites an existing archive).
        shutil.make_archive('res', 'zip', local_folder)

        # Upload the archive to the remote server.
        uploadFile(settings, Path('res.zip'), remote_folder / 'res.zip', ssh=ssh)

        # Make sure the server has the unzip command available.
        stdin, stdout, stderr = ssh.exec_command('command -v unzip >/dev/null 2>&1 || echo "missing"')
        if stdout.read().decode().strip() == "missing":
            # Pick the install command matching the distro's package manager.
            stdin, stdout, stderr = ssh.exec_command('''
                if [ -x "$(command -v apt)" ]; then
                    sudo apt update && sudo apt install -y unzip
                elif [ -x "$(command -v yum)" ]; then
                    sudo yum install -y unzip
                elif [ -x "$(command -v apk)" ]; then
                    sudo apk add unzip
                else
                    echo "无法安装 unzip，请手动处理"
                    exit 1
                fi
            ''')
            # Wait for the install command to finish.
            exit_status = stdout.channel.recv_exit_status()
            if exit_status != 0:
                error = stderr.read().decode().strip()
                raise Exception(f"安装unzip失败: {error}")

        # Unzip the uploaded archive in place on the server.
        unzip_command = f'unzip -o "{remote_path}/res.zip" -d "{remote_path}"'
        stdin, stdout, stderr = ssh.exec_command(unzip_command)
        # Wait for completion and collect the exit status.
        exit_status = stdout.channel.recv_exit_status()
        # Read output and error text.
        output = stdout.read().decode().strip()
        error = stderr.read().decode().strip()

        if exit_status == 0:
            print("✅ 解压成功")
            if output:
                print("输出信息：\n", output)
        else:
            print("❌ 解压失败")
            if error:
                print("错误信息：\n", error)
            # Re-raise so the caller's error path reports the failure.
            raise Exception(f"解压失败，错误代码: {exit_status}, 错误信息: {error}")

        return {'suc': True, 'msg': '文件夹上传成功'}

    except Exception as e:
        return {'suc': False, 'msg': f'文件夹上传失败: {str(e)}'}

    finally:
        if sftp:
            sftp.close()
        if ssh and need_close:
            ssh.close()


def deployMySQL(api: Api, containerConfig: dict, ssh=None):
    """
    Deploy a MySQL 9 container via docker compose.

    Writes a docker-compose-mysql.yml under /home/<user>/, uploads the MySQL
    config file, and starts the container.

    :param api: Api object used for SSH access, file writes and terminal output
    :param containerConfig: may contain 'mysql_port' and 'mysql_root_password'
    :param ssh: optional existing SSH connection (not closed when supplied)
    :return: {'suc': bool, 'msg': str}
    """
    need_close = False
    try:
        if ssh is None:
            ssh = api.ssh_connect()
            need_close = True
        mysql_port = containerConfig.get('mysql_port', '3306')
        mysql_root_password = containerConfig.get('mysql_root_password', 'YangKe.08')
        user_name = api.getLinuxUserName()
        api.installDockerImage('mysql:9', ssh)
        # Build the docker-compose.yml content.
        docker_compose_content = f"""
services:
  mysql:
    image: mysql:9
    container_name: mysql_container
    restart: unless-stopped
    volumes:
      - "/home/{user_name}/mysql/db_data:/var/lib/mysql"
      - "/home/{user_name}/mysql/run_mysqld:/var/run/mysqld"
      - "/home/{user_name}/mysql/my.cnf:/etc/mysql/conf.d/my.cnf"
    ports:
      - "{mysql_port}:3306"
    environment:
      TZ: Asia/Shanghai
      MYSQL_ROOT_PASSWORD: {mysql_root_password}
      MYSQL_DATABASE: test
      MYSQL_USER: YangKe
      MYSQL_PASSWORD: YangKe.08
      MYSQL_ALLOW_EMPTY_PASSWORD: no
        """

        remove_remote_folder(f"/home/{user_name}/mysql", ssh)

        api.printToTerm(f"创建mysql配置文件")  # parent folders are created automatically
        api.uploadFile('./docker-mysql.cnf', f'/home/{user_name}/mysql/my.cnf')

        # Write the compose file to the server.
        api.printToTerm(f"正在创建docker-compose-mysql.yml文件...", 'info')
        # Write docker_compose_content to the remote file.
        api.write_remote_file(f'/home/{user_name}/docker-compose-mysql.yml', docker_compose_content, ssh)
        # Run docker compose up.
        api.printToTerm(f"正在启动mysql容器...", 'info')
        stdin, stdout, stderr = ssh.exec_command(
            f'docker compose -f /home/{user_name}/docker-compose-mysql.yml up -d', get_pty=True)
        # Read the command output.
        print_debug_info(api, stdout, stderr)

        # NOTE(review): stale comment here said "restart Caddy to load the
        # latest Caddyfile" but no reload call follows — confirm intent.

        api.printToTerm(f"mysql容器启动成功", 'info')
        api.setTaskBar("容器 mysql 部署成功")
        return {"suc": True, "msg": f"容器 mysql 部署成功"}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh and need_close:
            ssh.close()


def deployCaddy(api: Api, containerConfig: dict, ssh=None) -> dict:
    """
    部署Nextcloud容器
    :param api:
    :param containerConfig: 容器配置字典，包含image, name, env, ports等参数
    :param ssh:
    :return: 返回包含部署结果的字典 {'suc': bool,'msg': str}
    """
    need_close = True
    try:
        if ssh is None:
            ssh = api.ssh_connect()  # 该方法中必须调用api.printToTerm()显示日志信息，只传入settings则功能不够，必须传入api
            need_close = True
        user_name = api.getLinuxUserName()

        site_folder = containerConfig.get('siteFolder')
        server_domain = containerConfig.get('server_domain', '')
        ecs_user_folder = Path(f'/home/{user_name}')
        if site_folder is None or site_folder == '':
            api.printToTerm('siteFolder参数不能为空，请选择网站文件夹！')
            return {'suc': False, 'msg': 'siteFolder参数不能为空'}
        elif not isinstance(site_folder, str):
            api.printToTerm('siteFolder类型错误')
            return {'suc': False, 'msg': 'siteFolder类型错误'}

        ecs_site_folder = ecs_user_folder / 'site'
        ecs_caddyfile_folder = ecs_user_folder / 'caddy_file'
        create_remote_folder(ecs_caddyfile_folder, ssh=ssh)
        api.printToTerm(f'正在将网站文件上传到云服务器上的{ecs_site_folder}文件夹中...')
        # 将本地site_folder里的所有文件（包括子文件夹里的文件）上传到云服务器上的ecs_site_folder文件夹中
        api.uploadFolder(site_folder, ecs_site_folder, ssh=ssh)

        api.printToTerm(f'正在将docker-compose.yml上传到云服务器上的{ecs_user_folder}文件夹中...')
        api.installDockerImage('caddy:latest', ssh)
        # 构建docker-compose.yml文件内容
        docker_compose_content = f"""
services:
  caddy:
    image: caddy:latest
    container_name: caddy_container
    restart: unless-stopped
    volumes:
      - "{str(ecs_caddyfile_folder).replace('\\', '/')}:/etc/caddy"  # Caddyfile所在的目录
      - "/home/{user_name}/site:/srv"  # 网站文件所在的目录
      - "/home/{user_name}/caddy_data:/data"  # https证书所在的目录
    environment:
      TZ: Asia/Shanghai
    network_mode: host
        """
        # 把docker_compose_content写入文件
        api.write_remote_file(f'/home/{user_name}/docker-compose-caddy.yml', docker_compose_content, ssh)

        api.printToTerm(f'正在将Caddyfile上传到云服务器上的{ecs_caddyfile_folder}文件夹中...')
        caddy_file_content = f"""
{{
	email 540673597@qq.com
	debug
}}

{server_domain} {{
	root * /srv
	file_server
}}

{server_domain}:20003 {{
	root * /srv
	file_server
}}
"""
        # 把Caddyfile写入文件
        api.write_remote_file(f'{ecs_caddyfile_folder / "Caddyfile"}',
                              caddy_file_content, ssh=ssh)

        api.printToTerm(f'正在启动Caddy容器...')
        # 执行docker-compose up命令
        stdin, stdout, stderr = ssh.exec_command(
            f'docker compose -f /home/{user_name}/docker-compose-caddy.yml up -d')
        # 读取输出
        exit_status = stdout.channel.recv_exit_status()
        error = stderr.read().decode()

        if exit_status == 0:
            api.printToTerm("✅ 容器服务已启动（可能复用了已有容器）", 'debug')
        elif "conflict" in error or "already exists" in error:
            api.printToTerm("⚠️ 检测到容器已存在，正在强制重建...", 'warn')
            # 先停止并删除容器
            ssh.exec_command(f'docker compose -f /home/{user_name}/docker-compose-caddy.yml down')
            # 再启动
            ssh.exec_command(f'docker compose -f /home/{user_name}/docker-compose-caddy.yml up -d --force-recreate')
        else:
            api.printToTerm(f"❌ 启动失败: {error}", 'error')

        api.setTaskBar('Caddy已完成部署并启动成功')
        return {"suc": True, "msg": f"容器 caddy 部署成功"}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh and need_close:
            ssh.close()


def findUsedPort(settings, ssh=None):
    """
    Find the ports already in use on the server.

    Parses the output of `netstat -tuln` (or `ss -tuln` as fallback). The two
    tools put the local address in different columns, so instead of guessing
    which tool ran, each row is scanned for its first address-like field.
    (Bug fix: the old code keyed off the word 'netstat' appearing in the
    command OUTPUT — which never happens — so netstat rows were read with the
    ss column index and their ports were silently dropped.)

    :param settings: server configuration, used only when *ssh* is not supplied
    :param ssh: optional existing SSH connection (not closed when supplied)
    :return: {'suc': True, 'used_ports': list[int], 'msg': str} on success,
             {'suc': False, 'msg': str} on failure
    """
    need_close = False
    try:
        if ssh is None:
            ssh = ssh_connect(settings)
            need_close = True

        # List all listening TCP/UDP sockets, numeric, no DNS lookups.
        stdin, stdout, stderr = ssh.exec_command(
            'netstat -tuln 2>/dev/null || ss -tuln 2>/dev/null')

        if stdout.channel.recv_exit_status() != 0:
            return {'suc': False, 'msg': stderr.read().decode().strip()}

        output = stdout.read().decode().strip()

        used_ports = set()
        for line in output.split('\n'):
            for field in line.split():
                if ':' not in field:
                    continue
                # The first field containing ':' on a data row is the local
                # address (netstat column 3, ss column 4); header rows have a
                # non-numeric suffix ("Address:Port") and are skipped.
                port = field.rsplit(':', 1)[-1]
                if port.isdigit():
                    used_ports.add(int(port))
                break

        return {
            'suc': True,
            'used_ports': sorted(used_ports),
            'msg': '获取已使用端口成功'
        }

    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh and need_close:
            ssh.close()


def findAvailablePort(settings, preferred_port=None, ssh=None):
    """
    Find a free port on the server.

    :param settings: server configuration
    :param preferred_port: preferred port; returned directly when free (default 10000)
    :param ssh: optional existing SSH connection
    :return: {'suc': bool, 'port': int, 'msg': str}
    """
    try:
        # Collect the ports currently in use.
        probe = findUsedPort(settings, ssh)
        if not probe['suc']:
            return probe

        taken = probe['used_ports']
        preferred_port = 10000 if preferred_port is None else preferred_port

        # Fast path: the preferred port is free.
        if preferred_port not in taken:
            return {
                'suc': True,
                'port': preferred_port,
                'msg': f'首选端口 {preferred_port} 可用'
            }

        # Scan up to 10000 ports: downward from high preferences, upward otherwise.
        if preferred_port > 30000:
            candidates = range(preferred_port, max(0, preferred_port - 10000), -1)
        else:
            candidates = range(preferred_port, min(preferred_port + 10000, 65535), 1)

        for candidate in candidates:
            if candidate not in taken:
                return {
                    'suc': True,
                    'port': candidate,
                    'msg': f'找到可用端口 {candidate}'
                }

        return {
            'suc': False,
            'msg': '未找到可用端口',
            'port': 0
        }

    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}


def existContainer(api, container_name, ssh=None):
    """
    Check whether a container with the given name exists on the server.

    :param api: Api object providing ssh_connect / getRunningContainers / printToTerm
    :param container_name: exact container name to look for
    :param ssh: optional existing SSH connection (not closed when supplied)
    :return: True when the container exists, False otherwise — including on
             errors. (Bug fix: the old code returned an error DICT on
             exception, which is truthy, so callers doing
             ``if not existContainer(...)`` treated every failure as "exists".)
    """
    need_close = False
    if ssh is None:
        ssh = api.ssh_connect()
        need_close = True
    try:
        containers = api.getRunningContainers(ssh, info_list=False, name_list=True, id_list=False)
        return container_name in containers
    except Exception as e:
        api.printToTerm(f"错误信息: {str(e)}", 'error')
        return False
    finally:
        if ssh and need_close:
            ssh.close()


def deployNextCloud(api: Api, containerConfig: dict, ssh=None):
    """
    Deploy a Nextcloud container, optionally fronted by the Caddy proxy.

    :param api: Api object used for SSH access, file writes and terminal output
    :param containerConfig: deployment options (port, version, caddy usage,
        domain, MySQL root password)
    :param ssh: optional existing SSH connection (not closed when supplied)
    :return: {'suc': bool, 'msg': str}
    """
    need_close = False
    try:
        if ssh is None:
            ssh = api.ssh_connect()
            need_close = True
        nextcloud_port = containerConfig.get('nextcloud_port', '20000')  # port for nextcloud's http/https access links
        nextcloud_version = containerConfig.get('nextcloud_version', '31')
        use_caddy = containerConfig.get('nextcloud_use_caddy', False)
        server_domain = containerConfig.get('server_domain', '')
        root_password = containerConfig.get('mysql_root_password')
        user_name = api.getLinuxUserName()
        port_in_docker = nextcloud_port
        remove_remote_folder(f"/home/{user_name}/nextcloud", ssh)
        create_remote_folder(f"/home/{user_name}/nextcloud", None, ssh)
        # NOTE(review): the fallback '127.17.0.1' looks like a typo for the
        # Docker default bridge '172.17.0.1' — confirm before changing.
        bridge_ip = getDockerBridgeIp(api) or '127.17.0.1'
        # ----------------------------- caddy-fronted setup -------------------------------
        if use_caddy:
            if not existContainer(api, 'caddy_container', ssh):
                api.setTaskBar('请先启动 Caddy 服务')
            port_in_docker = api.findAvailablePort(10234, ssh).get(
                'port')  # the compose file binds a free port; nextcloud_port is left for the caddy proxy
            caddy_file_folder = 'caddy_file'
            # ------------------------ edit the Caddyfile -----------------------------
            # 1. Generate the new Caddyfile snippet: proxy the user-chosen port to the container port.
            caddy_file_content = f"""
{server_domain}:{nextcloud_port} {{ 
    redir /.well-known/carddav /remote.php/dav/ 301
    redir /.well-known/caldav /remote.php/dav/ 301
    reverse_proxy localhost:{port_in_docker} {{ 
        header_up X-Forwarded-Proto https
    }}
}}
"""
            # 2. Write the snippet to /home/<user>/caddy_file/CaddyfileNextCloud on the
            #    server; that folder is mounted into the container as /etc/caddy.
            api.write_remote_file(f'/home/{user_name}/{caddy_file_folder}/CaddyfileNextCloud', caddy_file_content, ssh,
                                  False)

            # 3. Append an import line to the server's main Caddyfile.
            caddy_file_path = f'/home/{user_name}/{caddy_file_folder}/Caddyfile'
            # Append only when the line is not already present.
            result = write_remote_file_with_check(
                api,
                caddy_file_path,
                'import "CaddyfileNextCloud"\n',  # note: no leading newline
                ssh,
                True
            )
            # ------- Caddyfile done: caddy now proxies nextcloud ------------
            # ------------------------ edit the Caddyfile -----------------------------
        # ----------------------------- caddy-fronted setup -------------------------------
        # Build the docker-compose.yml content.
        api.installDockerImage(f'nextcloud:{nextcloud_version}', ssh)
        docker_compose_content = f"""
services:
  nextcloud:
    image: nextcloud:{nextcloud_version}
    container_name: nextcloud
    restart: unless-stopped
    volumes:
      - "/home/{user_name}/nextcloud/data:/var/www/html/data"
      - "/home/{user_name}/nextcloud/apps:/var/www/html/custom_apps"
      - "/home/{user_name}/nextcloud/config:/var/www/html/config"
    ports:
      - "{port_in_docker}:80"
    environment:
      TZ: Asia/Shanghai
      NEXTCLOUD_TRUSTED_DOMAINS: {server_domain}
      NEXTCLOUD_DATA_DIR: /var/www/html/data
      OVERWRITEPROTOCOL: https
      TRUSTED_PROXIES: {bridge_ip}
      MYSQL_HOST: {server_domain}
      MYSQL_USER: "root"
      MYSQL_PASSWORD: "{root_password}"
      MYSQL_DATABASE: nextcloud
"""

        # Create and write the compose file directly on the remote server.
        api.printToTerm(f"正在创建docker-compose-nextcloud.yml文件...", 'info')
        api.write_remote_file(f'/home/{user_name}/docker-compose-nextcloud.yml', docker_compose_content, ssh, False)

        # Run docker compose up.
        api.printToTerm(f"正在启动nextcloud容器...", 'info')
        stdin, stdout, stderr = ssh.exec_command(
            f'docker compose -f /home/{user_name}/docker-compose-nextcloud.yml up -d', get_pty=True)
        # Read the command output.
        print_debug_info(api, stdout, stderr)

        reloadCaddyfile(api, ssh)
        api.setTaskBar("容器 nextcloud 部署成功")
        return {"suc": True, "msg": f"容器 nextcloud 部署成功"}
    except Exception as e:
        api.printToTerm(f"错误信息: {str(e)}", 'error')
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh and need_close:
            ssh.close()


def forceRemoveContainer(settings, containerId: str) -> dict:
    """
    Force-remove a Docker container by ID, whether running or not.

    :param settings: server configuration
    :param containerId: ID of the container to remove
    :return: {'suc': bool, 'msg': str}
    """
    ssh = None
    try:
        ssh = ssh_connect(settings)

        # Check whether the container exists. Bug fix: inside an f-string the
        # Go template braces must be doubled twice ({{{{.ID}}}} -> {{.ID}});
        # the old code sent docker the invalid template "{.ID}".
        stdin, stdout, stderr = ssh.exec_command(
            f'docker ps -a --filter "id={containerId}" --format "{{{{.ID}}}}"')
        existing_container = stdout.read().decode().strip()

        if not existing_container:
            return {"suc": True, "msg": f"容器 {containerId} 不存在，无需移除"}

        # docker rm -f stops and removes in a single step.
        stdin, stdout, stderr = ssh.exec_command(
            f'docker rm -f {containerId}')
        exit_status = stdout.channel.recv_exit_status()

        if exit_status == 0:
            return {"suc": True, "msg": f"容器 {containerId} 已强制移除"}
        else:
            error_msg = stderr.read().decode()
            # A container that vanished in the meantime counts as success.
            if "No such container" in error_msg:
                return {"suc": True, "msg": f"容器 {containerId} 不存在"}
            return {"suc": False, "msg": f"强制移除容器失败: {error_msg}"}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh:
            ssh.close()


def forceRemoveContainerByName(settings, containerName: str) -> dict:
    """
    Force-remove a Docker container by exact name, whether running or not.

    :param settings: server configuration
    :param containerName: name of the container to remove
    :return: {'suc': bool, 'msg': str}
    """
    ssh = None
    try:
        ssh = ssh_connect(settings)

        # 1. Check whether the container exists. Bug fix: inside an f-string
        # the Go template braces must be doubled twice
        # ({{{{.Names}}}} -> {{.Names}}); the old code sent docker the invalid
        # template "{.Names}".
        stdin, stdout, stderr = ssh.exec_command(
            f'docker ps -a --filter "name=^{containerName}$" --format "{{{{.Names}}}}"')
        existing_container = stdout.read().decode().strip()

        if not existing_container:
            return {"suc": True, "msg": f"容器 {containerName} 不存在，无需移除"}

        # 2. Force-stop and remove by name instead of ID.
        stdin, stdout, stderr = ssh.exec_command(
            f'docker rm -f {containerName}')
        exit_status = stdout.channel.recv_exit_status()

        if exit_status == 0:
            return {"suc": True, "msg": f"容器 {containerName} 已强制移除"}
        else:
            error_msg = stderr.read().decode()
            # A container that vanished in the meantime counts as success.
            if "No such container" in error_msg:
                return {"suc": True, "msg": f"容器 {containerName} 不存在"}
            return {"suc": False, "msg": f"强制移除容器失败: {error_msg}"}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh:
            ssh.close()


def deployDockerRegistryHub(api: Api, containerConfig: dict, ssh=None):
    """
    Deploy a private Docker registry (registry:2) plus its web UI
    (joxit/docker-registry-ui:2) via docker compose, and expose the UI
    through Caddy over HTTPS.
    :param api: Api object providing SSH, remote-file and logging helpers
    :param containerConfig: config dict; keys used: registry_server_port
        (default '10004'), registry_browser_port (default '20005'),
        server_domain
    :param ssh: optional existing SSH connection; opened here (and closed
        in finally) when None
    :return: result dict {'suc': bool, 'msg': str}
    """
    server_port = containerConfig.get('registry_server_port', '10004')
    browser_port = containerConfig.get('registry_browser_port', '20005')
    server_domain = containerConfig.get('server_domain')
    user_name = api.getLinuxUserName()
    caddy_file_folder = 'caddy_file'
    need_close = False
    # NOTE(review): this runs before the connection below is opened, so
    # findAvailablePort may receive ssh=None — confirm it handles that.
    transfer_port = findAvailablePort(api.settings, 10235, ssh).get('port')
    api.printToTerm(f'Docker镜像仓库浏览器中转端口: {transfer_port}', 'info')
    try:
        if ssh is None:
            ssh = api.ssh_connect()
            need_close = True

        api.installDockerImage('registry:2', ssh)
        api.installDockerImage('joxit/docker-registry-ui:2', ssh)
        api.printToTerm(f"正在启动Docker镜像仓库容器...", 'info')

        forceRemoveContainerByName(api.settings, 'registry')

        # Caddy proxies https://{server_domain}:{browser_port} to
        # http://127.0.0.1:{transfer_port} (the UI's published port).
        caddy_file_content = f"""
{server_domain}:{browser_port} {{ 
    reverse_proxy http://127.0.0.1:{transfer_port} {{ 
        header_up X-Forwarded-Proto https
    }}
}}
        """
        api.write_remote_file(f'/home/{user_name}/{caddy_file_folder}/CaddyfileRegBrowser', caddy_file_content, ssh,
                              False)

        # Append 'import "CaddyfileRegBrowser"' to the main Caddyfile;
        # write_remote_file_with_check skips the write when already present.
        caddy_file_path = f'/home/{user_name}/{caddy_file_folder}/Caddyfile'
        result = write_remote_file_with_check(
            api,
            caddy_file_path,
            'import "CaddyfileRegBrowser"\n',  # note: no leading newline
            ssh,
            True
        )
        # BUGFIX: nesting single quotes inside a single-quoted f-string is a
        # SyntaxError before Python 3.12 — use double quotes for the inner
        # call, consistent with the same log line in deployLoginRegService.
        api.printToTerm(f'写入import "CaddyfileRegBrowser"，返回：{result.get("msg")}', 'debug')
        api.printToTerm(f"正在启动Docker镜像仓库浏览器容器...", 'info')

        forceRemoveContainerByName(api.settings, 'registry-browser')

        # The registry terminates TLS itself, reusing the certificate Caddy
        # obtained for the domain.
        cert_file = api.find_cert_file(f'/home/{user_name}/caddy_data', ssh)
        if cert_file is None:
            api.printToTerm(
                f"未找到 caddy 申请的证书文件，一般是提供的域名无法解析到当前服务器，当前域名为：{server_domain}", 'error')
            return {"suc": False, "msg": "未找到证书文件"}
        cert_folder = str(cert_file.parent).replace("\\", "/")
        cert_file_base = cert_file.stem
        reg_config_content = f"""
version: 0.1
log:
  accesslog:
    disabled: false
storage:
  delete:
    enabled: true
  filesystem:
    rootdirectory: /var/lib/registry
http:
  addr: :443
  secret: "registry-shared-secret"
  otlp:
    disabled: true
  tls:
    certificate: /certs/{cert_file_base}.crt
    key: /certs/{cert_file_base}.key
  headers:
    Access-Control-Allow-Origin:
      - "*"
    Access-Control-Allow-Methods:
      - "GET, OPTIONS, HEAD, DELETE"
    Access-Control-Allow-Headers:
      - "Content-Type, Authorization, Accept, Cache-Control"
    Access-Control-Expose-Headers:
      - "Docker-Content-Digest"
"""

        docker_compose_content = f"""
services:
  registry:
    image: registry:2
    container_name: registry
    restart: unless-stopped
    command: ["serve", "/etc/docker/registry/config.yml"]
    ports:
      - "{server_port}:443"
    volumes:
      - /home/{user_name}/dockerreg/data:/var/lib/registry 
      - /home/{user_name}/dockerreg/registry-config.yml:/etc/docker/registry/config.yml:ro
      - {cert_folder}:/certs:ro
    networks:
      - registry_net
    environment:
      TZ: Asia/Shanghai

  registry-ui:
    image: joxit/docker-registry-ui:2
    restart: unless-stopped
    ports:
      - {transfer_port}:80
    environment:
      SINGLE_REGISTRY: false
      REGISTRY_TITLE: Docker仓库
      DELETE_IMAGES: true
      SHOW_CONTENT_DIGEST: true
      NGINX_PROXY_PASS_URL: "https://{server_domain}:{server_port}"
      NGINX_TRY_FILES: true
      REGISTRY_URL: "https://{server_domain}:{server_port}"
      SHOW_CATALOG_NB_TAGS: true
      CATALOG_MIN_BRANCHES: 1
      CATALOG_MAX_BRANCHES: 1
      TAGLIST_PAGE_SIZE: 100
      REGISTRY_SECURED: false
      CATALOG_ELEMENTS_LIMIT: 1000
      REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin: '[*]'
      REGISTRY_HTTP_HEADERS_Access-Control-Expose-Headers: '[Docker-Content-Digest]'
      TZ: Asia/Shanghai
    container_name: registry-ui
    networks:
      - registry_net
    depends_on:
      - registry

networks:
  registry_net:
    driver: bridge
"""
        api.write_remote_file(f'/home/{user_name}/dockerreg/registry-config.yml', reg_config_content, ssh, False)
        api.write_remote_file(f'/home/{user_name}/dockerreg/docker-compose.yml', docker_compose_content, ssh, False)
        api.printToTerm(f"正在启动Docker镜像仓库容器...", 'info')
        stdin, stdout, stderr = ssh.exec_command(
            f'docker compose -f "/home/{user_name}/dockerreg/docker-compose.yml" up -d 2>&1')
        print_debug_info(api, stdout, stderr)
        reloadCaddyfile(api, ssh)
        api.registry_url = f"https://{server_domain}:{server_port}"
        api.printToTerm(f"Docker镜像仓库浏览器地址: https://{server_domain}:{server_port}", 'success')
        api.setTaskBar(f"Docker镜像仓库启动成功，https://{server_domain}:{server_port}")
        return {"suc": True, "msg": "Docker镜像仓库部署成功"}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh and need_close:
            ssh.close()


# NOTE: commented-out block below is superseded by the live deployLoginRegService
# further down (kept for reference; consider deleting).
# def deployLoginRegService(api, containerConfig, ssh):
#     # caddy_container_name: 'caddy_container',
#     # login_reg_port: login_reg_port.value,
#     # mysql_host: 'localhost',
#     # mysql_port: mysql_port.value,
#     #
#     # server_domain: server_domain.value
#     mysql_host = containerConfig.get('mysql_host')
#     mysql_port = containerConfig.get('mysql_port')
#     mysql_root_password = containerConfig.get('mysql_root_password')
#     server_domain = containerConfig.get('server_domain')
#     server_port = containerConfig.get('login_reg_port')
#     transfer_port = api.findAvailablePort(10236).get('port')
#     user_name = api.getLinuxUserName()
#     caddy_file_folder = "caddy_file"
#     need_close = False
#     try:
#         if ssh is None:
#             ssh = api.ssh_connect()
#             need_close = True
#         if not existContainer(api, 'mysql_container', ssh) and mysql_host == "localhost":
#             return {'suc': False, 'msg': '未找到Caddy容器，请先启动Caddy容器'}
#
#         res = api.installDockerImage('yangke03/login-reg:latest', ssh)
#         if not res.get('suc'):
#             return res
#
#         config_content = f"""
# {{
#   "database": {{
#     "host": "{server_domain}",
#     "port": {mysql_port},
#     "user": "root",
#     "password": "{mysql_root_password}",
#     "database": "login_reg"
#   }},
#   "server": {{
#     "cors": true
#   }}
# }}
# """
#         api.write_remote_file(f'/home/{user_name}/login-reg/config/settings.json', config_content, ssh)
#         forceRemoveContainerByName(api.settings, 'login-reg')
#
#         cmd = f'docker run --name login-reg -d -p {transfer_port}:5000 -v /home/{user_name}/login-reg/config:/app/config yangke03/login-reg:latest'
#         stdin, stdout, stderr = ssh.exec_command(cmd)
#         print_debug_info(api, stdout, stderr)
#
#         # 通过caddy代理登录注册服务到https
#         caddy_content = f"""
# {server_domain}:{server_port} {{
#     reverse_proxy {server_domain}:{transfer_port} {{
#         header_up X-Forwarded-Proto https
#     }}
# }}
# """
#         api.write_remote_file(f'/home/{user_name}/{caddy_file_folder}/CaddyfileLogin', caddy_content, ssh)
#         # 在Caddyfile中添加f'import CaddyfileLogin'
#         caddy_file_path = f'/home/{user_name}/{caddy_file_folder}/Caddyfile'
#         # 修改原调用处
#         result = write_remote_file_with_check(
#             api,
#             caddy_file_path,
#             'import "CaddyfileLogin"\n',
#             ssh,
#             True
#         )
#         api.printToTerm(f'写入import "CaddyfileLogin"，返回：{result.get('msg')}', 'debug')
#
#         reloadCaddyfile(api, ssh)
#
#         api.printToTerm(f"登录注册服务容器启动成功...", 'success')
#         return {"suc": True, "msg": "登录注册服务部署成功"}
#     except Exception as e:
#         return {'suc': False, 'msg': f"错误信息: {str(e)}"}
#     finally:
#         if ssh and need_close:
#             ssh.close()

def deployLoginRegService(api, containerConfig, ssh):
    """
    Deploy the login/registration service container, front it with Caddy
    over HTTPS, and install a cron-driven health check that restarts the
    container when it stops responding.
    :param api: Api object providing SSH, remote-file and logging helpers
    :param containerConfig: config dict; keys used: mysql_host, mysql_port,
        mysql_root_password, server_domain, login_reg_port
    :param ssh: optional existing SSH connection; opened here (and closed
        in finally) when None
    :return: result dict {'suc': bool, 'msg': str}
    """
    mysql_host = containerConfig.get('mysql_host')
    mysql_port = containerConfig.get('mysql_port')
    mysql_root_password = containerConfig.get('mysql_root_password')
    server_domain = containerConfig.get('server_domain')
    server_port = containerConfig.get('login_reg_port')
    transfer_port = api.findAvailablePort(10236).get('port')
    user_name = api.getLinuxUserName()
    caddy_file_folder = "caddy_file"
    need_close = False
    try:
        if ssh is None:
            ssh = api.ssh_connect()
            need_close = True
        # When MySQL is expected on this host, its container must be running.
        if not existContainer(api, 'mysql_container', ssh) and mysql_host == "localhost":
            # BUGFIX: the message previously said "Caddy container" although
            # the check above looks for the MySQL container.
            return {'suc': False, 'msg': '未找到MySQL容器，请先启动MySQL容器'}

        res = api.installDockerImage('yangke03/login-reg:latest', ssh)
        if not res.get('suc'):
            return res

        config_content = f"""
{{
  "database": {{
    "host": "{server_domain}",
    "port": {mysql_port},
    "user": "root",
    "password": "{mysql_root_password}",
    "database": "login_reg"
  }},
  "server": {{
    "cors": true
  }}
}}
"""
        api.write_remote_file(f'/home/{user_name}/login-reg/config/settings.json', config_content, ssh)
        forceRemoveContainerByName(api.settings, 'login-reg')

        # Health-check script: restarts the container when it is down or
        # unresponsive; runs from cron (installed below).
        # BUGFIX: the health-check URL was hard-coded to a personal domain
        # (sges.yangke.site:5001); it now targets the service actually
        # deployed here.
        health_check_script = f"""#!/bin/bash
# login-reg 健康检查脚本
CONTAINER_NAME="login-reg"
HEALTH_CHECK_URL="https://{server_domain}:{server_port}/?Action=GetPoolInfo"
LOG_FILE="/home/{user_name}/login-reg/health_check.log"

echo "$(date): 开始健康检查" >> $LOG_FILE

# 检查容器是否在运行
if ! docker ps --filter "name=$CONTAINER_NAME" --format "{{{{.Names}}}}" | grep -q "$CONTAINER_NAME"; then
    echo "$(date): 容器未运行，正在启动..." >> $LOG_FILE
    docker start $CONTAINER_NAME >> $LOG_FILE 2>&1
    exit 1
fi

# 检查服务是否响应
RESPONSE=$(curl -k -s -m 10 --connect-timeout 10 "$HEALTH_CHECK_URL" 2>/dev/null)
if [ $? -ne 0 ]; then
    echo "$(date): 无法连接到服务，准备重启容器" >> $LOG_FILE
    echo "响应: $RESPONSE" >> $LOG_FILE
    docker restart $CONTAINER_NAME >> $LOG_FILE 2>&1
    exit 1
fi

# 检查响应内容是否正确
if echo "$RESPONSE" | grep -q '"Success":true'; then
    echo "$(date): 服务正常运行" >> $LOG_FILE
    exit 0
else
    echo "$(date): 服务响应异常，准备重启容器" >> $LOG_FILE
    echo "响应: $RESPONSE" >> $LOG_FILE
    docker restart $CONTAINER_NAME >> $LOG_FILE 2>&1
    exit 1
fi
"""

        # Upload the health-check script and make it executable.
        api.write_remote_file(f'/home/{user_name}/login-reg/health_check.sh', health_check_script, ssh)

        stdin, stdout, stderr = ssh.exec_command(f'chmod +x /home/{user_name}/login-reg/health_check.sh')
        stdout.channel.recv_exit_status()

        cmd = f'docker run --name login-reg -d --restart=unless-stopped -p {transfer_port}:5000 -v /home/{user_name}/login-reg/config:/app/config yangke03/login-reg:latest'
        stdin, stdout, stderr = ssh.exec_command(cmd)
        print_debug_info(api, stdout, stderr)

        # Proxy the service to HTTPS via Caddy.
        caddy_content = f"""
{server_domain}:{server_port} {{
    reverse_proxy {server_domain}:{transfer_port} {{
        header_up X-Forwarded-Proto https
    }}
}}
"""
        api.write_remote_file(f'/home/{user_name}/{caddy_file_folder}/CaddyfileLogin', caddy_content, ssh)
        # Append 'import "CaddyfileLogin"' to the main Caddyfile;
        # write_remote_file_with_check skips the write when already present.
        caddy_file_path = f'/home/{user_name}/{caddy_file_folder}/Caddyfile'
        result = write_remote_file_with_check(
            api,
            caddy_file_path,
            'import "CaddyfileLogin"\n',  # note: no leading newline
            ssh,
            True
        )
        api.printToTerm(f'写入import "CaddyfileLogin"，返回：{result.get("msg")}', 'debug')

        # Install a cron job running the health check every 5 minutes;
        # remove any previous entry first to avoid duplicates.
        cron_job = f"*/5 * * * * /home/{user_name}/login-reg/health_check.sh >/dev/null 2>&1"
        stdin, stdout, stderr = ssh.exec_command(
            f'(crontab -l 2>/dev/null | grep -v "/home/{user_name}/login-reg/health_check.sh" | cat; echo "{cron_job}") | crontab -'
        )
        stdout.channel.recv_exit_status()

        reloadCaddyfile(api, ssh)

        api.printToTerm(f"登录注册服务容器启动成功，已配置自动健康检查和重启机制...", 'success')
        return {"suc": True, "msg": "登录注册服务部署成功"}
    except Exception as e:
        return {'suc': False, 'msg': f"错误信息: {str(e)}"}
    finally:
        if ssh and need_close:
            ssh.close()


def reloadCaddyfile(api: Api, ssh=None, container_name='caddy_container'):
    """
    Validate and reload the Caddy configuration inside the running
    Caddy container.
    :param api: Api object used for terminal logging
    :param ssh: optional existing SSH connection (opened here when None)
    :param container_name: name of the caddy container
    :return: result dict {'suc': bool, 'msg': str}
    """
    owns_connection = False
    try:
        if ssh is None:
            ssh = api.ssh_connect()
            owns_connection = True
        if not existContainer(api, container_name, ssh):
            return {'suc': False, 'msg': '未找到Caddy容器，请先启动Caddy容器'}

        api.printToTerm("正在验证Caddyfile配置...", 'info')
        # Validate before reloading. "-w /" works around the OCI runtime
        # error "current working directory is outside of container mount
        # namespace root -- possible container breakout detected".
        validate_cmd = f'docker exec -w / {container_name} caddy validate --config /etc/caddy/Caddyfile'
        _, out, err = ssh.exec_command(validate_cmd)
        if out.channel.recv_exit_status() != 0:
            return {'suc': False, 'msg': f'配置验证失败: {err.read().decode()}'}

        api.printToTerm("✅ 配置验证通过，正在重载...", 'info')
        # Validation passed — perform the actual reload.
        reload_cmd = f'docker exec -w / {container_name} caddy reload --config /etc/caddy/Caddyfile'
        _, out, err = ssh.exec_command(reload_cmd)
        if out.channel.recv_exit_status() == 0:
            api.printToTerm("✅ Caddy配置重载成功", 'success')
            return {'suc': True, 'msg': 'Caddy配置重载成功'}

        failure = err.read().decode()
        api.printToTerm(f"❌ 配置重载失败: {failure}", 'error')
        return {'suc': False, 'msg': f'配置重载失败: {failure}'}

    except Exception as e:
        api.printToTerm(f"❌ 重载过程异常: {str(e)}", 'error')
        return {'suc': False, 'msg': f'重载过程异常: {str(e)}'}
    finally:
        if ssh and owns_connection:
            ssh.close()


def write_remote_file_with_check(api, file_path, content, ssh, append=False):
    """
    Write a remote file, first checking whether the content already exists,
    so repeated calls do not duplicate appended lines.
    :param api: Api object providing write_remote_file
    :param file_path: remote file path
    :param content: content to write
    :param ssh: SSH connection
    :param append: append instead of overwrite
    :return: result {'suc': bool, 'msg': str, 'action': 'skipped'|'appended'|'created'}
    """
    try:
        # Does the file exist on the remote host?
        stdin, stdout, stderr = ssh.exec_command(f'test -f "{file_path}" && echo "exists" || echo "not exists"')
        file_exists = stdout.read().decode().strip() == "exists"

        if file_exists:
            # Fixed-string, whole-line match for the (stripped) content.
            # BUGFIX: the escaping is hoisted out of the f-string expression —
            # backslashes inside f-string expressions are a SyntaxError on
            # Python < 3.12.
            needle = content.strip().replace('"', '\\"')
            command = f'grep -Fxq -- "{needle}" "{file_path}" && echo "exists" || echo "not exists"'
            stdin, stdout, stderr = ssh.exec_command(command)
            res = stdout.read().decode().strip()

            if res == "exists":
                return {'suc': True, 'msg': f'内容已存在于 {file_path}', 'action': 'skipped'}

        # Content not present (or file missing): delegate the actual write.
        api.write_remote_file(file_path, content, ssh, append)
        action = 'appended' if append and file_exists else 'created'
        return {'suc': True, 'msg': f'成功写入 {file_path}', 'action': action}

    except Exception as e:
        return {'suc': False, 'msg': f'写入文件失败: {str(e)}', 'action': 'failed'}


def print_debug_info(api: Api, stdout, stderr):
    """
    Stream a remote command's output to the terminal in near real time.

    Reads from the paramiko stdout/stderr channels while the command runs
    and prints each chunk via api.printToTerm; stderr chunks get a
    "[错误] " prefix and error level.

    :param api: Api object providing printToTerm
    :param stdout: paramiko stdout file-like object of the command
    :param stderr: paramiko stderr file-like object of the command
    """
    # Poll both streams until the remote command's exit status is ready.
    while not stdout.channel.exit_status_ready():
        if stdout.channel.recv_ready():
            output = stdout.channel.recv(1024).decode()
            if output:
                api.printToTerm(output.strip(), 'debug')

        if stderr.channel.recv_stderr_ready():
            error = stderr.channel.recv_stderr(1024).decode()
            if error:
                api.printToTerm(f"[错误] {error.strip()}", 'error')

    # BUGFIX: the loop above stops as soon as the exit status is ready, but
    # data may still sit in the channel buffers — drain both streams so the
    # tail of the output is not silently dropped.
    while stdout.channel.recv_ready():
        output = stdout.channel.recv(1024).decode()
        if output:
            api.printToTerm(output.strip(), 'debug')
    while stderr.channel.recv_stderr_ready():
        error = stderr.channel.recv_stderr(1024).decode()
        if error:
            api.printToTerm(f"[错误] {error.strip()}", 'error')


def getDockerBridgeIp(api) -> str | None:
    """
    Get the Docker bridge network's gateway IP (the host address as seen
    from containers, equivalent to host.docker.internal).
    :param api: Api object providing ssh_connect and printToTerm
    :return: the gateway IP string on success, or None on failure.
             NOTE(review): the previous docstring claimed a dict result
             ({'suc', 'ip', 'msg'}), which this code never returns.
    """
    ssh = None
    try:
        ssh = api.ssh_connect()  # open a fresh SSH connection; closed in finally

        # Ask Docker for the bridge network's gateway address via its
        # Go-template --format option.
        stdin, stdout, stderr = ssh.exec_command(
            'docker network inspect bridge --format \'{{(index .IPAM.Config 0).Gateway}}\''
        )
        exit_code = stdout.channel.recv_exit_status()
        gateway_ip = stdout.read().decode().strip()

        if exit_code != 0:
            error_msg = stderr.read().decode().strip()
            api.printToTerm(f"获取 Docker bridge 网关 IP 失败: {error_msg}")
            return None

        return gateway_ip
    except Exception as e:
        api.printToTerm(f"获取 Docker bridge 网关 IP 错误: {str(e)}")
        return None
    finally:
        if ssh:
            ssh.close()
