#!/usr/bin/python3
import argparse
import io
import ipaddress
import json
import logging
import os
import sys
import time
from logging.config import dictConfig

import paramiko

try:
  from typing import Dict, List, Tuple, Optional, Union, Any, NoReturn, Callable, IO
except ImportError:
  pass

VERSION = '1.0.0'
SSH_TIMEOUT = 10          # seconds to wait when opening an SSH connection
DEPLOY_USERNAME = 'root'  # all remote work is performed as root
DEPLOY_PACKAGE = '/etc/dsms/dsms-deploy'   # directory holding deploy configs/artifacts
LOG_DIR = '/var/log/dsms/dsms-deploy'      # local log directory (must exist)
CLUSTER_CONF = 'cluster.conf'    # config file describing a brand-new cluster
ADD_NODE_CONF = 'add-node.conf'  # config file describing nodes added to an existing cluster
DEPEND_LIST = 'depend_list'      # manifest of rpm/whl packages to (un)install
LEADER_NODE = 'NODE1'            # config key of the node treated as deploy leader
SSH_PATH = '/root/.ssh'          # remote directory for key material
INIT_DIR = '/home/my-cluster'    # working directory for ceph-deploy on the leader
SERVER_CHECK_RETRY = 10          # max polls when waiting for a ceph service to come up

# Bash script streamed to each node over "bash -s <ip> <hostname>" stdin.
# It idempotently ensures /etc/hosts contains "<ip> <hostname>", removing any
# stale entry that maps the same hostname to a different IP first.
update_hosts = """
#!/bin/bash
IP=$1
HOSTNAME=$2
if [ ! -n "$IP" ];then
	echo "Please fill in parameter 1 as the IP address!"
	exit 1
fi
if [ ! -n "$HOSTNAME" ];then
	echo "Please fill in parameter 2 as HOSTNAME!"
	exit 1
fi
ITEM="$IP $HOSTNAME"
HOSTS="/etc/hosts"
#update /etc/hosts file
add_hosts=true
while read line || [[ -n ${line} ]]; do
	if [ "$ITEM" == "$line" ];then
		add_hosts=false
		break
	else
		i=1
		for host in $line;do
			if [ $i -eq 2 ];then
				if [ $host == $HOSTNAME ];then
					#Delete the hosts entry that was the same as the current host but with a different ip
					sed -i "/${line}/d" $HOSTS
				fi
			fi
			i=`expr $i + 1`
		done
		continue
	fi
done < $HOSTS

if [ $add_hosts == true ];then
	echo "$ITEM" >> $HOSTS
fi
"""

# dictConfig layout: everything goes to one rotating file; INFO and above is
# mirrored to the console. Paramiko gets its own handler so its records keep
# their own format but land in the same log file.
logging_config = {
  'version': 1,
  'disable_existing_loggers': True,
  'formatters': {
    # Format for records emitted by the paramiko library.
    'paramiko': {
      'format': '%(asctime)s %(thread)x %(levelname)s %(message)s'
    },
    # Format for this tool's own records (tagged DSMS-DEPLOY).
    'dsms-storage': {
      'format': '%(asctime)s %(thread)x %(levelname)s DSMS-DEPLOY: %(message)s'
    },
    # Minimal format for interactive console output.
    'console': {
      'format': '%(asctime)s %(message)s'
    },
  },
  'handlers': {
    'console': {
      'level': 'INFO',
      'class': 'logging.StreamHandler',
      'formatter': 'console',

    },
    # Rotating log file: 100 MiB per file, 10 backups kept.
    'log_file': {
      'level': 'DEBUG',
      'class': 'logging.handlers.RotatingFileHandler',
      'formatter': 'dsms-storage',
      'filename': '%s/dsms-deploy.log' % LOG_DIR,
      'maxBytes': 100 * 1024 * 1024,
      'backupCount': 10,
    },
    # Paramiko records share the same file but use the paramiko formatter.
    'paramiko_log_file': {
      'level': 'INFO',
      'class': 'logging.handlers.RotatingFileHandler',
      'formatter': 'paramiko',
      'filename': '%s/dsms-deploy.log' % LOG_DIR,
      'maxBytes': 100 * 1024 * 1024,
      'backupCount': 10,
    }
  },
  'loggers': {
    # Root logger: console + file.
    '': {
      'level': 'DEBUG',
      'handlers': ['console', 'log_file'],
    },
    # Keep paramiko out of the root handlers (no console spam).
    'paramiko': {
      'level': 'INFO',
      'handlers': ['paramiko_log_file'],
      'propagate': False,
    }
  }
}


class termcolor:
  """ANSI escape sequences used to colorize console output."""
  yellow = '\033[93m'
  red = '\033[31m'
  end = '\033[0m'  # reset all attributes


class DsmsDeployException(Exception):
  """
  Custom exception raised for any dsms-deploy failure.

  Stores the message on the instance and renders it in red when printed.
  """

  def __init__(self, message):
    # Forward to Exception so args, repr and pickling behave normally
    # (the original skipped this, leaving e.args empty).
    super().__init__(message)
    self.message = message

  def __str__(self):
    return '{}{}{}'.format(termcolor.red, f"DsmsDeployException: {self.message}", termcolor.end)


class Node:
  """In-memory representation of one server parsed from a deploy config file."""

  def __init__(self, ip, hostname, ssh_port, root_passwd, mon, repo_key_file,
      repo_url, public_network, cluster_network, leader):
    self.ip = ip                            # SSH/management IP address
    self.hostname = hostname                # hostname to assign to the server
    self.ssh_port = ssh_port                # SSH port (string as read from config)
    self.root_passwd = root_passwd          # root password used for all SSH work
    self.mon = mon                          # whether this node runs a ceph mon
    self.repo_key_file = repo_key_file      # GPG key (local path or http URL), optional
    self.repo_url = repo_url                # custom yum repo base URL, optional
    self.public_network = public_network    # ceph public network CIDR (new cluster only)
    self.cluster_network = cluster_network  # ceph cluster network CIDR (new cluster only)
    self.leader = leader                    # True for the deploy leader (NODE1)


def dsms_require_root() -> None:
  """Abort with status 1 unless the effective user is root."""
  if os.geteuid() == 0:
    return
  sys.stderr.write('ERROR: dsms-deploy should be run as root\n')
  sys.exit(1)


def find_nth(findstr, index, n):
  """Return the offset of the n-th occurrence of ``index`` within ``findstr``.

  Mirrors ``str.find``: returns -1 when fewer than n occurrences exist.
  For n <= 1 this is simply the first occurrence.
  """
  pos = findstr.find(index)
  for _ in range(n - 1):
    if pos < 0:
      break
    pos = findstr.find(index, pos + len(index))
  return pos


def valid_node(nodes, type):
  """
  Validate node definitions and verify SSH/SFTP connectivity to each server.

  :param nodes: mapping of node name -> Node
  :param type: 'new' for a fresh cluster (network fields required), 'add' otherwise
  :raises DsmsDeployException: on a missing field, malformed value,
    or an unreachable/misconfigured server
  """
  for key, value in nodes.items():
    # Fields every node must provide.
    for field in ['ip', 'hostname', 'ssh_port', 'root_passwd']:
      if not getattr(value, field, None):
        raise DsmsDeployException(f'server {key} {field} is require field')
    if type == 'new':
      # A brand-new cluster additionally needs the ceph network CIDRs.
      for field in ['public_network', 'cluster_network']:
        if not getattr(value, field, None):
          raise DsmsDeployException(f'init server {key} server {field} is require field')
    try:
      # Verify that the IP address format is correct
      ipaddress.ip_address(value.ip)
    except ValueError:
      raise DsmsDeployException(f'server {key} IP address illegal')

    try:
      # Verify that the ssh port format is correct
      port = int(value.ssh_port)
      if not 1 <= port <= 65535:
        raise ValueError
    except ValueError:
      raise DsmsDeployException(f'server {key} SSH port {value.ssh_port} illegal')

    # Verify SSH connectivity
    try:
      with paramiko.SSHClient() as ssh:
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Pass the validated integer port (the original passed the raw
        # config string, so paramiko never saw a usable port number).
        ssh.connect(value.ip, port=port, username=DEPLOY_USERNAME, password=value.root_passwd, timeout=SSH_TIMEOUT)
        try:
          with ssh.open_sftp() as sftp:
            sftp.listdir('.')
        except Exception:
          raise DsmsDeployException(f'server {key} SFTP config error, Please check the server config /etc/ssh/sshd_config')
    except DsmsDeployException:
      # Re-raise our own diagnostics untouched instead of re-wrapping them.
      raise
    except paramiko.AuthenticationException:
      raise DsmsDeployException(f'server {key} SSH Authentication fail, Please check the root passwd')
    except Exception:
      raise DsmsDeployException(f'server {key} SSH connect fail, Please check the ssh port')


def read_cluster_conf(conf_path):
  """
  Parse cluster.conf into a dict of node name -> Node.

  Lines look like ``NODE1_IP=1.2.3.4`` (a per-node attribute) or
  ``REPO_URL=...`` (a global attribute applied to every node seen so far).
  Blank lines, comments and entries with empty values are skipped.

  :param conf_path: path to the cluster config file
  :return: dict of node name -> Node (validated as a 'new' cluster)
  :raises DsmsDeployException: when validation of the parsed nodes fails
  """
  nodes = {}
  with open(conf_path) as f:
    for line in f:
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # Split on the first '=' only so values (e.g. passwords) may contain '='.
      key, sep, value = line.partition('=')
      if not sep or not value:
        continue
      if key.startswith("NODE"):
        node_name, attr_name = key.split('_', 1)
        node = nodes.get(node_name)
        if node is None:
          node = Node('', '', '', '', '', '', '', '', '', '')
          nodes[node_name] = node
        setattr(node, attr_name.lower(), value)
        if node_name == LEADER_NODE:
          node.leader = True
      else:
        # Non-NODE entries are globals applied to every node parsed so far.
        for node in nodes.values():
          setattr(node, key.lower(), value)
  valid_node(nodes, 'new')
  return nodes


def read_add_conf(conf_path):
  """
  Parse add-node.conf into a dict of node name -> Node.

  Per-node keys look like ``NEW_NODE1_IP=...`` (split after the second
  underscore); ``REPO*`` keys are globals applied to every node seen so far.
  Blank lines, comments and entries with empty values are skipped.

  :param conf_path: path to the add-node config file
  :return: dict of node name -> Node (validated as an 'add' operation)
  :raises DsmsDeployException: when validation of the parsed nodes fails
  """
  nodes = {}
  with open(conf_path) as f:
    for line in f:
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # Split on the first '=' only so values (e.g. passwords) may contain '='.
      key, sep, value = line.partition('=')
      if not sep or not value:
        continue
      if key.startswith("NEW_NODE"):
        # NEW_NODE1_IP -> node 'NEW_NODE1', attribute 'IP'.
        cut = find_nth(key, '_', 2)
        node_name = key[:cut]
        attr_name = key[cut + 1:]
        node = nodes.get(node_name)
        if node is None:
          node = Node('', '', '', '', '', '', '', '', '', '')
          nodes[node_name] = node
        setattr(node, attr_name.lower(), value)
      elif key.startswith("REPO"):
        # Repo settings apply to every node parsed so far.
        for node in nodes.values():
          setattr(node, key.lower(), value)
  valid_node(nodes, 'add')
  return nodes


def run_ssh_command(server, username, password, command, port=22):
  """
  Run ``command`` on ``server`` over SSH, streaming output to the debug log.

  :param server: remote server address
  :param username: SSH login username
  :param password: SSH login password
  :param command: shell command to execute
  :param port: SSH port (default 22, keeps existing callers working)
  :return: combined stdout+stderr text when the command exits 0
  :raises DsmsDeployException: when the command exits non-zero
  """
  logger.debug(f"{server} execute start: `{command}`")
  ssh_client = paramiko.SSHClient()
  ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
  try:
    ssh_client.connect(hostname=server, port=port, username=username,
                       password=password, timeout=SSH_TIMEOUT,
                       allow_agent=False, look_for_keys=False)

    stdin, stdout, stderr = ssh_client.exec_command(command)
    # Accumulate output while echoing each line to the debug log so
    # long-running commands show progress.
    output_file = io.StringIO()
    for line in stdout:
      logger.debug(line.strip())
      output_file.write(line)
    for line in stderr:
      logger.debug(line.strip())
      output_file.write(line)

    exit_code = stdout.channel.recv_exit_status()
    output = output_file.getvalue()
  finally:
    # Always release the connection, even when connect/exec raises
    # (the original leaked the client on any exception).
    ssh_client.close()

  if exit_code == 0:
    logger.debug(f"{server} execute end: `{command}` success")
    return output
  else:
    logger.debug('{}{}{}'.format(termcolor.red, f"{server} execute end: `{command}` failed", termcolor.end))
    raise DsmsDeployException(output)


def run_ftp_command(local_file, server, username, password, remote_dir,
    extract_command=None):
  """
  Copy the local file to the remote server and optionally run an extract command.

  :param local_file: local file path
  :param server: remote server address
  :param username: remote server login username
  :param password: remote server login password
  :param remote_dir: remote destination directory
  :param extract_command: extract command, optional parameter
  :return: extract command output, or None when no extract command is given
  :raises DsmsDeployException: when the extract command exits non-zero
  """
  client = paramiko.SSHClient()
  client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
  try:
    client.connect(server, username=username, password=password,
                   allow_agent=False, look_for_keys=False)

    # Upload the file next to its original basename via SFTP.
    remote_file = os.path.join(remote_dir, os.path.basename(local_file))
    sftp = client.open_sftp()
    try:
      sftp.put(local_file, remote_file)
    finally:
      sftp.close()

    if not extract_command:
      return None

    command = f'{extract_command} {remote_file} -C {remote_dir}'
    stdin, stdout, stderr = client.exec_command(command)
    # Accumulate output while echoing each line so progress is visible.
    output_file = io.StringIO()
    for line in stdout:
      logger.info(line.strip())
      output_file.write(line)
    for line in stderr:
      logger.info(line.strip())
      output_file.write(line)

    exit_code = stdout.channel.recv_exit_status()
    # NOTE: stderr has already been consumed above; it is part of output_file
    # (the original re-read the exhausted stream and always logged '').
    output = output_file.getvalue()
  finally:
    # Close the SSH session on every path (the original leaked it when no
    # extract command was given or when any step raised).
    client.close()

  if exit_code == 0:
    logger.info(f"{server} execute: {command} success")
    return output
  else:
    logger.info(output)
    logger.error('{}{}{}'.format(termcolor.red, f"{server} execute: {command} failed", termcolor.end))
    raise DsmsDeployException(f"{server} execute: {command} failed")


def generate_remote_ssh_key(ssh_client):
  """
  Ensure an SSH key pair exists on the remote server and return its public key.

  :param ssh_client: connected paramiko client for the remote server
  :return: contents of id_rsa.pub (empty string when it cannot be read)
  """
  # Reuse an existing key pair when one is already present.
  if not ssh_key_generated(ssh_client):
    # No public key yet: generate a new pair with an empty passphrase.
    ssh_client.exec_command(f'ssh-keygen -t rsa -N "" -f {SSH_PATH}/id_rsa')

    # exec_command returns before ssh-keygen finishes; poll with a pause
    # until the public key file appears (the original busy-spun a CPU core
    # with `continue`).
    while not ssh_key_generated(ssh_client):
      time.sleep(1)

  _, stdout, _ = ssh_client.exec_command(f'cat {SSH_PATH}/id_rsa.pub')
  return stdout.read().decode().strip()


def ssh_key_generated(ssh_client):
  """
  Report whether the remote SSH public key file exists.

  :param ssh_client: remote server ssh client
  :return: True when id_rsa.pub is present (ls printed a path), False otherwise
  """
  _, stdout, _ = ssh_client.exec_command(f'ls {SSH_PATH}/id_rsa.pub')
  listing = stdout.read().decode().strip()
  return bool(listing)


def setup_ssh_trust(nodes):
  """
  Establish passwordless SSH between every pair of cluster nodes.

  For each node: make sure it has a local key pair, then append its public
  key to every node's authorized_keys (including its own) and pre-accept
  host keys so later scripted ssh calls are not prompted.

  :param nodes: mapping of node name -> Node
  :return: True on success
  :raises DsmsDeployException: when any key-exchange step fails
  """
  for i, node in enumerate(nodes.values()):
    try:
      ssh_client = paramiko.SSHClient()
      ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
      ssh_client.connect(hostname=node.ip, username=DEPLOY_USERNAME, password=node.root_passwd, allow_agent=False,
                         look_for_keys=False)

      # Check that the id_rsa.pub file exists, if so, read the public key directly
      public_key = generate_remote_ssh_key(ssh_client)
      if public_key:
        logger.info(f'remote server :{node.hostname} Public key get success')
      else:
        raise DsmsDeployException(f"remote server {node.hostname} generate remote ssh key failed")

      # copy the leader node's public key to the other node's authorization file
      for j, other_node in enumerate(nodes.values()):
        try:
          other_ssh = paramiko.SSHClient()
          other_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
          other_ssh.connect(other_node.ip, username=DEPLOY_USERNAME,
                            password=other_node.root_passwd,
                            allow_agent=False,
                            look_for_keys=False)
          sftp = other_ssh.open_sftp()
          # Check if remote file exists
          try:
            sftp.stat(f'{SSH_PATH}/authorized_keys')
          except FileNotFoundError:
            # NOTE(review): exec_command returns immediately; this assumes
            # mkdir/touch complete before the append below — the 'a+' open
            # presumably creates the file anyway. TODO confirm no race.
            other_ssh.exec_command(f'mkdir -p {SSH_PATH}')
            other_ssh.exec_command(f'touch {SSH_PATH}/authorized_keys')
          authorized_keys = sftp.file(f'{SSH_PATH}/authorized_keys', 'a+')
          authorized_keys.seek(0)
          keys = authorized_keys.read().decode().splitlines()
          # verify whether it has been added first
          if public_key not in keys:
            authorized_keys.write(public_key + '\n')
            logger.info(f'remote server :{other_node.hostname} add {node.hostname} public key success')
          else:
            logger.info(f'remote server :{other_node.hostname} exist {node.hostname} public key')
          authorized_keys.close()
          sftp.close()
          other_ssh.close()
        except Exception as e:
          raise DsmsDeployException(f"remote server :{other_node.hostname} add {node.hostname} public key failed")

        # NOTE(review): ssh_client was already connected to node.ip above —
        # this reconnect per inner iteration looks redundant; verify before
        # removing.
        ssh_client.connect(hostname=node.ip, username=DEPLOY_USERNAME,
                           password=node.root_passwd,
                           allow_agent=False, look_for_keys=False)
        # prevent confirmation on first login
        _, stdout, _ = ssh_client.exec_command(
          f'ssh -o StrictHostKeyChecking=no root@{other_node.hostname}')
    except Exception as e:
      raise DsmsDeployException(f'Cluster node {node.hostname} grant credit failed {node.hostname}: {str(e)}')
  return True


def configure_cluster(nodes):
  """
  Configure the server cluster: set hostnames, stop firewalld, populate
  /etc/hosts on every node, then establish mutual SSH trust.

  :param nodes: cluster nodes
  :return: execute result (True on success)
  :raises DsmsDeployException: when any configuration step fails
  """
  logger.info("Config cluster nodes start")

  client = paramiko.SSHClient()
  client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

  try:
    for node in nodes.values():
      ip = node.ip
      hostname = node.hostname

      client.connect(ip, username=DEPLOY_USERNAME, password=node.root_passwd,
                     timeout=SSH_TIMEOUT,
                     allow_agent=False, look_for_keys=False)
      logger.info(f"set {ip} hostname to {hostname}")
      command = f"hostnamectl set-hostname {hostname}"
      stdin, stdout, stderr = client.exec_command(command)
      exit_code = stdout.channel.recv_exit_status()
      if exit_code == 0:
        logger.info(f"{ip} set {hostname} success")
      else:
        # Collect the whole stderr stream. (The original rebuilt the buffer
        # for every line — keeping only the last one — and raised NameError
        # when stderr was empty.)
        error_message = ''.join(stderr)
        raise Exception(f'{ip} set {hostname} failed,{error_message}')

      logger.info(f"disable and stop firewalld {hostname}")
      stdin, stdout, stderr = client.exec_command('systemctl disable firewalld')
      stdin, stdout, stderr = client.exec_command('systemctl stop firewalld')
      exit_code = stdout.channel.recv_exit_status()
      if exit_code == 0:
        logger.info(f"disable and stop firewalld {hostname} success")
      else:
        error_message = ''.join(stderr)
        raise Exception(f'disable and stop firewalld failed,{error_message}')

      # Add all hostnames and IPs to /etc/hosts by piping the update_hosts
      # script into `bash -s <ip> <hostname>`.
      for s in nodes.values():
        command = f"bash -s {s.ip} {s.hostname}"
        stdin, stdout, stderr = client.exec_command(command)
        stdin.write(update_hosts)
        stdin.flush()
        # Signal EOF so `bash -s` stops reading its script from stdin.
        stdin.close()
      client.close()
    # grant credit to other nodes
    logger.info("cluster nodes grant credit start")
    result = setup_ssh_trust(nodes)
    logger.info('cluster nodes grant credit end')
  except Exception as e:
    raise DsmsDeployException(f'Configure_cluster failed: {str(e)}')
  logger.info("Config cluster nodes end")
  return result


def is_mgr_available(ip, password):
  # type: () -> bool
  """
  Return True when the cluster status reports an active mgr.
  """
  try:
    raw = run_ssh_command(ip, DEPLOY_USERNAME, password, 'ceph status -f json-pretty')
    status = json.loads(raw)
    return status.get('mgrmap', {}).get('available', False)
  except Exception as e:
    logger.debug('status failed: %s' % e)
    return False


def is_mon_available(ip, password):
  # type: () -> bool
  """
  Return True when the cluster status reports at least one mon.
  """
  try:
    raw = run_ssh_command(ip, DEPLOY_USERNAME, password, 'ceph status -f json-pretty')
    status = json.loads(raw)
    return status.get('monmap', {}).get('num_mons', 0) > 0
  except Exception as e:
    logger.debug('status failed: %s' % e)
    return False


def is_mds_available(ip, password):
  # type: () -> bool
  """
  Return True when the cluster status reports at least one standby mds.
  """
  try:
    raw = run_ssh_command(ip, DEPLOY_USERNAME, password, 'ceph status -f json-pretty')
    status = json.loads(raw)
    return status.get('fsmap', {}).get('up:standby', 0) > 0
  except Exception as e:
    logger.debug('status failed: %s' % e)
    return False


def is_available(service, ip, passwd, func):
  """
  Block until ``func`` reports the service available, or give up.

  :param service: the name of the service (used in log messages)
  :param ip: ceph node ssh ip
  :param passwd: ceph node ssh password
  :param func: callable(ip, passwd) -> bool probing availability
  :raises DsmsDeployException: when the probe keeps failing past
    SERVER_CHECK_RETRY attempts
  """
  logger.info('waiting for %s...' % service)
  attempt = 1
  # Poll once per second until the probe succeeds or the retry budget runs out.
  while not func(ip, passwd):
    if attempt > SERVER_CHECK_RETRY:
      raise DsmsDeployException('%s not available after %s tries' % (service, SERVER_CHECK_RETRY))
    logger.info('%s not available, waiting (%s/%s)...' % (service, attempt, SERVER_CHECK_RETRY))
    attempt += 1
    time.sleep(1)
  logger.info('%s is available' % service)


# wait for mgr to restart (after enabling a module)
def wait_for_mgr_restart(ip, password):
  # first get latest mgrmap epoch from the mon
  mgr_dump = run_ssh_command(ip, DEPLOY_USERNAME, password, 'ceph mgr dump')
  j = json.loads(mgr_dump)
  epoch = j['epoch']
  # wait for mgr to have it
  logger.info('waiting for the mgr to restart...')

  def mgr_has_latest_epoch(ip, password):
    # type: () -> bool
    try:
      out = run_ssh_command(ip, DEPLOY_USERNAME, password, 'ceph tell mgr mgr_status')
      j = json.loads(out)
      return j['mgrmap_epoch'] >= epoch
    except Exception as e:
      logger.debug('tell mgr mgr_status failed: %s' % e)
      return False

  is_available('Mgr version %d' % epoch, ip, password, mgr_has_latest_epoch)


# remove ceph orch services
def remove_ceph_orch_services(ip, password):
  """Remove every service currently known to ceph orch."""
  # List the services first, then delete them one by one.
  listing = run_ssh_command(ip, DEPLOY_USERNAME, password, 'ceph orch ls -f json')
  for service in json.loads(listing):
    logger.info(f'remove ceph orch service {service["service_name"]}')
    run_ssh_command(ip, DEPLOY_USERNAME, password, f'ceph orch rm {service["service_name"]}')


def get_ceph_fs_id(ip, password):
  """Return the cluster fsid, or '' when it cannot be determined."""
  try:
    raw = run_ssh_command(ip, DEPLOY_USERNAME, password, 'ceph fsid -f json-pretty')
    return json.loads(raw).get('fsid', '')
  except Exception as e:
    logger.debug('get cluster fsid failed: %s' % e)
    return ""


def deploy_node(node):
  """
  Install ceph/dsms packages on a single node.

  Optionally configures a custom yum repo (with GPG key) first, then installs
  every dependency listed in the depend_list manifest.  Leader-only packages
  (dsms rpms and python wheels) are installed only when node.leader is set.

  :param node: Node to deploy (skipped silently when it has no ip)
  :raises DsmsDeployException: when any remote command fails
  """
  if not node.ip:
    return
  logger.info(f'starting deploy server: {node.hostname} ...')
  run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'mkdir -p {DEPLOY_PACKAGE}')
  depend_list = []
  whl_list = []
  ceph_rpm_list = []
  dsms_rpm_list = []
  if node.repo_url:
    logger.info('use customer repo')
    gpgcheck = 0
    gpgkey = ''
    if node.repo_key_file:
      gpgcheck = 1
      if node.repo_key_file.startswith('http'):
        gpgkey = 'gpgkey=' + node.repo_key_file
      else:
        # Local key file: upload it next to the deploy package, then import it.
        run_ftp_command(
          local_file=node.repo_key_file,
          server=node.ip,
          username=DEPLOY_USERNAME,
          password=node.root_passwd,
          remote_dir=DEPLOY_PACKAGE,
        )
        filename = os.path.basename(node.repo_key_file)
        # Import the uploaded key file (the original interpolated a literal
        # "(unknown)" placeholder instead of the uploaded file name, so the
        # key was never imported).
        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'sudo rpm --import {DEPLOY_PACKAGE}/{filename}')
    # Write the repo definition and refresh yum metadata.
    command = f'echo -e "[dsmsrepo]\nname=dsms-deploy\nbaseurl={node.repo_url}\ngpgcheck={gpgcheck}\nenabled=1\n{gpgkey}" | sudo tee /etc/yum.repos.d/dsms-deploy.repo > /dev/null'
    run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, command)
    run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"sudo yum clean all && sudo yum makecache")

  # Parse the dependency manifest into its four package categories.
  with open(DEPLOY_PACKAGE + "/" + DEPEND_LIST, 'r') as f:
    for line in f:
      if line.startswith('depend='):
        depend_list.append(line.strip().split('=')[1])
      elif line.startswith('whl='):
        whl_list.append(line.strip().split('=')[1])
      elif line.startswith('ceph_rpm='):
        ceph_rpm_list.append(line.strip().split('=')[1])
      elif line.startswith('dsms_rpm='):
        dsms_rpm_list.append(line.strip().split('=')[1])

  # yum wants package names without the trailing '.rpm' extension.
  depend_without_ext_list = [d.rsplit('.', 1)[0] for d in depend_list]
  depend_without_ext_list += [r.rsplit('.', 1)[0] for r in ceph_rpm_list]
  depends = ' '.join(depend_without_ext_list)
  run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"yum install -y {depends}")
  if node.leader:
    # Management-plane packages are installed on the leader only.
    for dsms_rpm in dsms_rpm_list:
      dsms_rpm_without_ext = dsms_rpm.rsplit('.', 1)[0]
      run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"yum install -y {dsms_rpm_without_ext}")
    for whl in whl_list:
      run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"pip3 install {DEPLOY_PACKAGE}/{whl}")

  # Sanity check: ceph must be runnable after installation.
  run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, 'ceph -v')

def uninstall_node(node):
  """
  Remove the packages deploy_node installed on a single node.

  Parses the same depend_list manifest and yum-removes the dependency and
  ceph rpms; on the leader it also removes the dsms rpms and pip-uninstalls
  the dsms-deploy wheel.

  :param node: Node to clean (skipped silently when it has no ip)
  """
  if node.ip:
    logger.info(f'starting uninstall server: {node.hostname} ...')
    depend_list = []
    whl_list = []
    ceph_rpm_list = []
    dsms_rpm_list = []

    # Parse the dependency manifest into its four package categories.
    with open(DEPLOY_PACKAGE + "/" + DEPEND_LIST, 'r') as f:
      for line in f:
        if line.startswith('depend='):
          depend_list.append(line.strip().split('=')[1])
        elif line.startswith('whl='):
          whl_list.append(line.strip().split('=')[1])
        elif line.startswith('ceph_rpm='):
          ceph_rpm_list.append(line.strip().split('=')[1])
        elif line.startswith('dsms_rpm='):
          dsms_rpm_list.append(line.strip().split('=')[1])

    # remove all depend
    depend_without_ext_list = []
    for depend in depend_list:
      depend_without_ext = depend.rsplit('.', 1)[0]
      depend_without_ext_list.append(depend_without_ext)
    for ceph_rpm in ceph_rpm_list:
      ceph_rpm_without_ext = ceph_rpm.rsplit('.', 1)[0]
      depend_without_ext_list.append(ceph_rpm_without_ext)
    depends = ' '.join(str(depend) for depend in depend_without_ext_list)
    run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"yum remove -y {depends}")
    if node.leader:
      for dsms_rpm in dsms_rpm_list:
        dsms_rpm_without_ext = dsms_rpm.rsplit('.', 1)[0]
        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"yum remove -y {dsms_rpm_without_ext}")
      for whl in whl_list:
        # NOTE(review): the loop variable `whl` is unused — every iteration
        # uninstalls the fixed 'dsms-deploy' package. Presumably intentional
        # (one uninstall per listed wheel) — TODO confirm.
        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"pip3 uninstall -y dsms-deploy")


def check_time_difference(nodes):
  """
  Verify all node clocks agree to within 1 second.

  Reads each node's epoch time over SSH, then compares every ordered pair;
  because both (a,b) and (b,a) are checked, a drift in either direction
  exceeds the threshold on one of the two orderings.

  :param nodes: mapping of node name -> Node
  :raises DsmsDeployException: when a clock cannot be read or two clocks
    differ by more than 1 second
  """
  logger.info("Check servers time...")

  # Node object -> epoch seconds reported by that server.
  remote_times = {}

  for server in nodes.values():
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    try:
      ssh_client.connect(hostname=server.ip, username=DEPLOY_USERNAME,
                         password=server.root_passwd, timeout=SSH_TIMEOUT,
                         allow_agent=False, look_for_keys=False)
      stdin, stdout, stderr = ssh_client.exec_command('date +%s')
      remote_time = int(stdout.read().decode().strip())
      remote_times[server] = remote_time
    except Exception as e:
      raise DsmsDeployException(f"get {server.hostname} time failed: {str(e)}")
    finally:
      ssh_client.close()

  for server1 in remote_times:
    for server2 in remote_times:
      if server1 != server2:
        time_difference = remote_times[server1] - remote_times[server2]
        if time_difference > 1:
          raise DsmsDeployException(
            f"server time check failed. server1: {server1.hostname} server2: {server2.hostname}, time difference is: {time_difference}s")
  logger.info("Check servers time pass")

def update_prometheus_service(node):
  """
  Make the remote prometheus systemd unit run as root.

  Inserts a 'User=root' line directly after the existing 'User=prometheus'
  line (unless 'User=root' is already present) and reloads systemd.
  NOTE(review): the original 'User=prometheus' line is kept; presumably
  systemd honors the later 'User=root' entry — TODO confirm.

  :param node: node whose prometheus unit file is rewritten
  :raises DsmsDeployException: when any remote command fails
  """
  logger.info("update prometheus service...")
  remote_file = '/usr/lib/systemd/system/prometheus.service'
  old_user = 'User=prometheus'
  new_user = 'User=root'
  insert_index = -1    # position right after the old User line, -1 if absent
  line_exists = False  # True when User=root is already in the file
  lines = run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'cat {remote_file}').splitlines()
  # get old user index
  for i, line in enumerate(lines):
    if old_user in line:
      insert_index = i + 1
    if new_user in line:
      line_exists = True
      break
  # insert new user
  if not line_exists and insert_index != -1:
    lines.insert(insert_index, new_user)
  modified_content = '\n'.join(lines)
  # Escape '$' so the remote shell's double quotes don't expand variables
  # when the content is echoed back into the unit file.
  modified_content = modified_content.replace('$', r'\$')
  run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'echo "{modified_content}" > {remote_file}')
  run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, 'systemctl daemon-reload')

  logger.info("update prometheus service success")




def purge_cluster(nodes):
  """
  Wipe all traces of a previous ceph cluster from every node.

  Best-effort removes orch services, then on each node stops and purges the
  osd/mds/mgr/mon daemons, their state directories, /etc/ceph, runtime dirs,
  logs, the fsid-specific state dir and the ceph-deploy working directory.

  :param nodes: mapping of node name -> Node (must contain LEADER_NODE)
  :raises DsmsDeployException: when a purge command fails on a node
  """
  logger.info('starting purge old cluster ...')
  leader = nodes.get(LEADER_NODE)

  # Default to '' so the fsid check below never hits an unassigned name
  # when the try block fails before the assignment completes.
  fsid = ''
  try:
    fsid = get_ceph_fs_id(leader.ip, leader.root_passwd)
    remove_ceph_orch_services(leader.ip, leader.root_passwd)
  except Exception as e:
    # Best-effort: an unreachable/empty cluster is fine to keep purging.
    logger.debug('remove ceph orch services failed: %s' % e)

  for server in nodes.values():
    # purge osd
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'systemctl stop ceph-osd@*.service')
    osd_count = run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'df -h | grep /var/lib/ceph/osd/ceph- | wc -l')
    if (int(osd_count.strip()) > 0):
      run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'umount -f -l /var/lib/ceph/osd/ceph-*')

    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/run/ceph/ceph-osd.*.asok')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/osd/*')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/bootstrap-osd/*')
    # purge mds
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'systemctl stop ceph-mds@*.service')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/run/ceph/ceph-mds.*.asok')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/mds/*')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/bootstrap-mds/*')
    # purge mgr
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'systemctl stop ceph-mgr@*.service')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/run/ceph/ceph-mgr.*.asok')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/mgr/*')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/bootstrap-mgr/*')
    # purge mon
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'systemctl stop ceph-mon@*.service')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/run/ceph/ceph-mon.*.asok')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/mon/*')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/lib/ceph/bootstrap-mon/*')
    # purge /etc/ceph
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /etc/ceph/*')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /run/cephadm/*')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /run/ceph/*')
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, 'rm -rf /var/log/ceph/*')
    if (fsid):
      run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, f'rm -rf /var/lib/ceph/{fsid}')

    # purge deploy-dir
    run_ssh_command(server.ip, DEPLOY_USERNAME, server.root_passwd, f'rm -rf {INIT_DIR}/*')
  logger.info('purge old cluster success')


def command_version(args):
  """Handle the 'version' subcommand.

  :param args: parsed argparse namespace (unused)
  :return: the tool version string
  """
  return VERSION


def command_deploy(args):
  """Handle the 'deploy' subcommand: install packages on every configured node.

  :param args: parsed argparse namespace (unused)
  """
  logger.info('Starting deploy dsms-storage cluster...')
  conf_file = DEPLOY_PACKAGE + '/' + CLUSTER_CONF
  for node in read_cluster_conf(conf_file).values():
    deploy_node(node)
  logger.info('deploy dsms-storage cluster success')


def command_init(args):
  """Initialize the dsms-storage cluster described by cluster.conf.

  Purges any previous cluster state, bootstraps mon/mgr/mds on the nodes via
  ceph-deploy driven from the leader node, then enables and configures the
  cephadm, prometheus and restful mgr modules. Finishes by logging the
  cluster's endpoint/key via command_info.
  """
  logger.info("Initializing dsms-storage cluster...")
  nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
  check_time_difference(nodes)
  configure_cluster(nodes)
  leader = nodes.get(LEADER_NODE)
  # Space-separated hostname list: the form ceph-deploy accepts for multi-host commands.
  hostname = ' '.join([node.hostname for node in nodes.values()])
  update_prometheus_service(leader)
  # Purge any existing cluster remnants before bootstrapping a fresh one.
  purge_cluster(nodes)
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'mkdir -p {INIT_DIR}')
  # All ceph-deploy invocations run from INIT_DIR so the generated
  # ceph.conf and keyrings stay in one working directory on the leader.
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
                  f'cd {INIT_DIR};ceph-deploy new {hostname} --cluster-network={leader.cluster_network} --public-network={leader.public_network}')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy --overwrite-conf config push {hostname}')
  logger.info('Creating mon...')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mon create-initial')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy admin {hostname}')
  logger.info('waiting for mon to start...')
  for node in nodes.values():
    # Enable the target unit so mon daemons come back after a reboot.
    run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'systemctl enable ceph-mon.target')
  is_available('mon', leader.ip, leader.root_passwd, is_mon_available)

  logger.info('Creating mgr...')
  # The mgr is created on the leader only; mon and mds go on every host.
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mgr create {leader.hostname}')
  logger.info('waiting for mgr to start...')
  for node in nodes.values():
    run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'systemctl enable ceph-mgr.target')
  is_available('mgr', leader.ip, leader.root_passwd, is_mgr_available)

  logger.info('Creating mds...')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mds create {hostname}')
  logger.info('waiting for mds to start...')
  for node in nodes.values():
    run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'systemctl enable ceph-mds.target')
  is_available('mds', leader.ip, leader.root_passwd, is_mds_available)

  # Config cluster
  logger.info('mon, mgr, mds service is started.')
  logger.info('Starting config cluster...')
  logger.info('enabling cephadm module...')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph mgr module enable cephadm')
  # Enabling a mgr module restarts the mgr; wait before issuing orch commands.
  wait_for_mgr_restart(leader.ip, leader.root_passwd)
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph orch set backend cephadm')

  logger.info('enabling prometheus module...')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set mgr mgr/prometheus/scrape_interval 10')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph mgr module enable prometheus')
  wait_for_mgr_restart(leader.ip, leader.root_passwd)

  # Generate cephadm's SSH identity and distribute it so the orchestrator
  # can reach every host as root.
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph cephadm generate-key')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph cephadm get-pub-key > ~/ceph.pub')

  for node in nodes.values():
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ssh-copy-id -f -i ~/ceph.pub root@{node.hostname}')
    # NOTE(review): '/home/my-cluster' duplicates the INIT_DIR constant's value.
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
                    f'scp /home/my-cluster/ceph.bootstrap-osd.keyring root@{node.hostname}:/var/lib/ceph/bootstrap-osd/ceph.keyring')
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ceph orch host add {node.hostname}')

  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ceph orch apply prometheus {leader.hostname}')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph orch apply node-exporter')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph restful create-self-signed-cert')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph restful create-key admin')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set mon mon_allow_pool_delete true')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set global rbd_default_features 1')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set global osd_crush_update_on_start false')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set global mon_warn_on_pool_no_app false')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set mgr mgr/devicehealth/enable_monitoring false')
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph restful restart')
  time.sleep(5)  # give some room to start

  command_info('')
  logger.info('init dsms-storage cluster success')


def command_info(args):
  """Query the leader node for the restful endpoint and admin key, and log both."""
  logger.info("Getting dsms-storage cluster information")

  cluster = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
  leader = cluster.get(LEADER_NODE)
  # Both ceph commands print JSON on stdout.
  services_json = run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph mgr services')
  keys_json = run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph restful list-keys')
  restful_url = json.loads(services_json)["restful"]
  admin_key = json.loads(keys_json)["admin"]

  logger.info(
    f"Get dsms-storage cluster information finish \n\tCluster server address: {restful_url}\n\tCluster server admin key: {admin_key}")


def command_add_node(args):
  """Add the nodes listed in add-node.conf to an existing dsms-storage cluster.

  Deploys the package to each new node, re-runs time-difference checks and
  cluster configuration over the union of old and new nodes, then drives
  ceph-deploy / ceph orch from the leader to enroll each new node
  (as a mon only when its config requests it, always as an mds).
  """
  logger.info("Adding node to dsms-storage...")
  cluster_nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
  add_nodes = read_add_conf(DEPLOY_PACKAGE + '/' + ADD_NODE_CONF)

  for node in add_nodes.values():
    deploy_node(node)
  # Host/time configuration must cover existing and new nodes together.
  all_nodes = cluster_nodes.copy()
  all_nodes.update(add_nodes)
  check_time_difference(all_nodes)
  configure_cluster(all_nodes)
  leader = cluster_nodes.get(LEADER_NODE)
  for add_node in add_nodes.values():
    logger.info(f"adding {add_node.hostname} to dsms-storage")
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
                    f'cd {INIT_DIR};ceph-deploy --overwrite-conf config push {add_node.hostname}')
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy admin {add_node.hostname}')
    if add_node.mon:
      run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mon create {add_node.hostname}')
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mds create {add_node.hostname}')
    # Let the cephadm orchestrator reach the new host with its SSH key.
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ssh-copy-id -f -i ~/ceph.pub root@{add_node.hostname}')

    # Use INIT_DIR (same value as the previously hard-coded '/home/my-cluster')
    # so a future change to INIT_DIR cannot leave this path stale.
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
                    f'scp {INIT_DIR}/ceph.bootstrap-osd.keyring root@{add_node.hostname}:/var/lib/ceph/bootstrap-osd/ceph.keyring')
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ceph orch host add {add_node.hostname}')
    logger.info(f"add {add_node.hostname} to dsms-storage complete")
  logger.info("Added node to dsms-storage success")


def command_remove_node(args):
  """Remove the given hostnames from the cluster via ceph-deploy on the leader.

  For each host in args.host: destroy its mon (if any), purge the ceph
  packages, then purge its data.
  """
  logger.info("Removing node from dsms-storage...")
  cluster_nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
  leader = cluster_nodes.get(LEADER_NODE)
  for node in args.host:
    # Run from INIT_DIR so ceph-deploy finds this cluster's ceph.conf and
    # keyrings — consistent with every other ceph-deploy call in this tool;
    # previously these ran from the SSH login directory with no config.
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mon destroy {node}')
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy purge {node}')
    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy purgedata {node}')
  logger.info("Removed node from dsms-storage success")


def command_rm_cluster(args):
  """Remove the dsms-storage cluster from every node and uninstall the package."""
  logger.info('Removing the dsms-storage cluster...')
  members = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
  leader = members.get(LEADER_NODE)

  # First purge ceph packages and data on each node, driven from the leader.
  for member in members.values():
    logger.info(f'Starting remove node: {member.hostname}')
    for action in ('purge', 'purgedata'):
      run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ceph-deploy {action} {member.hostname}')
  # Then uninstall the deployed package from every node.
  for member in members.values():
    uninstall_node(member)
  # Finally clear the leader's ceph-deploy working directory.
  run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'rm -rf {INIT_DIR}/*')
  logger.info('Removed the dsms-storage cluster success')


def _get_parser():
  # type: () -> argparse.ArgumentParser
  """Build the dsms-deploy argument parser with one subcommand per operation."""
  parser = argparse.ArgumentParser(
    description='Bootstrap dsms storage.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)

  subparsers = parser.add_subparsers(help='sub-command')

  def _register(name, help_text, handler):
    # Register one subcommand and bind its handler via set_defaults.
    sub = subparsers.add_parser(name, help=help_text)
    sub.set_defaults(func=handler)
    return sub

  _register('version', 'Show dsms-deploy version', command_version)
  _register('deploy', 'Deploy dsms-storage package', command_deploy)
  _register('init', 'Init dsms-storage cluster', command_init)
  _register('addNode', 'Add a node to an existing cluster', command_add_node)

  parser_remove_node = _register('removeNode', 'Remove a node for an existing cluster', command_remove_node)
  parser_remove_node.add_argument(
    'host',
    metavar='HOST',
    nargs='+',
    help='hosts to purge Ceph data from',
  )

  _register('rmCluster', 'Remove all daemons for this cluster', command_rm_cluster)
  _register('info', 'Get cluster info', command_info)

  return parser


def _parse_args(av):
  """Parse the given argv list with the dsms-deploy parser and return the namespace."""
  return _get_parser().parse_args(av)


if __name__ == '__main__':

  # Allow an embedding caller to inject argv; otherwise use the command line.
  try:
    argv = injected_argv  # type: ignore
  except NameError:
    argv = sys.argv[1:]

  if not argv:
    sys.stderr.write('No command specified; pass -h or --help for usage\n')
    sys.exit(1)

  dsms_require_root()

  # Logging must be configured before any command handler runs.
  if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
  dictConfig(logging_config)

  # Root logger; also used as the module-global `logger` by the command handlers.
  logger = logging.getLogger()

  logger.info("dsms-deploy params %s" % argv)
  args = _parse_args(argv)

  try:
    r = args.func(args)
  except DsmsDeployException as e:
    logging.error(e)
    sys.exit(1)
  # Handlers that return nothing (or a falsy value) exit successfully.
  sys.exit(r if r else 0)
