#!/usr/bin/python
# __CR__
# Copyright (c) 2008-2013 EMC Corporation
# All Rights Reserved
#
# This software contains the intellectual property of EMC Corporation
# or is licensed to EMC Corporation from third parties.  Use of this
# software and the intellectual property contained therein is expressly
# limited to the terms and conditions of the License Agreement under which
# it is provided by or on behalf of EMC.
# __CR__

"""
 Author: Sunil Yadawad

 Since we are disabling BDB auto-init in order to improve the MDS safety.
 This impacts the MDS disk replacement procedure as we cannot rely on the
 BDB automated initialization + replication to sync up the new MDS databases
 on the replaced disk. MDS Sync script uses rsync internally and identifies
 the correct MDS set and host+port for the rsync target on the replaced MDS
 disk and the host+port for the running master for the sync source. 
 NOTE: The script can also take some additional options to sync up individual
 port
"""

import sys
import os
import errno
import time
import random
import datetime
import subprocess
import commands
from xml.dom import DOMException
import xml.dom.minidom as xml
import tempfile
import socket
import common
import service
import mds_service
import mauiutility
import pdb
import string
import logging
import glob
import shutil
import threading
import Queue
import signal
from optparse import OptionParser


LOGFILE = "/var/log/maui/mdssync.log"
# NOTE(review): 'log' is bound twice -- the console logger immediately
# replaces the file logger reference.  Whether file logging still happens
# depends on how common.get_*_logger attaches handlers to the named logger;
# confirm against common.py.
log = common.get_file_logger(LOGFILE, 'mdssync')
log = common.get_console_logger('mdssync')

VERSION = "5.0.0"
# rsync bandwidth cap flag, applied when source/target are on different segments
RSYNC_BANDWIDTH = '--bwlimit=5120'
# module-level option flags (presumably set from command-line parsing,
# which is outside this chunk)
NOSTART      = False
IGNORE_SPACE = False
FORCE_RUN    = False
REUSE_SPLIT  = False

HOSTS_FILE                 = "/etc/hosts"
MGMT_HOST_SUFFIX           = "-mgmt"
# free disk-space safety ratios (local target vs. sync source)
DISK_SPACE_THRESHOLD_RATIO_LOCAL = 1.5
DISK_SPACE_THRESHOLD_RATIO_SRC   = 0.2
# reserve extra 5GB disk space
EXTRA_DISK_SPACE           = 5*1024*1024*1024
CHKPOINT_SLEEP_SEC         = 120
CHKPOINT_NUM               = 2

# only apply split-restore to files larger than 20GB
SPLIT_FILE_SIZE_THRESHOLD  = 20*1024*1024*1024

# this script re-invokes itself over ssh on the source host for the
# split/chkpoint operations (see LargeFileTransfer.__start_split_file)
SPLIT_RESTORE_FILE_UTIL    = os.path.abspath(__file__)
SPLIT_DIR_SUFFIX           = ".split"
SRC_TMP_ENV_PREFIX         = "mdssync.src."
LOCAL_TMP_ENV_PREFIX       = "mdssync.local."
CHKPOINT_DONE_FLAG         = 'chkpoint.done'
LOG_FILE_LIST              = 'mdssync.log.list'
BUFF_LOG_NUM               = 5


# retry/timeout tunables for init-wait, rsync and ssh operations
INITED_RETRY_TIMES         = 60
INITED_RETRY_INTERVAL      = 10
RSYNC_MAX_RETRY_COUNT      = 10

CHKPOINT_CHK_INTERVAL      = 10

SSH_RETRY_COUNT            = 5
SSH_TIMEOUT                = 300

KEEP_SPLIT                 = False

DEBUG_MODE                 = False

# instance ids used for Heartbeat flag files on the source host
HEARTBEAT_ID_SPLIT    = 'split'
HEARTBEAT_ID_CHKPOINT = 'checkpoint'

# '-o' operations understood when the script re-runs itself on the source host
SOURCE_UTIL_OP_SPLIT    = 'split'
SOURCE_UTIL_OP_CHKPOINT = 'chkpoint'

class Watcher:
    """Signal-handling workaround for multithreaded Python programs.

    Two CPython issues are addressed: (1) a signal may be delivered to an
    arbitrary thread (a malfeature), and (2) a signal arriving while the
    receiving thread is blocked waiting is ignored (a bug).  The workaround
    is to fork: the child runs the real program while the parent does
    nothing but wait, and kills the child on KeyboardInterrupt.
    """

    def __init__(self):
        """Fork the process.

        The child returns immediately and carries on as the program; the
        parent never returns from here -- it blocks in watch() until the
        child exits or an interrupt arrives.
        """
        self.child = os.fork()
        if self.child != 0:
            # parent: supervise the child process
            self.watch()

    def watch(self):
        # block until the child terminates; Ctrl-C lands here reliably
        try:
            os.wait()
        except KeyboardInterrupt:
            self.kill()
        sys.exit()

    def kill(self):
        # best effort: the child may already have exited
        try:
            os.kill(self.child, signal.SIGKILL)
        except OSError:
            pass


# SingleInstance uses a global logger, create an alias for log
logger = log
# copied from tendo library http://pypi.python.org/pypi/tendo
# LICENSE: PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# https://github.com/pycontribs/tendo/blob/master/tendo/singleton.py
class SingleInstance:
    """
    Prevents this script from running in parallel: just instantiate the
    SingleInstance() class.  If another instance is already running, the
    constructor exits the application with the message "Another instance is
    already running, quitting." and exit code errno.EALREADY.

    >>> import tendo
    ... me = SingleInstance()

    This is very useful for scripts executed by crontab at small intervals.

    Remember that this works by creating a lock file with a filename based
    on the full path to the script file.
    """

    def __init__(self, flavor_id=""):
        import sys
        self.initialized = False
        # lock file name: full script path with path separators flattened to
        # '-', plus the flavor_id, placed under the system temp directory
        basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        # os.path.splitext(os.path.abspath(sys.modules['__main__'].__file__))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        self.lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)

        logger.debug("SingleInstance lockfile: " + self.lockfile)
        if sys.platform == 'win32':
            try:
                # file already exists, we try to remove (in case previous execution was interrupted)
                if os.path.exists(self.lockfile):
                    os.unlink(self.lockfile)
                self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except OSError:
                _, e, _ = sys.exc_info()
                # errno 13 (EACCES): another process holds the file open
                if e.errno == 13:
                    logger.error("Another instance is already running, quitting.")
                    # sys.exit(-1)
                    sys.exit(errno.EALREADY)
                print(e.errno)
                raise
        else:  # non Windows
            import fcntl
            self.fp = open(self.lockfile, 'w')
            try:
                # non-blocking exclusive lock; raises if another instance holds it
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                logger.warning("Another instance is already running, quitting.")
                # sys.exit(-1)
                sys.exit(errno.EALREADY)
        self.initialized = True

    def __del__(self):
        import sys
        # nothing to release if __init__ bailed out before taking the lock
        if not self.initialized:
            return
        try:
            if sys.platform == 'win32':
                if hasattr(self, 'fd'):
                    os.close(self.fd)
                    os.unlink(self.lockfile)
            else:
                import fcntl
                fcntl.lockf(self.fp, fcntl.LOCK_UN)
                #os.close(self.fp)
                if os.path.isfile(self.lockfile):
                    os.unlink(self.lockfile)
        except Exception, e:
            logger.warning(e)
            sys.exit(-1)


class Properties(common.Properties):
    """Parse xml from a string instead of the mds_cfg.xml file used by
    common.Properties."""

    def __init__(self, prop_str):
        """
        Create properties from string.

        @type prop_str: str
        @param prop_str: The properties xml string
        @raise Exception: whatever xml.parseString raises on malformed
            input (e.g. an expat parse error) propagates to the caller.
        """
        # parseString's exceptions propagate unchanged; the previous
        # 'except IOError: raise' / 'except DOMException: raise' clauses
        # were no-ops and have been removed.
        self.props = xml.parseString(prop_str)


class SplitMerge(object):
    """
    Utility to:
        1. Split a large file into small blocks.
        2. Merge split blocks into one file.

    The split directory contains gzip-compressed chunks named
    '<BLOCK_BASE_NAME>-<n>.gz', plus a DONE_FLAG_FILE whose single line is
    '<src_file_path>:<timestamp>' once splitting completed successfully.
    """

    # dd read/write granularity is 16k
    BLOCK_SIZE  = 16384

    # size units; one chunk (one compressed output block) covers
    # CHUNK_SIZE = 100MB of the input file
    KILO_BYTE   = 1024
    MEGA_BYTE   = 1048576
    GIGA_BYTE   = 1073741824
    CHUNK_UNIT  = MEGA_BYTE
    CHUNK_SIZE  = 100 * CHUNK_UNIT
    #CHUNK_UNIT = MEGA_BYTE
    #CHUNK_SIZE = 15 * CHUNK_UNIT

    BLOCK_BASE_NAME = 'block'
    BLOCK_TMP_NAME  = '.blocktmp'
    DONE_FLAG_FILE  = '.done'

    # result codes returned by split() / restore_block()
    RESULT_OK                          = 0
    RESULT_SPLIT_OUTPUT_DIR_INCOMPLETE = 1
    RESULT_SPLIT_OUTPUT_DIR_WRONG_SRC  = 2
    RESULT_SPLIT_OUTPUT_DIR_EXPIRED    = 3
    RESULT_SPLIT_SRC_FILE_NOT_EXIST    = 4
    RESULT_SPLIT_FAILED_CREATE_DIR     = 5
    RESULT_SPLIT_FAILED_SPLIT          = 6
    RESULT_RESTORE_SPLIT_DIR_NOT_EXIST = 7
    RESULT_RESTORE_FAILED_RESTORE      = 8
    RESULT_RESTORE_INVALID_SRC_BLOCK   = 9

    def __init__(self, src_file, split_dir):
        """
        @param src_file:  path of the (large) file to split
        @param split_dir: directory holding the split output blocks
        """
        super(SplitMerge, self).__init__()
        self.__src_file = src_file
        self.__split_dir = split_dir

    def __verify_output(self):
        """
        verify the output directory to check whether the splited file blocks could be reused or not

        Returns RESULT_OK only when the directory has a DONE_FLAG_FILE whose
        recorded source path matches self.__src_file; otherwise a
        RESULT_SPLIT_OUTPUT_DIR_* code.
        """
        log.info('[SplitMerge] Validate existing split directory')
        split_done_file = os.path.join(self.__split_dir, self.DONE_FLAG_FILE)
        # if it's not a fully splitted output, maybe killed half way, then it's invalid
        if not os.path.isfile(split_done_file):
            log.error('[SplitMerge] <NEED MANUAL CHECK> Output directory %s already exists. It may be an incomplete copy of last split.' \
                % self.__split_dir)
            return self.RESULT_SPLIT_OUTPUT_DIR_INCOMPLETE

        # check if it's for different file splitting
        # (flag file format is '<src_file>:<timestamp>')
        split_done = open(split_done_file, 'r')
        split_flag_file_txt = split_done.readline()
        [split_filename, split_timestamp] = split_flag_file_txt.split(':', 1)
        split_done.close()
        if split_filename != self.__src_file:
            log.error('[SplitMerge] <NEED MANUAL CHECK> Output directory %s already exists. It is split for a different source file %s.' \
                % (self.__split_dir, split_filename))
            return self.RESULT_SPLIT_OUTPUT_DIR_WRONG_SRC

        # if file was splitted more than 12 hours ago, we consider it's invalid
        # FIXME: skip expiration check. always re-use the complete split env
        # split_time = datetime.datetime.fromtimestamp(float(split_timestamp))
        # current_time = datetime.datetime.now()
        # time_delta = current_time - split_time
        # time_delta_hour = time_delta.days * 24 + time_delta.seconds / 3600
        # if time_delta_hour > self.SPLIT_EXPIRATION_HOUR:
        #     log.error('[SplitMerge] <NEED MANUAL CHECK> Output directory %s already exists. It has expired.' \
        #         % self.__split_dir)
        #     return self.RESULT_SPLIT_OUTPUT_DIR_EXPIRED

        log.info('[SplitMerge] Existing split directory is valid')
        return self.RESULT_OK

    def __do_split(self):
        """
        Cut the source file into CHUNK_SIZE slices with dd, gzip each slice
        into <split_dir>/block-<n>.gz, then write the DONE_FLAG_FILE.
        Returns RESULT_OK or RESULT_SPLIT_FAILED_SPLIT.
        """
        log.info('[SplitMerge] Start a new split process')
        log.info('[SplitMerge] Split %s into %s' % (self.__src_file, self.__split_dir))
        offset = 0
        is_last_chunk = False
        block = 0
        tmp_block = os.path.join(self.__split_dir, self.BLOCK_TMP_NAME)
        while not is_last_chunk:
            # re-stat each round: the source file may still be growing
            src_file_size = os.path.getsize(self.__src_file)
            if offset > src_file_size:
                log.info('[SplitMerge] Split done! File: %s' % self.__src_file)
                break

            left = src_file_size - offset
            skip = int(offset / self.BLOCK_SIZE)
            log.info('[SplitMerge] Split block #%s. File: %s' % (block, self.__src_file))

            # cut and compress a chunk
            # split increments at self.BLOCK_SIZE
            # split the chunk to a tmp block
            if left > self.CHUNK_SIZE:
                count = int(self.CHUNK_SIZE / self.BLOCK_SIZE)
                cmd = 'dd if=%s bs=%s skip=%s count=%s | gzip > %s' \
                    % (self.__src_file, self.BLOCK_SIZE, skip, count, tmp_block)
                # set the new offset
                offset += self.CHUNK_SIZE
            else:
                # remaining data is smaller than the self.CHUNK_SIZE
                # it is the last chunk (no 'count': dd reads to end of file)
                cmd = 'dd if=%s bs=%s skip=%s | gzip > %s' \
                    % (self.__src_file, self.BLOCK_SIZE, skip, tmp_block)

                # doesn't set the new offset, since the file may grow at the same time
                # is_last_chunk flag will just skip the next round dd
                is_last_chunk = True

            log.debug('[SplitMerge] <RUN> %s' % cmd)
            status, _ = commands.getstatusoutput(cmd)
            if status != 0:
                log.error('[SplitMerge] Failed to split file %s' % self.__src_file)
                return self.RESULT_SPLIT_FAILED_SPLIT

            # then rename the tmp block; the rename makes block files appear
            # atomically, so readers never see a half-written block
            dest_block = os.path.join(self.__split_dir, '%s-%s.gz' % (self.BLOCK_BASE_NAME, block))
            shutil.move(tmp_block, dest_block)

            block += 1

        # Make a mark file to indicate the splitting is done
        utc_seconds = int(time.mktime(datetime.datetime.now().timetuple()))
        split_done_txt = '%s:%s' % (self.__src_file, utc_seconds)
        split_done_file = os.path.join(self.__split_dir, self.DONE_FLAG_FILE)
        split_done = open(split_done_file, 'w')
        split_done.write(split_done_txt)
        split_done.close()

        return self.RESULT_OK

    def split(self):
        """
        Split self.__src_file -> self.__split_dir/BLOCKS

        A valid pre-existing split directory is re-used as-is; an invalid
        one is moved aside to '<split_dir>.<timestamp>' before re-splitting.
        Returns a RESULT_* code.
        """

        log.info('[SplitMerge] Split file %s into directory %s' % (self.__src_file, self.__split_dir))

        # check if src file exists
        if not os.path.isfile(self.__src_file):
            log.error('[SplitMerge] Source file %s does not exist!' % self.__src_file)
            return self.RESULT_SPLIT_SRC_FILE_NOT_EXIST

        # check if dest dir exists
        if os.path.isdir(self.__split_dir):
            verify_result = self.__verify_output()
            if verify_result == self.RESULT_OK:
                return self.RESULT_OK
            else:
                # there is some problem with the existing dest split dir
                # back up the existing dir
                current_time = datetime.datetime.now()
                utc_seconds = int(time.mktime(current_time.timetuple()))
                backup_directory = '%s.%s' % (self.__split_dir, utc_seconds)
                log.warning('[SplitMerge] <NEED MANUAL CHECK> Move existing output directory %s to %s.' % \
                    (self.__split_dir, backup_directory))
                shutil.move(self.__split_dir, backup_directory)

        # create the dest directory
        try:
            os.mkdir(self.__split_dir)
        except Exception, e:
            log.error(e)
            log.error('[SplitMerge] Failed to create output directory %s' % self.__split_dir)
            return self.RESULT_SPLIT_FAILED_CREATE_DIR

        # do the file splitting
        result = self.__do_split()
        return result

    @classmethod
    def restore_block(cls, src_block, dest_file):
        """
        Restore a single block into the destination file.

        The block number parsed from the 'block-<n>.gz' name determines
        the byte offset within dest_file (n * CHUNK_SIZE), so blocks may
        be restored in any order.  Returns a RESULT_* code.
        """

        result = cls.RESULT_OK

        if not os.path.isfile(src_block):
            log.error('[SplitMerge] Block %s does not exist' % src_block)
            result = cls.RESULT_RESTORE_INVALID_SRC_BLOCK
        else:

            log.info('[SplitMerge] Restore block %s to file %s' % (src_block, dest_file))

            try:
                # block-<block_num>.gz
                block_num_str = src_block.rsplit('-', 1)[-1].split('.', 1)[0]
                block_num = int(block_num_str)
                offset = block_num * cls.CHUNK_SIZE

                # dd 'seek' writes the decompressed data at the block's offset
                seek = int(offset / cls.BLOCK_SIZE)
                cmd = 'gunzip -c %s | dd bs=%s seek=%s of=%s' \
                    % (src_block, cls.BLOCK_SIZE, seek, dest_file)
                log.debug('[SplitMerge] <RUN> %s' % cmd)
                status, _ = commands.getstatusoutput(cmd)
                if status != 0:
                    log.error('[SplitMerge] Failed to restore block %s to file %s' % (src_block, dest_file))
                    result = cls.RESULT_RESTORE_FAILED_RESTORE
                else:
                    result = cls.RESULT_OK
            except Exception:
                # e.g. the block name did not parse as 'block-<n>.gz'
                result = cls.RESULT_RESTORE_INVALID_SRC_BLOCK
        return result


class Heartbeat(object):
    """
    Heartbeat class solves 2 problems:
        1. There might be multiple mdssync instances syncing from the same source mds.
           The split process on the source side splits each db file only once for all
           mdssync clients and clears the temp split env when all mdssync clients are
           done successfully.  All mdssync clients maintain a heartbeat in order to
           indicate the progress.  The last mdssync client will find there is no
           heartbeat from other mdssync clients and clear the temp split env.
        2. The splitting of a single block may take a long time in systems with heavy
           load.  The split process on the source side maintains a heartbeat to notify
           all mdssync clients that the split is in progress.

    A heartbeat is a flag file on a remote host that a daemon thread touches
    (via ssh) every __HEARTBEAT_INTERVAL seconds.
    """

    HEARTBEAT_FILE_PREFIX     = '.heartbeat'
    # if there is no heartbeat in (2 * __HEARTBEAT_INTERVAL) time
    # it is reasonable to determine that this instance is dead
    __HEARTBEAT_INTERVAL      = 60
    __CHECK_INSTANCE_INTERVAL = 10
    __CHECK_INSTANCE_RETRY    = int(2 * __HEARTBEAT_INTERVAL / __CHECK_INSTANCE_INTERVAL)
    __CHECK_ALIVE_RETRY       = 8
    __CHECK_ALIVE_INTERVAL    = 10
    __BEAT_RETRY_COUNT        = 5
    __BEAT_RETRY_INTERVAL     = 2

    def __init__(self, host, directory, instance_id = None):
        """
        The heartbeat flag file will be:
            <host>:<directory>/.heartbeat.<instance_id>

        @param host:        remote host the flag file lives on
        @param directory:   remote directory holding the flag file
        @param instance_id: identifies this client; defaults to the local hostname
        """
        super(Heartbeat, self).__init__()
        self.__host = host
        if instance_id is None:
            instance_id = socket.gethostname()
        self.__instance_id = instance_id
        self.__heartbeat_file_dir = directory
        self.__heartbeat_file = os.path.join(self.__heartbeat_file_dir,
            '%s.%s' % (self.HEARTBEAT_FILE_PREFIX, self.__instance_id))
        # daemon thread so a hung ssh cannot block process exit
        self.__heartbeat_thread = threading.Thread(target = self.__heartbeat_worker)
        self.__heartbeat_thread.daemon = True
        self.__stop_event = threading.Event()

    def start(self):
        """Start touching the remote flag file periodically."""
        log.info('[Heartbeat] Start heartbeat: %s:%s' % (self.__host, self.__heartbeat_file))
        self.__heartbeat_thread.start()

    def __heartbeat_worker(self):
        """Worker loop: touch the flag file on the target host every
        __HEARTBEAT_INTERVAL seconds until stop() is called, retrying each
        beat up to __BEAT_RETRY_COUNT times."""
        cmd = 'touch %s' % self.__heartbeat_file
        while not self.__stop_event.is_set():
            for retry in xrange(self.__BEAT_RETRY_COUNT):
                log.debug('[Heartbeat] <HEARTBEAT>: try #%s %s:%s' % (retry, self.__host, self.__heartbeat_file))
                status, _ = common.ssh(self.__host, cmd, timeout=SSH_TIMEOUT)
                if status == 0:
                    break
                else:
                    log.warning('[Heartbeat] <FAILED>: try #%s %s:%s' % (retry, self.__host, self.__heartbeat_file))
                    time.sleep(self.__BEAT_RETRY_INTERVAL)
            # wait() instead of sleep() so stop() takes effect immediately
            self.__stop_event.wait(self.__HEARTBEAT_INTERVAL)

    def stop(self):
        """Stop the heartbeat thread and remove the remote flag file."""
        log.info('[Heartbeat] Stop heartbeat: %s:%s' % (self.__host, self.__heartbeat_file))
        self.__stop_event.set()
        self.__heartbeat_thread.join()
        cmd = 'rm -f %s' % self.__heartbeat_file
        common.ssh(self.__host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)

    def is_alive(self):
        """
        check if the heartbeat is alive
        return: True  - alive
                False - heartbeat file exist but expired
                None  - heartbeat file does not exist
        """
        result = None
        # current time - last modification time
        cmd = 'echo \\$((\\$(date +%%s)-\\$(stat %s -c %%Y)))' % self.__heartbeat_file
        try:
            for retry in xrange(self.__CHECK_ALIVE_RETRY):
                status, output = common.ssh(self.__host, cmd, timeout=SSH_TIMEOUT)

                # heartbeat file exists
                if status == 0:
                    try:
                        last_heartbeat_second = int(output)
                        log.debug('[Heartbeat] Last heartbeat %s sec ago. host: %s, id: %s' \
                            % (last_heartbeat_second, self.__host, self.__instance_id))
                        # set the expire time threshold to 3 * __HEARTBEAT_INTERVAL
                        # to tolerate 1 missing heartbeat
                        if last_heartbeat_second <= 3 * self.__HEARTBEAT_INTERVAL:
                            result = True
                        else:
                            # heartbeat file is expired
                            result = False
                        break
                    except Exception as e:
                        log.error('[Heartbeat] %s' % e)
                        result = None

                # heartbeat file does not exist
                # the heartbeat is stopped (or has not started yet)
                elif 'No such file or directory' in output:
                    result = None
                    log.debug('[Heartbeat] Heartbeat stopped or not started. try #%s, host: %s, id: %s' \
                        % (retry, self.__host, self.__instance_id))
                    # continue to retry to make sure it has stopped, instead of 'not started yet'

                # there is error, etc. ssh timeout
                else:
                    result = None
                    log.error('[Heartbeat] Check alive try #%s failed %s:%s' \
                        % (retry, self.__host, self.__heartbeat_file))
                    log.error('[Heartbeat] Check alive output %s' % (output))

                time.sleep(self.__CHECK_ALIVE_INTERVAL)
        except Exception as e:
            log.error(e)
            result = False
        return result

    def is_any_other_living_instance(self):
        """
        Check if there is any other running MDSSync instances for the same peer host:port

        Polls the heartbeat directory listing; if the listing changes between
        polls, some other instance refreshed its flag file and is alive.
        """
        is_running_instance = False
        existing_instances_str = None
        cmd = 'ls -l %s/%s.*' % (self.__heartbeat_file_dir, self.HEARTBEAT_FILE_PREFIX)
        for _ in xrange(self.__CHECK_INSTANCE_RETRY):
            status, output = common.ssh(self.__host, cmd, timeout=SSH_TIMEOUT)
            if status != 0:
                if 'No such file or directory' in output:
                    # no other heartbeat flag file
                    break
                else:
                    log.error('[Heartbeat] ' + output)
            else:
                if existing_instances_str is not None and output != existing_instances_str:
                    # some heartbeat flag file is refreshed
                    # there is some other running instance
                    is_running_instance = True
                    break
                existing_instances_str = output
            time.sleep(self.__CHECK_INSTANCE_INTERVAL)

        return is_running_instance


class LargeFileTransfer(object):
    """
    Manage the split, transfer and restore of all large db files, in async way.

    Pipeline: split worker threads ask the source host to split each db file
    (__start_split_file), rsync the resulting blocks over as they appear
    (__transfer_split_file), and enqueue each block for the restore worker
    threads, which write the block back into the destination file at its
    offset.  (None, ...) tuples act as shutdown sentinels for both queues.
    """

    __SPLIT_BLOCK_TMP      = ".blocktmp"
    __SPLIT_FLAG_DONE      = ".done"

    # states reported by __wait_for_split_update
    __SPLIT_STATUS_INIT    = 0
    __SPLIT_STATUS_UPDATED = 1
    __SPLIT_STATUS_DONE    = 2

    __SPLIT_CHK_INTERVAL   = 10

    __SPLIT_THREAD_COUNT   = 2
    __RESTORE_THREAD_COUNT = 3


    def __init__(self, file_list, src_host, src_path, is_same_segment):
        """
        @param file_list:       local destination paths of the large db files
        @param src_host:        host to sync the files from
        @param src_path:        directory on src_host holding the source files
        @param is_same_segment: when False, rsync is bandwidth-limited
        """
        super(LargeFileTransfer, self).__init__()
        self.__SPLIT_Q   = Queue.Queue()
        self.__RESTORE_Q = Queue.Queue()

        self.__SPLIT_THREADS    = []
        self.__RESTORE_THREADS  = []

        self.__file_list = file_list
        self.__src_host = src_host
        self.__src_path = src_path
        self.__is_same_segment = is_same_segment

        # shared ok/failed flag; any worker sets it to False on error and
        # the other workers stop when they next check it
        self.ASYNC_SPLIT_RESTORE_RESULT = True

        self.__init__threads()

    def __init__threads(self):
        # start the split and restore worker pools (daemon threads, so a
        # stuck worker cannot block process exit)
        for i in xrange(self.__SPLIT_THREAD_COUNT):
            t = threading.Thread(target = self.__split_transfer_file_worker)
            t.daemon = True
            t.start()
            self.__SPLIT_THREADS.append(t)
        for i in xrange(self.__RESTORE_THREAD_COUNT):
            t = threading.Thread(target = self.__restore_block_worker)
            t.daemon = True
            t.start()
            self.__RESTORE_THREADS.append(t)

    def start(self):
        """Queue one split/restore task per db file.  Returns False if
        queuing itself failed; the actual transfer result is tracked by
        ASYNC_SPLIT_RESTORE_RESULT and collected in wait()."""
        ret_code = True
        try:
            log.info("[LargeFileTransfer] Syncing large db files")
            for db_file in self.__file_list:
                src_file = os.path.join(self.__src_path, os.path.basename(db_file))
                log.info("[LargeFileTransfer] Start async transfer %s:%s to %s" %(self.__src_host, src_file, db_file))
                self.__add_new_split_restore_task(src_file, db_file)
        except Exception, e:
            log.error("[LargeFileTransfer] Exception when starting syncing large db files")
            log.error(e)
            ret_code = False
        self.__all_tasks_added()
        return ret_code

    def __add_new_split_restore_task(self, src_file, dest_file):
        # remote split dir lives under the source temp env; the local split
        # dir of the same name is created next to the destination file
        dest_tmp_dir = os.path.basename(src_file) + SPLIT_DIR_SUFFIX
        src_tmp_dir = os.path.join(get_src_tmp_env(self.__src_path), dest_tmp_dir)

        self.__SPLIT_Q.put((src_file, src_tmp_dir, dest_tmp_dir, dest_file))

    def __split_transfer_file_worker(self):
        # worker loop: one queue entry per db file; a src_file of None is
        # the shutdown sentinel, which is forwarded to the restore queue
        while True:
            (src_file, src_tmp_dir, dest_tmp_dir, dest_file) = self.__SPLIT_Q.get(True)
            if src_file == None or self.ASYNC_SPLIT_RESTORE_RESULT == False:
                self.__RESTORE_Q.put((None, None))
                break

            try:
                self.__start_split_file(src_file, src_tmp_dir)
                self.__transfer_split_file(src_tmp_dir, dest_tmp_dir, dest_file)
                # self.__RESTORE_Q.put((dest_tmp_dir, dest_file))
            except Exception, e:
                log.error(e)
                self.ASYNC_SPLIT_RESTORE_RESULT = False
                self.__RESTORE_Q.put((None, None))
                break

    def __start_split_file(self, src_file, src_tmp_dir):
        """Kick off (in the background, via ssh) this same script on the
        source host in 'split' mode for one source file."""
        if DEBUG_MODE:
            verbose = '-v'
        else:
            verbose = ''
        cmd = "python %s -o %s --src_file %s --dest_dir %s %s > /dev/null 2>&1 &" % \
            (SPLIT_RESTORE_FILE_UTIL, SOURCE_UTIL_OP_SPLIT, src_file, src_tmp_dir, verbose)
        log.info("[LargeFileTransfer] Run: %s" % cmd)
        status, _ = common.ssh(self.__src_host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
        # errno.EALREADY means there is already an instance running splitting the same src file
        if status != 0 and status != errno.EALREADY:
            raise Exception("Start split file %s:%s failed." % (self.__src_host, src_file))

    def __transfer_split_file(self, src_tmp_dir, dest_tmp_dir, dest_file):
        """Repeatedly pull newly split blocks from the source host until the
        split completes, enqueueing each new block for restore."""
        split_block_list = []
        status = self.__SPLIT_STATUS_INIT

        split_heartbeat = Heartbeat(self.__src_host, src_tmp_dir, instance_id = HEARTBEAT_ID_SPLIT)

        while status != self.__SPLIT_STATUS_DONE:
            status, split_block_list, new_block_list = self.__wait_for_split_update(src_tmp_dir, split_block_list, split_heartbeat)
            self.__transfer_split_blocks(src_tmp_dir, dest_tmp_dir, not self.__is_same_segment)
            for block in new_block_list:
                src_block = os.path.join(dest_tmp_dir, block)
                self.__RESTORE_Q.put((src_block, dest_file))

        log.info("[LargeFileTransfer] %s tranfered successfully." % dest_tmp_dir)

    def __wait_for_split_update(self, src_tmp_dir, split_block_list, split_heartbeat):
        """
          Check if split process has generated new block

          return: self.__SPLIT_STATUS_DONE if split is done
                  self.__SPLIT_STATUS_UPDATED is new block is generated but entire process is not done
                  raise exception if connection error or split time out
        """
        log.info("[LargeFileTransfer] Wait for split at %s:%s" % (self.__src_host, src_tmp_dir))
        status = self.__SPLIT_STATUS_INIT
        while True:
            time.sleep(self.__SPLIT_CHK_INTERVAL)
            split_alive = split_heartbeat.is_alive()
            cur_split_block_list = self.__get_split_block_list(src_tmp_dir)
            # split heart beat is expired or stopped
            # but split has not completed
            if (not split_alive) and (self.__SPLIT_FLAG_DONE not in cur_split_block_list):
                break
            # no new split block
            if split_block_list == cur_split_block_list:
                continue
            # there is new split block
            else:
                # split complete
                if self.__SPLIT_FLAG_DONE in cur_split_block_list:
                    log.info("[LargeFileTransfer] Split done at %s:%s" % (self.__src_host, src_tmp_dir))
                    cur_split_block_list.remove(self.__SPLIT_FLAG_DONE)
                    status = self.__SPLIT_STATUS_DONE
                # split update, but not complete
                else:
                    log.info("[LargeFileTransfer] Split updated at %s:%s" % (self.__src_host, src_tmp_dir))
                    status = self.__SPLIT_STATUS_UPDATED

                new_block_list = filter(lambda block: block not in split_block_list, cur_split_block_list)
                return status, cur_split_block_list, new_block_list

        # no new heartbeat is detected after certain time
        # split timeout
        raise Exception("Split time out for %s:%s" % (self.__src_host, src_tmp_dir))

    def __get_split_block_list(self, src_tmp_dir):
        """List the block files currently present in the remote split dir
        (oldest first; the tmp block and heartbeat files are excluded)."""
        log.info("[LargeFileTransfer] Get split block list from %s:%s" % (self.__src_host, src_tmp_dir))
        cmd = "/bin/ls -1Atr %s -I %s -I %s*" % (src_tmp_dir, self.__SPLIT_BLOCK_TMP, Heartbeat.HEARTBEAT_FILE_PREFIX)
        log.info("[LargeFileTransfer] Run: %s" % cmd)
        status, output = common.ssh(self.__src_host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
        if status != 0:
            raise Exception("Cannot get split status from %s:%s" % (self.__src_host, src_tmp_dir))
        else:
            block_list = output.split("\n")
            block_list = filter(None, block_list)
            for i in xrange(len(block_list)):
                block_list[i] = os.path.basename(block_list[i])
            return block_list

    def __transfer_split_blocks(self, src_tmp_dir, split_dir, bw_limit_flag):
        """
          rsync the split file blocks to current dir, retry if failed.
        """
        path = "%s:%s/*" % (self.__src_host, src_tmp_dir)
        command = ['rsync', '-avc', '--progress', '-e', 'ssh -o StrictHostKeyChecking=no',
            '--exclude=%s' % self.__SPLIT_BLOCK_TMP, '--exclude=%s*' % Heartbeat.HEARTBEAT_FILE_PREFIX, path, split_dir+'/']
        if bw_limit_flag:
            # cap bandwidth when crossing segments
            command.insert(1, RSYNC_BANDWIDTH)
        result = rsync_with_retry(command)
        if not result:
            raise Exception("Failed to transfer split blocks from %s:%s" % (self.__src_host, src_tmp_dir))

    def __restore_block_worker(self):
        # worker loop: restore each queued block; a src_block of None is
        # the shutdown sentinel
        while True:
            (src_block, dest_file) = self.__RESTORE_Q.get(True)
            if src_block == None or self.ASYNC_SPLIT_RESTORE_RESULT == False:
                break
            # add random time delay for testing purpose
            # NOTE(review): this 1-20s random sleep looks like leftover test
            # code on the production path -- confirm whether it can be removed
            time.sleep(random.randint(1, 20))
            result = self.__restore_block(src_block, dest_file)
            if not result:
                log.error("[LargeFileTransfer] Restore file '%s' failed." % dest_file)
                self.ASYNC_SPLIT_RESTORE_RESULT = False
                break

    @staticmethod
    def __restore_block(src_block, dest_file):
        """
            Restore the dest file from the given src block
        """
        result = SplitMerge.restore_block(src_block, dest_file)
        if result == SplitMerge.RESULT_OK:
            log.info("[LargeFileTransfer] Restore block %s succeeded" % src_block)
            return True
        else:
            log.error("[LargeFileTransfer] Failed to restore block %s to %s" % (src_block, dest_file))
            return False


    def __all_tasks_added(self):
        # push one shutdown sentinel per split worker
        # NOTE(review): wait() pushes the same sentinels again, so each
        # split worker ultimately receives two; the extras are harmless
        # (workers have already exited) but look unintentional -- confirm
        for i in xrange(self.__SPLIT_THREAD_COUNT):
            self.__SPLIT_Q.put((None, None, None, None))

    def wait(self):
        """Block until all split, transfer and restore work has finished."""
        for i in xrange(self.__SPLIT_THREAD_COUNT):
            self.__SPLIT_Q.put((None, None, None, None))
        for t in self.__SPLIT_THREADS:
            t.join()
        log.info('[LargeFileTransfer] All split and transfer tasks done')
        for i in xrange(self.__RESTORE_THREAD_COUNT):
            self.__RESTORE_Q.put((None, None))
        for t in self.__RESTORE_THREADS:
            t.join()
        log.info('[LargeFileTransfer] All restore tasks done')


def is_mds_stopped(port):
    """
    Return True when the MDS service for the given port is not running.
    """
    mds_svc_name = mds_service.get_mds_service_name_by_port(port)
    # When the mds is disabled, is_svc_stopped() can mis-report the
    # service as running, so query the running state instead and treat
    # any failure to query as "not running".
    try:
        return not service.is_svc_running(mds_svc_name)
    except Exception:
        return True

def is_mds_initialized(host, port):
    """
    Return True when the MDS at host:port reports itself initialized.

    The egrep filters out the expected noise lines; a zero exit status
    therefore means some unexpected (non-initialized) output remained.
    """
    cmd = "mauisvcmgr -s mauimds -c mauimds_isNodeInitialized -m %s:%s | egrep -vi 'sending|running|true|^$'" \
        % (host, port)
    status, _ = commands.getstatusoutput(cmd)
    return status != 0

def wait_mds_initialized(host, port):
    """
    Poll until the MDS at host:port reports itself initialized.

    A no-op returning True when NOSTART is set (the service is never
    started in that mode).  Returns False when the MDS did not
    initialize within INITED_RETRY_TIMES polls.
    """
    if NOSTART:
        return True
    log.info('[Status] Wait for MDS %s to be initialized' % port)
    attempts = 0
    while attempts < INITED_RETRY_TIMES:
        time.sleep(INITED_RETRY_INTERVAL)
        if is_mds_initialized(host, port):
            return True
        attempts += 1
    return False

def set_mds_status(port, action):
    """
    Enable or disable the MDS listening on the given port.

    Raises Exception on an invalid action/port or when the service
    command fails; returns True on success.
    """
    if action not in ("enable", "disable"):
        raise Exception("Invalid action")

    # remote replication ports are managed by a different service
    if mds_service.is_remote_mds_port(port):
        service_name = 'mauiremoterep'
    elif mds_service.is_valid_mds_port(port):
        service_name = 'mauimds'
    else:
        raise Exception('Invalid port number: %s' % port)

    log.info("[Status] %s MDS: %d" % (action, port))

    cmd = "service %s %s %s" % (service_name, action, port)
    status, output = commands.getstatusoutput(cmd)
    if status == 0:
        return True
    msg = "Failed to %s mds %s\n" % (action, port)
    msg += cmd + "\n"
    msg += output + "\n"
    raise Exception(msg)

def validateUUID(UUID, target_path):
    """
    Extract the fs uuid from target_path and check it against UUID.

    target_path looks like:
        "/mauimds-db/mds-6c1082d0-1fbd-492a-b75c-c2e8ffd51e21/master"
    so the third path element carries the uuid with an "mds-" prefix.

    Returns True when UUID is empty (no check requested) or matches the
    uuid embedded in the path, False otherwise.
    """
    path_elements = target_path.split('/')
    if len(path_elements) > 3:
        fsud = path_elements[2]
    else:
        log.error("[Status] could not get fsud from target path %s" % target_path)
        return False

    # BUGFIX: string.lstrip(fsud, "mds-") stripped the *character set*
    # {m, d, s, -}, which corrupts uuids that start with one of those
    # characters (e.g. "mds-d3f4..." became "3f4...").  Remove the
    # literal "mds-" prefix instead.
    if fsud.startswith("mds-"):
        extracted = fsud[len("mds-"):]
    else:
        extracted = fsud

    # Check the config file to make sure the UUID that is passed matches
    # the UUID value extracted from bdbxmlEnvRoot; skip the port if not.
    if UUID != "" and UUID != extracted:
        log.error("[Status] fs uuid not consist in mds config file, skip this mds")
        return False

    return True


def validate_and_create_env(target_path, tmp_target_path):
    """
    Validate the target env root and make sure the local tmp env exists.

    Aborts (returns False) when target_path already holds data, so that
    nothing on the target is ever overwritten.  Creates tmp_target_path
    when it is missing.  Returns True when the tmp env is ready for use.
    """
    # Refuse to touch a non-empty target and let the CE folks decide
    # what to do with the existing data.
    if os.path.isdir(target_path) and len(os.listdir(target_path)) != 0:
        log.error("[TmpEnv] path %s not empty, skip this mds. Please back up the directory before perform mdssync" % (target_path))
        return False

    # Files are transferred into a tmp local env first and renamed to
    # the target env afterwards.
    if os.path.exists(tmp_target_path):
        if os.path.isdir(tmp_target_path):
            return True
        # BUGFIX: the %s placeholder was never filled in; supply the path
        log.error("[TmpEnv] tmp env %s exists but is not a directory" % tmp_target_path)
        return False

    # The tmp directory does not exist: create it
    try:
        log.info("[TmpEnv] tmp env %s does not exist, creating a new dir" % (tmp_target_path))
        os.mkdir(tmp_target_path)
    except OSError as msg:
        log.error("[TmpEnv] Error creating the tmp env %s. Error Message:%s" % (tmp_target_path, msg))
        return False

    return True

def check_same_segment(host):
    """
    Check whether the host has a management-network alias in /etc/hosts.

    Returns (True, mgmt_hostname) when the faster mgmt interface can be
    used for file transfers, otherwise (False, original_hostname).
    """
    mgmt_host = host + MGMT_HOST_SUFFIX
    status, _ = commands.getstatusoutput("grep %s %s" % (mgmt_host, HOSTS_FILE))
    if status == 0:
        log.info("[Status] Use mgmt interface for transfer")
        return True, mgmt_host
    return False, host

def get_src_size(host, src_path):
    """
    Return the size in bytes of the db env at host:src_path (via du -b).

    Raises Exception when the remote command fails or its output cannot
    be parsed.
    """
    cmd = "/usr/bin/du -b -s %s" % src_path
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        raise Exception("Cannot get size for %s:%s" % (host, src_path))
    try:
        # first whitespace-separated field of 'du -b -s' is the byte count
        return int(output.split()[0])
    except Exception:
        raise Exception("Failed to parse size for %s:%s" % (host, src_path))

def get_src_tmp_env(src_path):
    """
    Build the path of the mdssync temp env on the peer (source) node:
    same parent directory as src_path, with the basename prefixed by
    SRC_TMP_ENV_PREFIX.
    """
    parent, base = os.path.split(src_path)
    return os.path.join(parent, SRC_TMP_ENV_PREFIX + base)

def get_local_tmp_env(db_env):
    """
    Build the path of the mdssync temp env on the local host: same
    parent directory as db_env, with the basename prefixed by
    LOCAL_TMP_ENV_PREFIX.
    """
    parent, base = os.path.split(db_env)
    return os.path.join(parent, LOCAL_TMP_ENV_PREFIX + base)

def get_available_space(host, path):
    """
    Return the available disk space in bytes at host:path (via df -B1).

    Raises Exception when the remote command fails or its output cannot
    be parsed.
    """
    status, output = common.ssh(host, "df -B1 %s" % path,
                                retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        raise Exception("Cannot get available space for %s:%s" % (host, path))
    try:
        # last line of df output; 4th column holds the available bytes
        fields = output.split('\n')[-1].split()
        return int(fields[3])
    except Exception:
        raise Exception("Failed to parse available space for %s:%s" % (host, path))

def check_local_available_disk_space(host, src_path, tmp_target_path):
    """
    Check if there is enough space on the current disk for rsync and db_recover
    """

    try:
        src_size = get_src_size(host, src_path)
        available_space = get_available_space('localhost', tmp_target_path)
        log.info('[Status] Source size: %s bytes' % src_size)
        log.info('[Status] Local available space: %s bytes' % available_space)
    except Exception, e:
        log.error(e)
        return False

    if available_space > src_size * DISK_SPACE_THRESHOLD_RATIO_LOCAL:
        return True
    elif available_space > src_size + EXTRA_DISK_SPACE:
        log.warn("[Status] LOW DISK SPACE!")
        if IGNORE_SPACE:
            log.warn('-=================== WARNNING ===================-')
            log.warn('| You are running mdssync with --ignore-space option.')
            log.warn('| Please make sure there is enough disk space on localhost.')
            log.warn('--------------------------------------------------')
            return True
        else:
            log.warn("[Status] Use '--ignore-space' option to force mdssync run when disk space is low")
            return False
    else:
        log.error("[Status] Not enough local disk space for mds sync")
        return False

def check_src_available_disk_space(host, src_path):
    try:
        src_size = get_src_size(host, src_path)
        available_space = get_available_space(host, src_path)
        log.info('[Status] Source size: %s bytes' % src_size)
        log.info('[Status] Source available space: %s bytes' % available_space)
    except Exception, e:
        log.error(e)
        return False

    if available_space > src_size * DISK_SPACE_THRESHOLD_RATIO_SRC:
        return True
    elif IGNORE_SPACE:
        log.warn('-=================== WARNNING ===================-')
        log.warn('| You are running mdssync with --ignore-space option.')
        log.warn('| Please make sure there is enough disk space on')
        log.warn('| %s:%s' % (host, src_path))
        log.warn('--------------------------------------------------')
        return True
    else:
        log.warn("[Status] Use '--ignore-space' option to force mdssync run when src disk space is low")
        log.error("[Status] Not enough src disk space for mds sync on %s" % host)
        return False

def check_available_disk_space(host, src_path, tmp_target_path):
    """Run the local disk space check, then the source-side one."""
    if not check_local_available_disk_space(host, src_path, tmp_target_path):
        return False
    return check_src_available_disk_space(host, src_path)

def get_latest_log_file(host, env):
    """
    Return the path of the BDB log file with the highest sequence number
    in env on the given host (empty string when there is none).

    Raises Exception when the remote command fails.
    """
    log.info('[Log] Get last BDB log file at %s:%s' % (host, env))
    # BUGFIX: quote the 'log*' pattern so the remote shell cannot expand
    # it against files in the login directory before find ever runs
    cmd = "find %s -maxdepth 1 -name 'log*' | sort -r | head -n 1" % env
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error(output)
        raise Exception('Failed to get latest BDB log at %s:%s' % (host, env))
    output = output.strip()
    log.info('[Log] Last log: %s:%s' % (host, output))
    return output

def get_checkpoint_log_file(host, env):
    """
    Read the checkpoint-done flag file in the source tmp env; it holds
    the name of the last log file written when the checkpoint ran.

    Raises Exception when the flag file cannot be read.
    """
    log.info('[Log] Get checkpoint log file at %s:%s' %(host, env))
    checkpoint_done_flag = os.path.join(get_src_tmp_env(env), CHKPOINT_DONE_FLAG)
    status, output = common.ssh(host, 'cat %s' % checkpoint_done_flag,
                                retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error(output)
        raise Exception('Failed to get checkpoint log file')
    return output.strip()

def get_src_mds_db_file_list(src_host, src_dir):
    """
      Get the MDS DB file list from the src MDS src_host node regarding with
      src_dir.

      Return: a list with the file name as each element on success.
              None on failure.
    """

    # ignore subdirectories, log files and __db* files
    # use 'du -b' to get size in bytes
    cmd = "find %s -maxdepth 1 -not -type d -and -not -path '*__db*' -and -not -path '*log*' | xargs du -b" % (src_dir)
    status, output = common.ssh(src_host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error("[Status] Cannot get MDS DB file list from %s:%s" % (src_host, src_dir))
        return None
    else:
        try:
            file_list = output.split('\n')
            for i in range(len(file_list)):
                # parse the output of 'du -b' command and get the file name and file size
                # e.g. '16384   /mauimds-db/mds-4385d679-a907-45b7-acd7-121969e50caf/mds/mds_10402/master/<SOME_FILE>'
                attrs = file_list[i].split()
                file_list[i] = (int(attrs[0]), os.path.basename(attrs[1]))
            return file_list
        except Exception, msg:
            log.error(msg)
            log.error("[Status] Failed to get MDS DB file list") 
            return None

def get_small_and_large_db_file_list(host, src_path):
    """
    Partition the MDS DB files at host:src_path by size, so that large
    files can go through split-merge while small files use plain rsync.

    Returns (small_db_file_list, large_db_file_list) of file names.
    Raises Exception when the file list cannot be fetched.
    """
    raw_file_tuple_list = get_src_mds_db_file_list(host, src_path)
    if raw_file_tuple_list is None:
        raise Exception("There was a problem. Fail to get MDS DB file from %s:%s" % (host, src_path))

    # separate small and large db files
    large_db_file_list = [name for (size, name) in raw_file_tuple_list
                          if size > SPLIT_FILE_SIZE_THRESHOLD]
    small_db_file_list = [name for (size, name) in raw_file_tuple_list
                          if size <= SPLIT_FILE_SIZE_THRESHOLD]

    return small_db_file_list, large_db_file_list

def rsync_with_retry(command):
    log.info("[Rsync] New rsync command %s" % command)
    ret_code = False
    try:
        for try_time in range(RSYNC_MAX_RETRY_COUNT):
            # print the output directly to stdout but capture error message and record to log file
            p = subprocess.Popen(command, stderr=subprocess.PIPE)
            p.wait()

            for err_msg in p.stderr:
                log.error("[Rsync] %s", err_msg)

            if p.returncode == 0:
                log.info("[Rsync] Rsync succeeded")
                ret_code = True
                break
            elif p.returncode == 24:
                log.warn("[Rsync] Ignore error code: 24 vanished file error")
                ret_code = True
                break
            else:
                log.error("[Rsync] %d time rsync failed, error code: %s" % ((try_time+1), p.returncode))

    except Exception, e:
        log.error('[Rsync] %s' % e)

    return ret_code

def transfer_small_db_files(exclude_db_file_list, host, src_path, is_same_segment):
    """
    Rsync the small db files from host:src_path into the current
    directory, excluding BDB logs, __db region files and the (large)
    files listed in exclude_db_file_list.  Bandwidth is throttled when
    the peer is not reachable over the mgmt network.
    """
    log.info("[SyncDbEnv] Syncing small db files")

    # '--no-r' makes rsync skip directories in the db env;
    # '--no-l' makes it skip symlinks
    command = ['rsync', '-avz', '--no-r', '--no-l', '--block-size=16384',
               '--progress', '-e', 'ssh -o StrictHostKeyChecking=no',
               '--exclude=log.*', '--exclude=__db.*']
    if not is_same_segment:
        command.insert(1, RSYNC_BANDWIDTH)
    # the large db files are transferred separately via split-merge
    for db_file in exclude_db_file_list:
        command.append('--exclude=%s' % os.path.basename(db_file))
    command.append("%s:%s/*" % (host, src_path))
    command.append('.')

    return rsync_with_retry(command)

def trigger_check_point(host, src_tmp_dir, port):
    """
    Kick off a BDB checkpoint on the source node (asynchronously, via
    the split/restore utility), wait for its heartbeat to stop, then
    verify the checkpoint-done flag was written.

    Returns True on success, False on failure.
    """
    if DEBUG_MODE:
        verbose = '-v'
    else:
        verbose = ''
    cmd = "python %s -o %s -p %s --dest_dir %s %s > /dev/null 2>&1 &" % \
        (SPLIT_RESTORE_FILE_UTIL, SOURCE_UTIL_OP_CHKPOINT, port, src_tmp_dir, verbose)
    log.debug('[Checkpoint] <RUN> %s' % cmd)
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    # errno.EALREADY means there is another mdssync instance running on
    # the source side triggering checkpoint for the same MDS port
    if status == 0 or status == errno.EALREADY:
        log.info("[Checkpoint] Checkpoint started")
    else:
        log.error('[Checkpoint] ' + output)
        return False

    log.info('[Checkpoint] Wait for checkpoint')
    # the source side maintains a heartbeat while the checkpoint runs;
    # poll until the heartbeat stops
    chkpoint_heartbeat = Heartbeat(host, src_tmp_dir, instance_id = HEARTBEAT_ID_CHKPOINT)
    while True:
        # wait some time for the first heartbeat
        time.sleep(CHKPOINT_CHK_INTERVAL)
        if not chkpoint_heartbeat.is_alive():
            break

    cmd = "test -f %s" % os.path.join(src_tmp_dir, CHKPOINT_DONE_FLAG)
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error('[Checkpoint] Checkpoint failed')
        log.error('[Checkpoint] %s' % output)
        return False
    log.info('[Checkpoint] Checkpoint done')
    # BUGFIX: the function used to fall off the end and return None
    # (falsy) on success while returning False on failure; return True
    # explicitly so callers can test the result.
    return True

def interpolate_log_files_list(first_log, last_log):
    """
    Build the list of BDB log file names between first_log and last_log
    (inclusive), padding backwards by BUFF_LOG_NUM for safety.

    An empty last_log means the env held no log beyond the checkpoint
    one, so the range collapses onto the first log.
    """
    if last_log == '':
        last_log = first_log
    log.info('[Log] Interpolate log file list')
    log.info('[Log] First log file: %s' % first_log)
    log.info('[Log] Last log file:  %s' % last_log)

    # BDB log files are named log.<10-digit sequence number>
    first_log_num = int(first_log.rsplit('.', 1)[-1])
    last_log_num = int(last_log.rsplit('.', 1)[-1])

    # pad backwards for safety; log numbering starts at 1
    first_log_num = max(first_log_num - BUFF_LOG_NUM, 1)

    # an inclusive range, empty when last < first
    log_file_list = []
    for log_num in xrange(first_log_num, last_log_num + 1):
        name = 'log.%010d' % log_num
        log_file_list.append(name)
        log.debug('[Log] Log file to transfer: %s' % name)

    return log_file_list


def transfer_log_files(host, src_path, is_same_segment):
    """
    Rsync the BDB log files (from the checkpoint log up to the latest
    log) from host:src_path into the current directory, using rsync's
    --files-from option instead of the tar-rsync-untar method.

    Note: the caller has already chdir'ed into the target-side tmp env,
    so LOG_FILE_LIST is created there.  Returns True on success.
    """
    log.info("[Log] Syncing log files")

    checkpoint_log_file = get_checkpoint_log_file(host, src_path)
    last_log_file = get_latest_log_file(host, src_path)

    # write the list of log files to be rsynced
    log_file_list = interpolate_log_files_list(checkpoint_log_file, last_log_file)
    # use a with-block so the file is closed even if a write fails
    with open(LOG_FILE_LIST, 'w') as log_file_list_file:
        for log_file in log_file_list:
            log_file_list_file.write(log_file + '\n')

    path = "%s:%s/" % (host, src_path)
    command = ['rsync', '-avz', '--progress', '--files-from=%s' % LOG_FILE_LIST, path, '.']
    if not is_same_segment:
        command.insert(1, RSYNC_BANDWIDTH)

    return rsync_with_retry(command)

def rsync_db_env_and_log_files(host, src_path):
    """
      Sync Db files and log files to current directory.
    """

    # check if the peer node is in the same IS
    is_same_segment, host = check_same_segment(host)

    # first sync db files
    # handle large and small db files separately
    try:
        (_, large_db_file_list) = get_small_and_large_db_file_list(host, src_path)
    
        # use split-merge for large db files
        # result = startTransferLargeDBFiles(large_db_file_list, host, src_path, is_same_segment)
        large_db_transer = LargeFileTransfer(large_db_file_list, host, src_path, is_same_segment)
        result = large_db_transer.start()
        if result == False:
            raise Exception("Failed to transfer large db files")

        if large_db_transer.ASYNC_SPLIT_RESTORE_RESULT == False:
            raise Exception("Failed to split and restore large db files")
        # use rsync for small db files
        # the large_db_file_list is passed to be excluded
        # result = transfer_small_db_files(large_db_file_list, host, src_path, is_same_segment)
        # if result == False:
        #     raise Exception("Failed to transfer small db files")

        if large_db_transer.ASYNC_SPLIT_RESTORE_RESULT == False:
            raise Exception("Failed to split and restore large db files")

        log.info("[SyncDbEnv] Wait for restoring file blocks")
        large_db_transer.wait()
        if large_db_transer.ASYNC_SPLIT_RESTORE_RESULT == False:
            raise Exception("Failed to split and restore large db files")
        # use rsync for log files
        # result = transfer_log_files(host, src_path, is_same_segment)
        # if result == False:
        #     raise Exception("Failed to transfer log files")

        log.info("[SyncDbEnv] All file blocks restored successfully")

    except Exception, e:
        log.error('[SyncDbEnv] %s' % e)
        return False
    return True

def start_mds_service(port):
    """
    Start the MDS service for the given port; a no-op returning True
    when --nostart was requested.
    """
    if NOSTART:
        return True
    mds_svc_name = mds_service.get_mds_service_name_by_port(port)
    return service.start_service(mds_svc_name, log)

def get_db_env_by_port(host, portnum):
    """
    Query the peer for the mount path (db env root) of the MDS at
    host:portnum.

    Returns (True, path) on success, (False, None) on failure.
    """
    cmd = "mauisvcmgr -s mauimds -c mauimds_getmnt -m %s:%d | grep -vi 'Sending command to' | awk -F : '{print $2}'" \
        % (host, portnum)
    status, src_path = commands.getstatusoutput(cmd)
    if status == 0:
        return True, src_path
    log.error("[Status] Failed to get mount point from peer, skip this mds")
    return False, None

def create_src_temp_dir(host, src_tmp_dir):
    """
    Ensure the mdssync temp directory exists on the peer node.

    A non-empty existing temp dir is only accepted when the -R
    (reuse-split) option was given; otherwise the user must move it out
    of the way first.  Returns True when the dir is ready.
    """
    # probe the tmp dir on the source node
    cmd = "ls -A %s" % src_tmp_dir
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status == 0 and output:
        # tmp dir exists and is not empty: the user must explicitly
        # opt in to reusing it
        if not REUSE_SPLIT:
            log.error('[TmpEnv] %s:%s already exists and is not empty' % (host, src_tmp_dir))
            log.error('[TmpEnv] Please make sure the tmp dir is not in use and move it')
            log.error('[TmpEnv] Or use -R option to reuse the tmp dir on peer node')
            return False
        log.warn('[TmpEnv] %s:%s already exists and is not empty' % (host, src_tmp_dir))
        log.warn('[TmpEnv] Running with -R option')
    elif status != 0 and 'No such file or directory' not in output:
        # ls failed for some reason other than a missing dir
        log.error('[TmpEnv] Failed to check temp dir on peer node')
        log.error('[TmpEnv] ' + output)
        return False

    # create the dir remotely when it is missing
    cmd = "src_tmp_dir=%s; test -d \\$src_tmp_dir || mkdir \\$src_tmp_dir" % src_tmp_dir
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error('[TmpEnv] Failed to create temp dir on peer node')
        log.error('[TmpEnv] ' + output)
        return False
    log.info("[TmpEnv] Create temp dir on peer node succeeded")
    return True

def cleanup_local_tmp_env(target_path):
    """
    Clean up tmp env on localhost
    """
    # clean up local tmp data first
    log.info("[TmpEnv] Clean up %s tmp directories on local host..." % SPLIT_DIR_SUFFIX)

    log.info('[TmpEnv] Clean up log file list')
    os.remove(os.path.join(target_path, LOG_FILE_LIST))

    tmp_dirs = glob.glob("%s/*%s" % (target_path, SPLIT_DIR_SUFFIX))
    for split_dir in tmp_dirs:
        if os.path.isdir(split_dir):
            try:
                log.info("[TmpEnv] Clean up split directory %s..." % split_dir)
                shutil.rmtree(split_dir)
            except Exception, e:
                log.error('[TmpEnv] %s' % e)
                log.warn("[TmpEnv] Clean up local tmp dir %s failed, please remove manually later" \
                    % split_dir)

def cleanup_remote_tmp_env(host, src_path):
    """
    Best-effort removal of the mdssync temp directory on the peer node;
    a failure is only logged as a warning.
    """
    src_tmp_env = get_src_tmp_env(src_path)
    log.info("[TmpEnv] Clean up tmp directory at %s:%s..." % (host, src_tmp_env))
    status, _ = common.ssh(host, "rm -rf %s" % (src_tmp_env),
                           retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.warn("[TmpEnv] Clean up remote tmp directory %s:%s failed, please remove manually later" \
            % (host, src_tmp_env))

def move_tmp_env(tmp_target_path, target_path):
    """
    create target directory and move the content of the temp directory into target
    """
    result = True
    try:
        if os.path.isfile(target_path):
            raise Exception('%s is a regular file' % target_path)
        if not os.path.isdir(target_path):
            os.mkdir(target_path)
        cmd = 'mv %s/* %s' % (tmp_target_path, target_path)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            raise Exception(output)
        shutil.rmtree(tmp_target_path)
    except Exception, e:
        result = False
        log.error(e)
        log.error("Failed to create target MDS env")
    
    return result

def _mdscfg_get_peer_port(peer_host, rep_port):
    """
    Map a replication port to its MDS port on peer_host.

    mauisvcmgr -c mauimds_getrepport cannot be used on 2.1.x (it times
    out when only one mds of the set is running), so instead grep the
    replication port out of /etc/maui/mds/<port>/mds_cfg.xml and take
    the <port> component of the matching file path.

    Raises Exception when the config cannot be read remotely.
    """
    cmd = "grep %s /etc/maui/mds/*/mds_cfg.xml | grep replicationPort" % rep_port
    status, output = common.ssh(peer_host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status:
        log.error(output)
        raise Exception('Failed to get mds port from peer host %s' % peer_host)
    # matching path looks like /etc/maui/mds/<port>/mds_cfg.xml
    return int(output.split('/')[4])

def _mdscfg_get_mdsset_members(mds_config):
    """
    Parse and return a list of MDS members (host, port) of the mds set
    or an empty list [] if failed to parse any MDS member.

    mds_config is a parsed mds_cfg.xml (common.Properties-like object).
    Raises Exception when the mds is not configured yet.
    """
    mdsset_members = []

    # master of the set: the config stores its host and replication port
    peer_host = mds_config.get_value('masterHost')
    rep_port = mds_config.get_value('masterReplicationPort')

    # "0000" is the placeholder value of an unconfigured mds
    if  rep_port == "0000":
        raise Exception("This mds has not been configured yet, ignore")

    try:
        # translate replication port -> mds port on the master host
        peer_port = _mdscfg_get_peer_port(peer_host, rep_port)
        mdsset_members.append((peer_host, peer_port))
        log.info('[Status] MDS set member: %s:%s' % (peer_host, peer_port))
    except Exception, e:
        # this node may be down
        log.error(e)

    # 2nd slave MDS and/or remote MDS
    other_replicas_str = mds_config.get_value('otherReplicas')
    if other_replicas_str:
        for other_replica in other_replicas_str.split():
            [peer_host, rep_port] = other_replica.split(':')
            # only consider '105'-prefixed replication ports here
            # ('107'-prefixed ones denote remote MDS, handled below)
            if rep_port.startswith('105'):
                try:
                    peer_port = _mdscfg_get_peer_port(peer_host, rep_port)
                    mdsset_members.append((peer_host, peer_port))
                    log.info('[Status] MDS set member: %s:%s' % (peer_host, peer_port))
                except Exception, e:
                    # this node may be down
                    log.error(e)

    # for Atmos before version 2.1.0, there is no 'otherReplicas' field in mds_cfg.xml
    # if we are on a remote node, we can only see the master host from mds_cfg.xml
    # we should parse the mds_cfg.xml of the master node
    # to get the 3rd (slave) MDS
    replication_port = mds_config.get_value('replicationPort')
    if replication_port.startswith('107'):
        # it is a remote MDS
        # get the version e.g. get '2.1' from version string '2.1.0.78154'
        # Note that there is a bug in mauiutility.get_maui_version()
        # in Atmos version 2.1.3 which will throw Exception
        try:
            try:
                cur_version = mauiutility.get_maui_version().rsplit('.', 2)[0]
            finally:
                # FIXME: after call mauiutility, log.parent get some StreamHandler
                # which will pollute the logging of the current script
                # so fix this issue by manually removing handlers from log.parent
                while log.parent.handlers:
                    log.parent.removeHandler(log.parent.handlers[0])
            # NOTE(review): lexicographic version comparison -- fine for
            # '2.0' vs '2.1' but breaks for e.g. '2.10'; confirm intended
            if cur_version < '2.1':
                mds_cfgfile = '/etc/maui/mds/%s/mds_cfg.xml' % peer_port
                cmd = 'cat %s' % mds_cfgfile
                status, output = common.ssh(peer_host, cmd)
                if status == 0:
                    # this recursion will only happen on a remote node
                    # the recursion will stop when called for a local MDS
                    # so the max recursion depth is 2
                    xmlconfObj = Properties(output)
                    mdsset_members.extend(_mdscfg_get_mdsset_members(xmlconfObj))
                else:
                    raise Exception('Failed to parse mds cfg file from %s:%s' % (peer_host, peer_port))
        except Exception, e:
            log.error(e)

    # de-duplicate members collected from multiple sources
    return list(set(mdsset_members))

def _mdscfg_filter_running_master(mds_list):
    """
    Scan a list of MDS (host, port) tuples and return the first one that
    reports itself as master, or (None, None) when no running master is
    found.
    """
    for mds in mds_list:
        cmd = "mauisvcmgr -s mauimds -c mauimds_ismaster -m %s:%s | grep mauimds_ismaster=yes" % mds
        status, _ = commands.getstatusoutput(cmd)
        if status == 0:
            log.info('[Status] Running master: %s:%s' % mds)
            return mds
    return (None, None)

def _mdscfg_filter_running_mds(mds_list):
    """
    Scan a list of MDS (host, port) tuples and return the first one that
    reports itself initialized (i.e. running), or (None, None) when none
    is found.
    """
    for mds in mds_list:
        cmd = "mauisvcmgr -s mauimds -c mauimds_isNodeInitialized -m %s:%s | grep Initialized=true" % mds
        status, _ = commands.getstatusoutput(cmd)
        if status == 0:
            log.info('[Status] Running mds: %s:%s' % mds)
            return mds
    return (None, None)

def _mdscfg_get_db_env_and_running_peer(port):
    """
    Get the DB env, mds peer host and port for the local MDS listening
    on the given port.

    Returns (ret_code, db_env, peer_host, peer_port); ret_code is False
    when no usable sync source was found or parsing failed.
    """
    bad_ret = (False, None, None, None)
    try:
        ret_code = True
        # the local mds_cfg.xml gives both the db env root and the
        # information needed to enumerate the set members
        mds_cfgfile = "/etc/maui/mds/%d/mds_cfg.xml" % port
        mds_config  = common.Properties(mds_cfgfile)
        db_env = mds_config.get_value('bdbxmlEnvRoot')

        set_members = _mdscfg_get_mdsset_members(mds_config)

        # prefer to sync from the running master of the set
        (peer_host, peer_port) = _mdscfg_filter_running_master(set_members)
        if peer_host == None:
            # failback to running mds if --force option is set
            log.warn('[Status] No running master mds')
            if FORCE_RUN:
                # log.warn('[Status] Running with --force option. Failback to running slave MDS')
                (peer_host, peer_port) = _mdscfg_filter_running_mds(set_members)
                if peer_host:
                    log.warn('-=================== WARNNING ===================-')
                    log.warn('| You are running mdssync with --force option.')
                    log.warn('| There is no running MDS master in the MDS set.')
                    log.warn('| Will sync form MDS %s:%s' % (peer_host, peer_port))
                    log.warn('| Please make sure MDS %s:%s is in healthy status' % (peer_host, peer_port))
                    log.warn('--------------------------------------------------')
                else:
                    log.error('[Status] No initialized local MDS in the current MDS set')
                    log.error('[Status] Please escalate to L4')
            else:
                log.warn('[Status] Use --force option to sync from running slave MDS')

        # no usable peer at all: report failure but still return db_env
        if peer_host == None:
            ret_code = False
        return (ret_code, db_env, peer_host, peer_port)
    except Exception, e:
        log.error('[Status] %s' % e)
        return bad_ret

def do_port_sync(port, UUID):

    """
    Sync the BDB databases of a single MDS *port* from a running peer.

    The transfer goes into a temporary env which is renamed into place
    only after the rsync completes, so a half-finished transfer never
    masquerades as a valid env.  All failures are logged and swallowed:
    every error path ends in the 'MDSSync failed' log message rather than
    an exception to the caller.

    #  Below are the steps involved in doing the port sync
    #1. Get the env root for the port and the mds peer host and port
    #2. Validate UUID
    #3. Get the peer env root
    #4. Check the env root existence. And create the local tmp env
    #5. Check and create tmp env on source node
    #6. Check free disk space
    #7. rsync the DB from the peer to this node
    #8. Run db_recover -cv
    #9. Enable and start the service
    #10. Wait until MDS service is initialized
    #11. Clean up tmp env on both remote and local host
    NOTE(review): steps 8-10 are currently commented out below; presumably
    recover/start is handled by the disk replacement procedure -- confirm.
    """

    # only one mdssync instance may work on a given port; a concurrent run
    # for the same port is stopped inside SingleInstance
    single_instance_lock = SingleInstance(flavor_id = port)

    try:
        # First, Make sure the mds is stopped
        ret_code = is_mds_stopped(port)
        if ret_code == False:
            raise Exception('MDS %d is running' % (port))

        #1. Get the env root for the port and the mds peer host and port
        ret_code, target_path, host, portnum = _mdscfg_get_db_env_and_running_peer(port)
        if ret_code == False:
            raise Exception('Failed to get valid peer host and port')

        #2. Validate UUID (the 'Invalic' typo below is a runtime string, kept as-is)
        ret_code = validateUUID(UUID, target_path)
        if ret_code == False:
            raise Exception('Invalic UUID')

        #3. Get the peer env root
        # Now, get the PEER mount path from the peer host and portnum
        ret_code, src_path = get_db_env_by_port(host, portnum)
        if ret_code == False:
            raise Exception('Failed to get mount path for host %s, port %d' % (host, portnum))

        # use a temp env for db files and log files transfer
        # rename to target env when transfer is done
        tmp_target_path = get_local_tmp_env(target_path)
        tmp_src_path = get_src_tmp_env(src_path)

        #4. Check the env root existence. And create the local tmp env
        ret_code = validate_and_create_env(target_path, tmp_target_path)
        if ret_code == False:
            raise Exception('Failed to create local tmp env')

        #5. Check and create tmp env on source node
        ret_code = create_src_temp_dir(host, tmp_src_path)
        if ret_code == False:
            raise Exception('Failed to create source tmp env')

        # start client heartbeat so the source side can tell this client
        # is still alive during the (potentially long) transfer
        client_heartbeat = Heartbeat(host, tmp_src_path)
        client_heartbeat.start()

        log.info("Start to sync data from %s:%s to %s" % (host, src_path, tmp_target_path))

        try:
            # disable the MDS so it cannot be (re)started mid-transfer
            set_mds_status(port, "disable")

            # We "cd" to the target directory and do "sync" and "dbrecover"
            os.chdir(tmp_target_path)

            #6. Check free disk space
            ret_code = check_available_disk_space(host, src_path, tmp_target_path)
            if ret_code == False:
                raise Exception('Disk space check failed')

            # source-side checkpoint is currently disabled:
            # ret_code = trigger_check_point(host, tmp_src_path, portnum)
            # if ret_code == False:
            #     raise Exception('Failed to do checkpoint on source node')

            #7. rsync the DB from the peer to this node
            ret_code = rsync_db_env_and_log_files(host, src_path)
            if ret_code == False:
                raise Exception('Failed to sync DB and log files')

            # move the tmp env to target env (transfer completed successfully)
            ret_code = move_tmp_env(tmp_target_path, target_path)
            if ret_code == False:
                raise Exception('Failed to move tmp env to target env')

            os.chdir(target_path)

            #8. Run db_recover -cv (currently disabled)
            # log.info("Running DB recover for MDS %d" % (port))
            # ret_code, console_string = commands.getstatusoutput("nice db_recover -cv")
            # log.info(console_string)
            # if ret_code != 0:
            #     log.error(console_string)
            #     raise Exception('Failed to recover DB')

            #9. Enable and start the service (currently disabled)
            # the mds disk replacement script will skip mds service start and chkconfig
            # so we need to do it here
            #enable the MDS
            # set_mds_status(port, "enable")
            # # Now go ahead and start the MDS service and chkconfig it
            # ret_code = start_mds_service(port)
            # if ret_code == False:
            #     raise Exception('Failed to start mds %s' % port)

            # #10. Wait until MDS service is initialized (currently disabled)
            # ret_code = wait_mds_initialized('localhost', port)
            # if ret_code == False:
            #     raise Exception('MDS %s not initialized' % port)

            #11. Clean up tmp env on both remote and local host
            # cleanup_local_tmp_env(target_path)
            client_heartbeat.stop()

            if KEEP_SPLIT:
                # do not clean up split dir on source side
                log.warn('-=================== WARNNING ===================-')
                log.warn('| You are running mdssync with -k option.')
                log.warn('| Please manually clean up temp directory on source side:')
                log.warn('| %s:%s' % (host, get_src_tmp_env(src_path)))
                log.warn('--------------------------------------------------')
            elif not client_heartbeat.is_any_other_living_instance():
                # only remove the source tmp env when no other client is
                # still syncing from it
                cleanup_remote_tmp_env(host, src_path)

        except Exception:
            # stop the heartbeat, point the operator at the leftover tmp
            # envs, and re-raise into the outer (logging) handler
            client_heartbeat.stop()
            log.warn('Please manually clean up following tmp env if necessary')
            log.warn('%s:%s' % (host, get_src_tmp_env(src_path)))
            log.warn('%s:%s' % ('localhost', get_local_tmp_env(target_path)))
            raise


    except Exception, e:
        log.error(e)
        log.error('MDSSync failed')

def do_drive_sync(UUID):
    """
    Run do_port_sync() for every MDS port configured on this node.

    Ports are discovered by scanning /etc/maui/mds/ for entries whose name
    contains a five-digit port number starting with "10" (e.g. 10401) --
    the same filter the previous "ls | grep 10[0-9][0-9][0-9]" shell
    pipeline applied, but without spawning a shell or leaking the pipe.
    """
    import re
    port_pattern = re.compile(r'10[0-9][0-9][0-9]')

    try:
        # sorted() to match the deterministic order 'ls' produced
        entries = sorted(os.listdir('/etc/maui/mds/'))
    except OSError as e:
        log.error('Failed to list /etc/maui/mds/: %s' % e)
        return

    # Run thru all the ports and do sync
    for entry in entries:
        if not port_pattern.search(entry):
            continue
        port = int(entry)
        log.info("=== For port %d" % (port))
        do_port_sync(port, UUID)
 
def is_mounted(FSUUID):
    """
    Return True if *FSUUID* appears in /proc/mounts, False otherwise.

    The FSUUID is checked to see if it is mounted; if it is not mounted we
    abort and let CE folks mount it.  We could have mounted the FSUUID
    ourselves, but for now we leave that to CE.

    Reads /proc/mounts directly instead of shelling out to grep: this
    avoids passing FSUUID through a shell command line, and a plain
    substring test is equivalent to the old grep since filesystem UUIDs
    contain no regex metacharacters.
    """
    log.info("FSUUID %s" % (FSUUID))

    mounted = False
    try:
        mounts = open('/proc/mounts')
        try:
            for line in mounts:
                if FSUUID in line:
                    mounted = True
                    break
        finally:
            mounts.close()
    except IOError as e:
        log.error("abort: cannot read /proc/mounts: %s" % e)
        return False

    if mounted:
        log.info("%s is mounted" % (FSUUID))
        return True
    else:
        log.error("abort: %s is not mounted" % (FSUUID))
        return False

def print_version_and_exit(program):
    """
    Print *program*'s version banner and terminate with exit status 0.

    The previous implementation never exited despite its name; callers had
    to follow up with their own sys.exit(0).  Exiting here is compatible:
    the only visible caller exits 0 immediately afterwards anyway.
    """
    print("")
    print("%s version number %s" % (program, VERSION))
    print("")
    sys.exit(0)

def print_mdssync_steps():
    """Print the dry-run description of the port sync procedure."""
    step_lines = (
        "",
        "#  Below are the steps involved in doing the port sync",
        "#1. Get the env root for the port and the mds peer host and port",
        "#2. Validate UUID",
        "#3. Get the peer env root",
        "#4. Check the env root existence. And create the local tmp env",
        "#5. Check and create tmp env on souce node",
        "#6. Check free disk space",
        "#7. rsync the DB from the peer to this node",
        "#8. Run db_recover -cv",
        "#9. Enable and start the service",
        "#10. Wait until MDS service is initialized",
        "#11. Clean up tmp env on both remote and local host",
        "",
        "",
    )
    # one write, identical output to printing each line separately
    print("\n".join(step_lines))

def set_log_tag(tag):
    """
    Embed *tag* in the format string of every handler on the module logger
    so log lines from the different operation modes (CLIENT / SPLIT /
    CHKPOINT) can be told apart, e.g.:

    2013-05-10 18:50:00,680 mdssync.py:1824 - PID:30087: - INFO: [CLIENT]: ===== MDS SYNC STARTED =====
    2013-05-21 16:30:12,231 mdssync.py:302 - PID:19275: - INFO: [SPLIT]: [SplitMerge] Split block #91. File: xxx
    2013-05-06 17:38:16,920 mdssync.py:1695 - PID:29578: - INFO: [CHKPOINT]: Trigger checkpoint for MDS 10404
    """
    tagged_format = ("%(asctime)s %(filename)s:%(lineno)d - PID:%(process)d: "
                     "- %(levelname)s: [" + tag + "]: %(message)s")
    tagged_formatter = logging.Formatter(tagged_format)
    for log_handler in log.handlers:
        log_handler.setFormatter(tagged_formatter)

def source_side_do_split_large_file(src_file, dest_dir):
    """
    Split a large DB file into pieces under *dest_dir* on the source node.

    A single-instance lock keyed on the file name ensures only one split of
    the same file runs at a time (duplicate invocations exit inside
    SingleInstance), and a heartbeat is kept alive for the duration of the
    split.  Returns the split() status code.
    """
    lock_flavor = os.path.basename(src_file) + '.split'
    instance_guard = SingleInstance(flavor_id = lock_flavor)

    heartbeat = Heartbeat('localhost', dest_dir, instance_id = HEARTBEAT_ID_SPLIT)
    heartbeat.start()

    splitter = SplitMerge(src_file, dest_dir)
    split_status = splitter.split()

    heartbeat.stop()
    return split_status

def source_side_do_checkpoint(port, tmp_env):
    """
    Trigger BDB checkpoints for MDS *port* on the source node.

    Runs 'mauisvcmgr ... triggercheckpoint' CHKPOINT_NUM times (sleeping
    CHKPOINT_SLEEP_SEC between rounds) and, on success, records the name of
    the latest BDB log file in a done-flag file under *tmp_env* so a re-run
    can skip the whole procedure.  A single-instance lock ensures only one
    checkpoint run per port (duplicates exit inside SingleInstance), and a
    heartbeat is kept alive while checkpointing.

    Returns 0 on success (or when checkpointing was already done), 1 when a
    triggercheckpoint command fails.
    """
    single_instance_lock = SingleInstance(flavor_id = 'chkpoint.%s' % port)
    # other chkpoint instances will exit at this point

    log.info("Trigger checkpoint for MDS %s" % port)

    ret_code = 0

    chkpoint_heartbeat = Heartbeat('localhost', tmp_env, instance_id = HEARTBEAT_ID_CHKPOINT)
    chkpoint_heartbeat.start()

    # skip everything if a previous run already left a done-flag file
    done_flag = os.path.join(tmp_env, CHKPOINT_DONE_FLAG)
    if not os.path.isfile(done_flag):
        # NOTE(review): the status returned by get_db_env_by_port is ignored
        # here, so db_env may be unusable if the lookup failed -- confirm
        _, db_env = get_db_env_by_port('localhost', port)
        last_log_file = get_latest_log_file('localhost', db_env)
        if last_log_file == '':
            # there is no BDB log file
            # so set the last log file to log.0000000000
            last_log_file = os.path.join(db_env, 'log.0000000000')

        log.info('Last log file: %s' % last_log_file)

        # do checkpoint CHKPOINT_NUM times
        cmd = 'mauisvcmgr -s mauimds -c triggercheckpoint -m localhost:%s' % port
        for i in xrange(CHKPOINT_NUM):
            # trigger checkpoint
            log.info('Trigger checkpoint #%s' % (i+1))
            status, output = commands.getstatusoutput(cmd)
            if status != 0:
                ret_code = 1
                log.error(output)
                break
            # wait for CHKPOINT_SLEEP_SEC seconds
            log.info('Wait for %s seconds' % CHKPOINT_SLEEP_SEC)
            time.sleep(CHKPOINT_SLEEP_SEC)

        # write checkpoint done flag file if success;
        # with-statement guarantees the file is closed even if write fails
        if ret_code == 0:
            log.info("Done checkpoint for MDS %s" % port)
            with open(done_flag, 'w') as done_flag_file:
                done_flag_file.write(last_log_file + '\n')
    else:
        log.info("Checkpoint for MDS %s already done" % port)

    chkpoint_heartbeat.stop()

    return ret_code


if __name__ == "__main__":
    # install the process watcher before doing anything else
    watcher = Watcher()

    usage = r'''
    %prog [options]

    %prog -u <UUID> [options] for drive sync
    %prog -p <PORT> [options] for port sync
    Example: %prog -p 10401
    '''

    parser = OptionParser(usage=usage)
    parser.add_option("-V", "--version", dest="version", action="store_true", help="for version")
    parser.add_option("-f", "--fast", dest="fast", action="store_true", help="for fast option.")
    parser.add_option("-s", "--stop", dest="stop", action="store_true", help="for don't start MDS option after rsync.")
    parser.add_option("-d", "--debug", dest="debug", action="store_true", help="enable debug mode")
    parser.add_option("-r", "--dryrun", dest="dryrun", action="store_true", help="for dryrun")
    parser.add_option("-u", "--UUID", dest="UUID", help="for drive sync.")
    parser.add_option("-p", "--port", dest="port", type="int", help="for syncing individual port")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="more verbose message")
    parser.add_option("-k", "--keep", dest="keep_split", action="store_true", help="Keep the splitted data")
    parser.add_option("-P", "--path", dest="path", help="path of the mdssync.py script")
    parser.add_option("--ignore-space", dest="ignore_space", action="store_true", help="force run even if disk space low")
    parser.add_option("--force", dest="force_run", action="store_true", help="force run even if peer is not master")
    parser.add_option("-R", "--reuse-split-data", dest="reuse_split", action="store_true", help="re-use the splitted data on peer node")
    # options for running as source side util
    parser.add_option("-o", "--operation", dest="operation", help="[Source Side Option] 'chkpoint' or 'split'")
    parser.add_option("--src_file", dest="src_file", help="[Source Side Option] <split> source file")
    parser.add_option("--dest_dir", dest="dest_dir", help="[Source Side Option]\n\
                                                                    \t\t<split> dest directory\
                                                                    \t\t<chkpoint> tmp env for heartbeat and flag file")
    (options, args) = parser.parse_args()

    if options.version:
        print_version_and_exit(sys.argv[0])
        sys.exit(0)

    # fast option will have no effect when peer node is in the same IS
    # because data will be transfered via eth0
    if options.fast:
        RSYNC_BANDWIDTH = '--bwlimit=10240'

    # map boolean options onto the module-level switches read elsewhere
    if options.stop:
        NOSTART = True
    if options.ignore_space:
        IGNORE_SPACE = True
    if options.force_run:
        FORCE_RUN = True
    if options.reuse_split:
        REUSE_SPLIT = True

    if options.dryrun:
        print_mdssync_steps()
        sys.exit(0)

    # NOTE: a second, duplicated "if options.dryrun: pdb.set_trace()" block
    # used to live here; it was unreachable because the dryrun branch above
    # always exits, so it has been removed (debug is handled further down).

    if options.verbose:
        DEBUG_MODE = True

    if DEBUG_MODE:
        newlevel = logging.DEBUG
        log.setLevel(newlevel)
        for h in log.handlers:
            h.setLevel(newlevel)

    if options.keep_split:
        KEEP_SPLIT = True

    if options.path:
        SPLIT_RESTORE_FILE_UTIL = options.path

    # running the script as a source side tool for split db files,
    # trigger split large db files
    if options.operation == SOURCE_UTIL_OP_SPLIT:
        set_log_tag('SPLIT')
        if options.src_file and options.dest_dir:
            ret_code = source_side_do_split_large_file(options.src_file, options.dest_dir)
            sys.exit(ret_code)
        else:
            parser.print_help()
            sys.exit(1)

    # trigger check points
    elif options.operation == SOURCE_UTIL_OP_CHKPOINT:
        set_log_tag('CHKPOINT')
        if options.port and options.dest_dir:
            ret_code = source_side_do_checkpoint(options.port, options.dest_dir)
            sys.exit(ret_code)
        else:
            parser.print_help()
            sys.exit(1)

    # normal client mode: full drive sync (-u) or single port sync (-p)
    set_log_tag('CLIENT')

    log.info("===== MDS SYNC STARTED =====")

    if options.debug:
        pdb.set_trace()

    if options.UUID:
        UUID = options.UUID

        # abort unless the replaced drive is already mounted (CE mounts it)
        ret_code = is_mounted(UUID)
        if ret_code == False:
            sys.exit(1)

        do_drive_sync(UUID)
        log.info("===== END =====")
        sys.exit(0)
    elif options.port:
        port = options.port
        log.info("===== Sync MDS for port %d =====" % port)

        ret_code = mds_service.is_valid_mds_port(port)
        if ret_code == False:
            sys.exit(1)

        UUID = ""
        do_port_sync(port, UUID)
        log.info("===== END =====")
        sys.exit(0)
    else:
        parser.print_help()
        sys.exit(1)

    # defensive: every branch above already exits
    sys.exit(0)
