#!/usr/bin/python
# __CR__
# Copyright (c) 2008-2013 EMC Corporation
# All Rights Reserved
#
# This software contains the intellectual property of EMC Corporation
# or is licensed to EMC Corporation from third parties.  Use of this
# software and the intellectual property contained therein is expressly
# limited to the terms and conditions of the License Agreement under which
# it is provided by or on behalf of EMC.
# __CR__

"""
 Author: Sunil Yadawad

 Since we are disabling BDB auto-init in order to improve the MDS safety.
 This impacts the MDS disk replacement procedure as we cannot rely on the
 BDB automated initialization + replication to sync up the new MDS databases
 on the replaced disk. MDS Sync script uses rsync internally and identifies
 the correct MDS set and host+port for the rsync target on the replaced MDS
 disk and the host+port for the running master for the sync source. 
 NOTE: The script can also take some additional options to sync up individual
 port
"""

import sys
import os
import re
import time
import datetime
import subprocess
import commands
import tempfile
import socket
import common
import service
import mds_service
import pdb
import string
import logging
import glob
import shutil
import threading
import Queue
import signal
from optparse import OptionParser


# Minimum number of MDS disks expected on a node.
MINIMUM_MDS_DISK_COUNT = 1

LOGFILE = "/var/log/maui/mdssync.log"
log = common.get_file_logger(LOGFILE, 'mdssync')
# NOTE(review): this rebinding discards the file logger created on the
# previous line, so module-level logging goes to the console only --
# confirm that is intended.
log = common.get_console_logger('mdssync')

VERSION = "4.0.0"
# rsync bandwidth cap, applied to cross-segment transfers only
# (see LargeFileTransfer.__transfer_split_blocks)
RSYNC_BANDWIDTH = '--bwlimit=5120'
# Runtime behavior flags; presumably overridden from command-line options.
NOSTART      = False
IGNORE_SPACE = False
FORCE_RUN    = False

HOSTS_FILE                 = "/etc/hosts"
MGMT_HOST_SUFFIX           = "-mgmt"
# Free disk space thresholds (ratios) for the local target and the source.
DISK_SPACE_THRESHOLD_RATIO_LOCAL = 1.5
DISK_SPACE_THRESHOLD_RATIO_SRC   = 0.2
# reserve extra 5GB disk space
EXTRA_DISP_SPACE           = 5*1024*1024*1024
# Seconds to sleep between source-side checkpoints, and how many to take.
CHKPOINT_SLEEP_SEC         = 120
CHKPOINT_NUM               = 2

# only apply split-restore to files larger than 5GB
SPLIT_FILE_SIZE_THRESHOLD  = 5*1024*1024*1024

# This same script is re-invoked over ssh on the source host for the
# split / tar / chkpoint helper operations (see SOURCE_UTIL_OP_* below).
SPLIT_RESTORE_FILE_UTIL    = os.path.abspath(__file__)
SPLIT_DIR_SUFFIX           = ".split"
SRC_MDSSYNC_TMP_DIR        = "mdssync.d"
LOCAL_TMP_ENV_SUFFIX       = ".mdssync.tmp"
LOG_TARBALL                = "logs.mdssync.tgz"
LOG_TAR_DONE_FLAG_SUFFIX   = '.done'
FLAG_RESULT_SUCCESS        = 'SUCCESS'
FLAG_RESULT_FAILED         = 'FAILED'
CHKPOINT_DONE_FLAG         = 'chkpoint.done'


# Retry budget while waiting for a replaced MDS to report initialized
# (60 tries x 10 sec = up to 10 minutes).
INITED_RETRY_TIMES         = 60
INITED_RETRY_INTERVAL      = 10
RSYNC_MAX_RETRY_COUNT      = 10

# Polling intervals (seconds) for remote tar-log / checkpoint completion.
TAR_LOG_CHK_INTERVAL       = 10
CHKPOINT_CHK_INTERVAL      = 10

# Keep local split directories after a successful restore (debugging aid).
KEEP_SPLIT                 = False

# Instance ids used in Heartbeat flag-file names for the source helpers.
HEARTBEAT_ID_SPLIT    = 'split'
HEARTBEAT_ID_TAR_LOG  = 'tarlog'
HEARTBEAT_ID_CHKPOINT = 'checkpoint'

# Operation names accepted when this script runs as a source-side utility.
SOURCE_UTIL_OP_SPLIT    = 'split'
SOURCE_UTIL_OP_TAR      = 'tar'
SOURCE_UTIL_OP_CHKPOINT = 'chkpoint'

class Watcher:
    """This class solves two problems with multithreaded
    programs in Python: (1) a signal might be delivered
    to any thread (which is just a malfeature) and (2) if
    the thread that gets the signal is waiting, the signal
    is ignored (which is a bug).

    Usage: instantiate Watcher early in main.  The process forks; the
    child carries on with the program while the parent sits in watch()
    waiting for Ctrl-C, at which point it SIGKILLs the child.
    """

    def __init__(self):
        """ Creates a child process, which returns.  The parent
            process waits for a KeyboardInterrupt and then kills
            the child process.
        """
        self.child = os.fork()
        if self.child == 0:
            # Child: return to the caller and continue normal execution.
            return
        else:
            # Parent: never returns; exits in watch().
            self.watch()

    def watch(self):
        # Block until the child exits, or a KeyboardInterrupt arrives,
        # in which case forcibly kill the child.  Either way, exit.
        try:
            os.wait()
        except KeyboardInterrupt:
            self.kill()
        sys.exit()

    def kill(self):
        # SIGKILL the child; it may already be gone, so ignore OSError.
        try:
            os.kill(self.child, signal.SIGKILL)
        except OSError: pass


# SingleInstance uses a global logger, create an alias for log
logger = log
# copied from tendo library http://pypi.python.org/pypi/tendo
# LICENSE: PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# https://github.com/pycontribs/tendo/blob/master/tendo/singleton.py
class SingleInstance:
    """
    If you want to prevent your script from running in parallel just instantiate SingleInstance() class. If there is another instance already running it will exit the application with the message "Another instance is already running, quitting.", returning the RET_CODE_EXISTING_INSTANCE error code.

    >>> import tendo
    ... me = SingleInstance()

    This option is very useful if you have scripts executed by crontab at small amounts of time.

    Remember that this works by creating a lock file with a filename based on the full path to the script file.
    """

    # Exit code used when another instance already holds the lock.
    RET_CODE_EXISTING_INSTANCE = 64

    def __init__(self, flavor_id=""):
        import sys
        self.initialized = False
        # Build a lock-file name from the script's absolute path so each
        # distinct script (plus optional flavor_id) gets its own lock.
        basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        # os.path.splitext(os.path.abspath(sys.modules['__main__'].__file__))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        self.lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)

        logger.debug("SingleInstance lockfile: " + self.lockfile)
        if sys.platform == 'win32':
            try:
                # file already exists, we try to remove (in case previous execution was interrupted)
                if os.path.exists(self.lockfile):
                    os.unlink(self.lockfile)
                # O_EXCL makes creation fail if another instance holds the file.
                self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except OSError:
                _, e, _ = sys.exc_info()
                # errno 13 == EACCES: another process has the file open.
                if e.errno == 13:
                    logger.error("Another instance is already running, quitting.")
                    # sys.exit(-1)
                    sys.exit(self.RET_CODE_EXISTING_INSTANCE)
                print(e.errno)
                raise
        else:  # non Windows
            import fcntl
            # Take an exclusive, non-blocking flock on the lock file;
            # IOError means another instance already holds it.
            self.fp = open(self.lockfile, 'w')
            try:
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                logger.warning("Another instance is already running, quitting.")
                # sys.exit(-1)
                sys.exit(self.RET_CODE_EXISTING_INSTANCE)
        self.initialized = True

    def __del__(self):
        import sys
        # Only clean up if __init__ completed and we actually own the lock.
        if not self.initialized:
            return
        try:
            if sys.platform == 'win32':
                if hasattr(self, 'fd'):
                    os.close(self.fd)
                    os.unlink(self.lockfile)
            else:
                import fcntl
                fcntl.lockf(self.fp, fcntl.LOCK_UN)
                #os.close(self.fp)
                if os.path.isfile(self.lockfile):
                    os.unlink(self.lockfile)
        except Exception, e:
            logger.warning(e)
            sys.exit(-1)


class SplitMerge(object):
    """
    Utility to:
        1. Split a large file into small blocks.
        2. Merge split blocks into one file.

    A split directory contains gzip-compressed chunks named
    'block-<n>.gz' plus a '.done' flag file whose single line is
    '<source file path>:<epoch seconds of the split>'.
    """

    # default block is 16k
    # BLOCK_SIZE is the dd transfer block size; all file offsets are
    # expressed as multiples of it (dd skip/seek are in BLOCK_SIZE units).
    BLOCK_SIZE  = 16384
    
    # default chunk size is 10G
    KILO_BYTE   = 1024
    MEGA_BYTE   = 1048576
    GIGA_BYTE   = 1073741824
    CHUNK_UNIT  = GIGA_BYTE
    # Each block-<n>.gz holds (up to) CHUNK_SIZE bytes of uncompressed data.
    CHUNK_SIZE  = 5 * CHUNK_UNIT
    #CHUNK_UNIT = MEGA_BYTE
    #CHUNK_SIZE = 15 * CHUNK_UNIT
    
    BLOCK_BASE_NAME = 'block'
    BLOCK_TMP_NAME  = '.blocktmp'
    DONE_FLAG_FILE  = '.done'

    SPLIT_EXPIRATION_HOUR              = 12
    
    # Result codes returned by split()/restore()/restore_block().
    RESULT_OK                          = 0
    RESULT_SPLIT_OUTPUT_DIR_INCOMPLETE = 1
    RESULT_SPLIT_OUTPUT_DIR_WRONG_SRC  = 2
    RESULT_SPLIT_OUTPUT_DIR_EXPIRED    = 3
    RESULT_SPLIT_SRC_FILE_NOT_EXIST    = 4
    RESULT_SPLIT_FAILED_CREATE_DIR     = 5
    RESULT_SPLIT_FAILED_SPLIT          = 6
    RESULT_RESTORE_SPLIT_DIR_NOT_EXIST = 7
    RESULT_RESTORE_FAILED_RESTORE      = 8
    RESULT_RESTORE_INVALID_SRC_BLOCK   = 9

    def __init__(self, src_file, split_dir):
        """src_file: the large file; split_dir: directory holding its blocks."""
        super(SplitMerge, self).__init__()
        self.__src_file = src_file
        self.__split_dir = split_dir

    def __verify_output(self):
        """
        verify the output directory to check whether the splited file blocks could be reused or not

        Returns RESULT_OK when the existing split directory is a complete
        split of self.__src_file, otherwise a RESULT_SPLIT_OUTPUT_DIR_*
        code explaining why it cannot be reused.
        """
        log.info('[SplitMerge] Validate existing split directory')
        split_done_file = os.path.join(self.__split_dir, self.DONE_FLAG_FILE)
        # if it's not a fully splitted output, maybe killed half way, then it's invalid
        if not os.path.isfile(split_done_file):
            log.error('[SplitMerge] <NEED MANUAL CHECK> Output directory %s already exists. It may be an incomplete copy of last split.' \
                % self.__split_dir)
            return self.RESULT_SPLIT_OUTPUT_DIR_INCOMPLETE

        # check if it's for different file splitting
        # The .done file records '<src_file>:<timestamp>'.
        split_done = open(split_done_file, 'r')
        split_flag_file_txt = split_done.readline()
        [split_filename, split_timestamp] = split_flag_file_txt.split(':', 1)
        split_done.close()
        if split_filename != self.__src_file:
            log.error('[SplitMerge] <NEED MANUAL CHECK> Output directory %s already exists. It is split for a different source file %s.' \
                % (self.__split_dir, split_filename))
            return self.RESULT_SPLIT_OUTPUT_DIR_WRONG_SRC

        # if file was splitted more than 12 hours ago, we consider it's invalid
        # FIXME: skip expiration check. always re-use the complete split env
        # split_time = datetime.datetime.fromtimestamp(float(split_timestamp))
        # current_time = datetime.datetime.now()
        # time_delta = current_time - split_time
        # time_delta_hour = time_delta.days * 24 + time_delta.seconds / 3600
        # if time_delta_hour > self.SPLIT_EXPIRATION_HOUR:
        #     log.error('[SplitMerge] <NEED MANUAL CHECK> Output directory %s already exists. It has expired.' \
        #         % self.__split_dir)
        #     return self.RESULT_SPLIT_OUTPUT_DIR_EXPIRED

        log.info('[SplitMerge] Existing split directory is valid')
        return self.RESULT_OK

    def __do_split(self):
        """Cut self.__src_file into CHUNK_SIZE pieces via dd|gzip, then
        write the .done flag file.  Returns RESULT_OK or
        RESULT_SPLIT_FAILED_SPLIT."""
        log.info('[SplitMerge] Start a new split process')
        log.info('[SplitMerge] Split %s into %s' % (self.__src_file, self.__split_dir))
        offset = 0
        is_last_chunk = False
        block = 0
        tmp_block = os.path.join(self.__split_dir, self.BLOCK_TMP_NAME)
        while not is_last_chunk:
            # Re-read the size each round: the source file may change size
            # while we are splitting it.
            src_file_size = os.path.getsize(self.__src_file)
            if offset > src_file_size:
                log.info('[SplitMerge] Split done! File: %s' % self.__src_file)
                break

            left = src_file_size - offset
            # dd's skip is expressed in BLOCK_SIZE units.
            skip = int(offset / self.BLOCK_SIZE)
            log.info('[SplitMerge] Split block #%s. File: %s' % (block, self.__src_file))

            # cut and compress a chunk
            # split imcrement at self.BLOCK_SIZE
            # split the chunk to a tmp block
            if left > self.CHUNK_SIZE:
                count = int(self.CHUNK_SIZE / self.BLOCK_SIZE)
                cmd = 'dd if=%s bs=%s skip=%s count=%s | gzip > %s' \
                    % (self.__src_file, self.BLOCK_SIZE, skip, count, tmp_block)
                # set the new offset
                offset += self.CHUNK_SIZE
            else:
                # remaining data is smaller than the self.CHUNK_SIZE
                # it is the last chunk
                # No count: dd copies from skip to end-of-file.
                cmd = 'dd if=%s bs=%s skip=%s | gzip > %s' \
                    % (self.__src_file, self.BLOCK_SIZE, skip, tmp_block)

                # doesn't set the new offset, since the file may grow at the same time
                # is_last_chunk flag will just skip the next round dd
                is_last_chunk = True

            log.debug('[SplitMerge] <RUN> %s' % cmd)
            status, _ = commands.getstatusoutput(cmd)
            if status != 0:
                log.error('[SplitMerge] Failed to split file %s' % self.__src_file)
                return self.RESULT_SPLIT_FAILED_SPLIT

            # then rename the tmp block
            # The tmp-then-rename dance makes block-<n>.gz appear atomically,
            # so readers polling the directory never see a half-written block.
            dest_block = os.path.join(self.__split_dir, '%s-%s.gz' % (self.BLOCK_BASE_NAME, block))
            shutil.move(tmp_block, dest_block)    

            block += 1

        # Make a mark file to indicate the splitting is done
        utc_seconds = int(time.mktime(datetime.datetime.now().timetuple()))
        split_done_txt = '%s:%s' % (self.__src_file, utc_seconds)
        split_done_file = os.path.join(self.__split_dir, self.DONE_FLAG_FILE)
        split_done = open(split_done_file, 'w')
        split_done.write(split_done_txt)
        split_done.close()

        return self.RESULT_OK

    def split(self):
        """
        Split self.__src_file -> self.__split_dir/BLOCKS

        Reuses a valid pre-existing split directory; otherwise backs up
        the stale one, recreates it, and performs a fresh split.
        Returns RESULT_OK or a RESULT_SPLIT_* error code.
        """

        log.info('[SplitMerge] Split file %s into directory %s' % (self.__src_file, self.__split_dir))
        
        # check if src file exists
        if not os.path.isfile(self.__src_file):
            log.error('[SplitMerge] Source file %s does not exist!' % self.__src_file)
            return self.RESULT_SPLIT_SRC_FILE_NOT_EXIST

        # check if dest dir exists
        if os.path.isdir(self.__split_dir):
            verify_result = self.__verify_output()
            if verify_result == self.RESULT_OK:
                # Complete split from an earlier run; reuse it as-is.
                return self.RESULT_OK
            else:
                # there is some problem with the existing dest split dir
                # back up the existing dir
                current_time = datetime.datetime.now()
                utc_seconds = int(time.mktime(current_time.timetuple()))
                backup_directory = '%s.%s' % (self.__split_dir, utc_seconds)
                log.warning('[SplitMerge] <NEED MANUAL CHECK> Move existing output directory %s to %s.' % \
                    (self.__split_dir, backup_directory))
                shutil.move(self.__split_dir, backup_directory)

        # create the dest directory
        try:
            os.mkdir(self.__split_dir)
        except Exception, e:
            log.error(e)
            log.error('[SplitMerge] Failed to create output directory %s' % self.__split_dir)
            return self.RESULT_SPLIT_FAILED_CREATE_DIR

        # do the file splitting
        result = self.__do_split()
        return result

    def restore(self):
        """
        Merge self.__split_dir/BLOCKS -> self.__src_file

        Decompresses block-0.gz, block-1.gz, ... in order, dd-ing each
        back to its CHUNK_SIZE-aligned offset.  Stops at the first
        missing block number.  Returns RESULT_OK or a RESULT_RESTORE_*.
        """

        log.info('[SplitMerge] Restore file %s from directory %s.' % (self.__src_file, self.__split_dir))
        if not os.path.isdir(self.__split_dir):
            log.error('[SplitMerge] Directory %s does not exist!' % self.__split_dir)
            return self.RESULT_RESTORE_SPLIT_DIR_NOT_EXIST

        block = 0
        offset = 0
        while True:
            src_block = os.path.join(self.__split_dir, '%s-%s.gz' % (self.BLOCK_BASE_NAME, block))
            # Blocks are numbered consecutively, so a missing number means
            # all blocks have been consumed.
            if not os.path.isfile(src_block):
                log.info('[SplitMerge] Restore finished.')
                break

            log.info('[SplitMerge] Restore file block: %s' % src_block)

            # dd's seek is expressed in BLOCK_SIZE units.
            seek = int(offset / self.BLOCK_SIZE)
            cmd = 'gunzip -c %s | dd bs=%s seek=%s of=%s' \
                % (src_block, self.BLOCK_SIZE, seek, self.__src_file)
            log.debug('[SplitMerge] <RUN> %s' % cmd)
            status, _ = commands.getstatusoutput(cmd)
            if status != 0:
                log.error('[SplitMerge] Failed to restore file %s' % self.__src_file)
                return self.RESULT_RESTORE_FAILED_RESTORE

            offset += self.CHUNK_SIZE
            block += 1

        return self.RESULT_OK

    @classmethod
    def restore_block(cls, src_block, dest_file):
        """Restore a single 'block-<n>.gz' into dest_file at the offset
        implied by its block number (n * CHUNK_SIZE).  Returns RESULT_OK,
        RESULT_RESTORE_INVALID_SRC_BLOCK, or RESULT_RESTORE_FAILED_RESTORE."""
        result = cls.RESULT_OK

        if not os.path.isfile(src_block):
            log.error('[SplitMerge] Block %s does not exist' % src_block)
            result = cls.RESULT_RESTORE_INVALID_SRC_BLOCK
        else:

            log.info('[SplitMerge] Restore block %s to file %s' % (src_block, dest_file))

            try:
                # block-<block_num>.gz
                # Parse the block number from the file name to compute the
                # byte offset this block belongs at.
                block_num_str = src_block.rsplit('-', 1)[-1].split('.', 1)[0]
                block_num = int(block_num_str)
                offset = block_num * cls.CHUNK_SIZE

                seek = int(offset / cls.BLOCK_SIZE)
                cmd = 'gunzip -c %s | dd bs=%s seek=%s of=%s' \
                    % (src_block, cls.BLOCK_SIZE, seek, dest_file)
                log.debug('[SplitMerge] <RUN> %s' % cmd)
                status, _ = commands.getstatusoutput(cmd)
                if status != 0:
                    log.error('[SplitMerge] Failed to restore block %s to file %s' % (src_block, dest_file))
                    result = cls.RESULT_RESTORE_FAILED_RESTORE
                else:
                    result = cls.RESULT_OK
            except Exception:
                # e.g. a file name whose block number is not an integer
                result = cls.RESULT_RESTORE_INVALID_SRC_BLOCK
        return result


class Heartbeat(object):
    """
    Heartbeat class solve 2 problems:
        1. There might be multiple mdssync instances syncing from the same source mds.
           The split process on the source side split each db file only once for all mdssync clients
           and clear the temp split env when all mdssync clients are done successfully.
           All mdssync clients maintain a heartbeat in order to indicate the progress.
           The last mdssync client will find there is no heartbeat from other mdssync clients
           and clear the temp split env
        2. The spliting for a single block may take long time in systems with heavy load.
           The split process on the source side maintain a heartbeat to notify all mdssync clients
           that the split is in progress.

    The heartbeat itself is simply a flag file on the remote host whose
    mtime is refreshed (via ssh 'touch') by a background daemon thread.
    """

    HEARTBEAT_FILE_PREFIX     = '.heartbeat'
    # if there is no heartbeat in (2 * __HEARTBEAT_INTERVAL) time
    # it is reasonable to determine that this instance is dead
    __HEARTBEAT_INTERVAL      = 60
    __CHECK_INSTANCE_INTERVAL = 10
    __CHECK_INSTANCE_RETRY    = int(2 * __HEARTBEAT_INTERVAL / __CHECK_INSTANCE_INTERVAL)

    def __init__(self, host, directory, instance_id = None):
        """
        The heartbeat flag file will be:
            <host>:<directory>/.heartbeat.<instance_id>

        instance_id defaults to the local hostname, so each mdssync
        client gets its own flag file on the shared source directory.
        """
        super(Heartbeat, self).__init__()
        self.__host = host
        if instance_id == None:
            instance_id = socket.gethostname()
        self.__instance_id = instance_id
        self.__heartbeat_file_dir = directory
        self.__heartbeat_file = os.path.join(self.__heartbeat_file_dir, 
            '%s.%s' % (self.HEARTBEAT_FILE_PREFIX, self.__instance_id))
        # Daemon thread so a hung heartbeat never blocks process exit.
        self.__heartbeat_thread = threading.Thread(target = self.__heartbeat_worker)
        self.__heartbeat_thread.daemon = True
        self.__stop_event = threading.Event()

    def start(self):
        # Begin refreshing the remote flag file in the background.
        log.debug('[Heartbeat] Start heartbeat: %s:%s' % (self.__host, self.__heartbeat_file))
        self.__heartbeat_thread.start()

    def __heartbeat_worker(self):
        # Touch the remote flag file every __HEARTBEAT_INTERVAL seconds
        # until stop() sets the event (wait() doubles as the sleep).
        while not self.__stop_event.isSet():
            # touch a flag file on peer node
            cmd = 'touch %s' % self.__heartbeat_file
            log.debug('[Heartbeat] <HEARTBEAT>: %s:%s' % (self.__host, self.__heartbeat_file))
            common.ssh(self.__host, cmd)
            self.__stop_event.wait(self.__HEARTBEAT_INTERVAL)

    def stop(self):
        # Stop the worker thread and remove the remote flag file.
        log.debug('[Heartbeat] Stop heartbeat: %s:%s' % (self.__host, self.__heartbeat_file))
        self.__stop_event.set()
        self.__heartbeat_thread.join()
        cmd = 'rm -f %s' % self.__heartbeat_file
        common.ssh(self.__host, cmd)

    def is_alive(self):
        """Check the age of the remote flag file.

        Returns True when the last touch was within 2 * interval, False
        when it is older (or the ssh call raised), and None when the age
        could not be determined (e.g. unparsable output).
        """
        result = None
        # current time - last modification time
        # The '$'s are escaped so the arithmetic is evaluated on the remote
        # shell, not locally -- presumably common.ssh goes through a shell.
        cmd = 'echo \\$((\\$(date +%%s)-\\$(stat %s -c %%Y)))' % self.__heartbeat_file
        try:
            status, output = common.ssh(self.__host, cmd)
            if status == 0:
                try:
                    last_heartbeat_second = int(output)
                    log.debug('[Heartbeat] Last heartbeat %s sec ago' % last_heartbeat_second)
                    if last_heartbeat_second <= 2 * self.__HEARTBEAT_INTERVAL:
                        result = True
                    else:
                        result = False
                except Exception:
                    result = None
        except Exception, e:
            log.error(e)
            result = False
        return result

    def is_any_other_living_instance(self):
        """
        Check if there is any other running MDSSync instances for the same peer host:port

        Lists the remote heartbeat flag files repeatedly; if any listing
        (including mtimes, via ls -l) changes between polls, some other
        instance refreshed its flag and is therefore still alive.
        """
        is_running_instance = False
        existing_instances_str = None
        cmd = 'ls -l %s/%s.*' % (self.__heartbeat_file_dir, self.HEARTBEAT_FILE_PREFIX)
        for _ in xrange(self.__CHECK_INSTANCE_RETRY):
            status, output = common.ssh(self.__host, cmd)
            log.debug('[Heartbeat] ' + output)
            if status != 0:
                if 'No such file or directory' in output:
                    # no other heartbeat flag file
                    break
                else:
                    log.error('[Heartbeat] ' + output)
            else:
                if existing_instances_str != None and output != existing_instances_str:
                    # some heartbeat flag file is refreshed
                    # there is some other running instance
                    is_running_instance = True
                    break
                existing_instances_str = output
            time.sleep(self.__CHECK_INSTANCE_INTERVAL)

        return is_running_instance


class LargeFileTransfer(object):
    """
    Manage the split, transfer and restore of all large db files, in async way.

    Producer/consumer pipeline over two queues:
        split threads  -- ask the source host (over ssh) to split each large
                          db file into gzip blocks, rsync finished blocks
                          over, and enqueue each block for restore;
        restore threads -- gunzip+dd each transferred block into place.
    A tuple of Nones is the shutdown sentinel on both queues.  Any worker
    failure flips ASYNC_SPLIT_RESTORE_RESULT to False, which makes the
    remaining workers drain and exit.
    """

    __SPLIT_BLOCK_TMP      = ".blocktmp"
    __SPLIT_FLAG_DONE      = ".done"
    
    # States returned by __wait_for_split_update.
    __SPLIT_STATUS_INIT    = 0
    __SPLIT_STATUS_UPDATED = 1
    __SPLIT_STATUS_DONE    = 2
    
    # Seconds between polls of the remote split directory.
    __SPLIT_CHK_INTERVAL   = 10
    
    __SPLIT_THREAD_COUNT   = 2
    __RESTORE_THREAD_COUNT = 3


    def __init__(self, file_list, src_host, src_path, is_same_segment):
        """
        file_list:       local destination paths of the large db files
        src_host:        host to pull from
        src_path:        directory on src_host holding the db files
        is_same_segment: when False, rsync is bandwidth-limited
        """
        super(LargeFileTransfer, self).__init__()
        self.__SPLIT_Q   = Queue.Queue()
        self.__RESTORE_Q = Queue.Queue()

        self.__SPLIT_THREADS    = []
        self.__RESTORE_THREADS  = []

        self.__file_list = file_list
        self.__src_host = src_host
        self.__src_path = src_path
        self.__is_same_segment = is_same_segment
        
        # Shared success flag; set to False by any failing worker.
        self.ASYNC_SPLIT_RESTORE_RESULT = True

        self.__init__threads()

    # NOTE: the leading '__' makes this a name-mangled private method,
    # not a dunder -- it merely starts the worker thread pools.
    def __init__threads(self):
        for i in xrange(self.__SPLIT_THREAD_COUNT):
            t = threading.Thread(target = self.__split_transfer_file_worker)
            t.daemon = True
            t.start()
            self.__SPLIT_THREADS.append(t)
        for i in xrange(self.__RESTORE_THREAD_COUNT):
            # t = threading.Thread(target = self.__restore_file_worker)
            t = threading.Thread(target = self.__restore_block_worker)
            t.daemon = True
            t.start()
            self.__RESTORE_THREADS.append(t)

    def start(self):
        """Enqueue one split/restore task per large db file, then enqueue
        the shutdown sentinels.  Returns False if task creation failed."""
        ret_code = True
        try:
            log.info("[LargeFileTransfer] Syncing large db files")
            for db_file in self.__file_list:
                src_file = os.path.join(self.__src_path, os.path.basename(db_file))
                log.info("[LargeFileTransfer] Start async transfer %s:%s to %s" %(self.__src_host, src_file, db_file))
                self.__add_new_split_restore_task(src_file, db_file)
        except Exception, e:
            log.error("[LargeFileTransfer] Exception when starting syncing large db files")
            log.error(e)
            ret_code = False
        self.__all_tasks_added()
        return ret_code

    def __add_new_split_restore_task(self, src_file, dest_file):
        # Split dir on the source lives under <src_path>/mdssync.d/<name>.split;
        # locally the blocks land in <name>.split in the working directory.
        dest_tmp_dir = os.path.basename(src_file) + SPLIT_DIR_SUFFIX
        src_tmp_dir = os.path.join(self.__src_path, SRC_MDSSYNC_TMP_DIR, dest_tmp_dir)

        self.__SPLIT_Q.put((src_file, src_tmp_dir, dest_tmp_dir, dest_file))

    def __split_transfer_file_worker(self):
        # Consume split tasks until a sentinel or a global failure; forward
        # a sentinel to the restore queue on the way out so restore workers
        # can also terminate.
        while True:
            (src_file, src_tmp_dir, dest_tmp_dir, dest_file) = self.__SPLIT_Q.get(True)
            if src_file == None or self.ASYNC_SPLIT_RESTORE_RESULT == False:
                self.__RESTORE_Q.put((None, None))
                break

            try:
                self.__start_split_file(src_file, src_tmp_dir)
                self.__transfer_split_file(src_tmp_dir, dest_tmp_dir, dest_file)
                # self.__RESTORE_Q.put((dest_tmp_dir, dest_file))
            except Exception, e:
                log.error(e)
                self.ASYNC_SPLIT_RESTORE_RESULT = False
                self.__RESTORE_Q.put((None, None))
                break
            
    def __start_split_file(self, src_file, src_tmp_dir):
        # Kick off this same script on the source host (backgrounded) to do
        # the split.  RET_CODE_EXISTING_INSTANCE means another client already
        # started the split for this file -- that is fine, we just poll it.
        cmd = "%s -o %s --src_file %s --dest_dir %s -v > /dev/null 2>&1 &" % \
            (SPLIT_RESTORE_FILE_UTIL, SOURCE_UTIL_OP_SPLIT, src_file, src_tmp_dir)
        log.debug("[LargeFileTransfer] Run: %s" % cmd)
        status, _ = common.ssh(self.__src_host, cmd)
        if status != 0 and status != SingleInstance.RET_CODE_EXISTING_INSTANCE:
            raise Exception("Start split file %s:%s failed." % (self.__src_host, src_file))

    def __transfer_split_file(self, src_tmp_dir, dest_tmp_dir, dest_file):
        """Poll the remote split dir, rsync newly finished blocks over, and
        enqueue each new block for restore until the split reports done."""
        split_block_list = []
        status = self.__SPLIT_STATUS_INIT

        split_heartbeat = Heartbeat(self.__src_host, src_tmp_dir, instance_id = HEARTBEAT_ID_SPLIT)

        while status != self.__SPLIT_STATUS_DONE:
            status, split_block_list, new_block_list = self.__wait_for_split_update(src_tmp_dir, split_block_list, split_heartbeat)
            # Bandwidth-limit the transfer when crossing segments.
            self.__transfer_split_blocks(src_tmp_dir, dest_tmp_dir, not self.__is_same_segment)
            for block in new_block_list:
                src_block = os.path.join(dest_tmp_dir, block)
                self.__RESTORE_Q.put((src_block, dest_file))
            
        log.info("[LargeFileTransfer] %s tranfered successfully." % dest_tmp_dir)

    def __wait_for_split_update(self, src_tmp_dir, split_block_list, split_heartbeat):
        """
          Check if split process has generated new block

          return: self.__SPLIT_STATUS_DONE if split is done
                  self.__SPLIT_STATUS_UPDATED is new block is generated but entire process is not done
                  raise exception if connection error or split time out
        """
        log.info("[LargeFileTransfer] Wait for split at %s:%s" % (self.__src_host, src_tmp_dir))
        status = self.__SPLIT_STATUS_INIT
        while True:
            time.sleep(self.__SPLIT_CHK_INTERVAL)
            cur_split_block_list = self.__get_split_block_list(src_tmp_dir)
            # Only an explicit False (heartbeat too old) is treated as a dead
            # splitter; None (transient check failure) keeps us polling.
            if split_heartbeat.is_alive() == False:
                break
            if split_block_list == cur_split_block_list:
                continue
            else:
                if self.__SPLIT_FLAG_DONE in cur_split_block_list:
                    log.info("[LargeFileTransfer] Split done at %s:%s" % (self.__src_host, src_tmp_dir))
                    cur_split_block_list.remove(self.__SPLIT_FLAG_DONE)
                    status = self.__SPLIT_STATUS_DONE
                else:
                    log.info("[LargeFileTransfer] Split updated at %s:%s" % (self.__src_host, src_tmp_dir))
                    status = self.__SPLIT_STATUS_UPDATED

                new_block_list = filter(lambda block: block not in split_block_list, cur_split_block_list)
                return status, cur_split_block_list, new_block_list

        # no new heartbeat is detected after certain time
        # split timeout
        raise Exception("Split time out for %s:%s" % (self.__src_host, src_tmp_dir))

    def __get_split_block_list(self, src_tmp_dir):
        """List block files in the remote split dir (oldest first), excluding
        the in-progress tmp block and the heartbeat flag files."""
        log.debug("[LargeFileTransfer] Get split block list from %s:%s" % (self.__src_host, src_tmp_dir))
        cmd = "/bin/ls -1Atr %s -I %s -I %s*" % (src_tmp_dir, self.__SPLIT_BLOCK_TMP, Heartbeat.HEARTBEAT_FILE_PREFIX)
        log.debug("[LargeFileTransfer] Run: %s" % cmd)
        status, output = common.ssh(self.__src_host, cmd)
        if status != 0:
            raise Exception("Cannot get split status from %s:%s" % (self.__src_host, src_tmp_dir))
        else:
            block_list = output.split("\n")
            block_list = filter(None, block_list)
            for i in xrange(len(block_list)):
                block_list[i] = os.path.basename(block_list[i])
            return block_list

    def __transfer_split_blocks(self, src_tmp_dir, split_dir, bw_limit_flag):
        """
          rsync the split file blocks to current dir, retry if failed.
        """
        path = "%s:%s/*" % (self.__src_host, src_tmp_dir)
        command = ['rsync', '-avc', '--progress', '-e', 'ssh -o StrictHostKeyChecking=no', 
            '--exclude=%s' % self.__SPLIT_BLOCK_TMP, '--exclude=%s*' % Heartbeat.HEARTBEAT_FILE_PREFIX, path, split_dir+'/']
        if bw_limit_flag:
            command.insert(1, RSYNC_BANDWIDTH)
        result = rsyncWithRetry(command)
        if not result:
            raise Exception("Failed to transfer split blocks from %s:%s" % (self.__src_host, src_tmp_dir))

    # NOTE(review): not started by __init__threads (which uses
    # __restore_block_worker); kept for the directory-level restore path.
    def __restore_file_worker(self):
        while True:
            (split_dir, dest_file) = self.__RESTORE_Q.get(True)
            if split_dir == None or self.ASYNC_SPLIT_RESTORE_RESULT == False:
                break
            result = self.__restore_file(split_dir, dest_file)
            if not result:
                log.error("[LargeFileTransfer] Restore file %s failed." % dest_file)
                self.ASYNC_SPLIT_RESTORE_RESULT = False
                break

    def __restore_block_worker(self):
        # Consume (block, dest_file) pairs until a sentinel, a global
        # failure, or a failed restore.
        while True:
            (src_block, dest_file) = self.__RESTORE_Q.get(True)
            if src_block == None or self.ASYNC_SPLIT_RESTORE_RESULT == False:
                break
            result = self.__restore_block(src_block, dest_file)
            if not result:
                log.error("[LargeFileTransfer] Restore file '%s' failed." % dest_file)
                self.ASYNC_SPLIT_RESTORE_RESULT = False
                break

    @staticmethod
    def __restore_file(split_dir, dest_file):
        """
            Restore the dest file from the given split directory
        """
        log.info("[LargeFileTransfer] Restore file: %s" % dest_file)
        file_restorer = SplitMerge(dest_file, split_dir)
        result = file_restorer.restore()
        if result == SplitMerge.RESULT_OK:
            log.info("[LargeFileTransfer] Restore file '%s' succeeded" % dest_file)
            return True
        else:
            log.error("[LargeFileTransfer] Failed to restore file '%s'" % dest_file)
            return False

    @staticmethod
    def __restore_block(src_block, dest_file):
        """
            Restore the dest file from the given src block
        """
        result = SplitMerge.restore_block(src_block, dest_file)
        if result == SplitMerge.RESULT_OK:
            log.info("[LargeFileTransfer] Restore block %s succeeded" % src_block)
            return True
        else:
            log.error("[LargeFileTransfer] Failed to restore block %s to %s" % (src_block, dest_file))
            return False


    def __all_tasks_added(self):
        # One sentinel per split worker so every worker wakes up and exits.
        for i in xrange(self.__SPLIT_THREAD_COUNT):
            self.__SPLIT_Q.put((None, None, None, None))

    def wait(self):
        """Block until all split/transfer and restore workers have exited."""
        # NOTE(review): start() already enqueued sentinels via
        # __all_tasks_added(); these extras appear redundant (each worker
        # consumes at most one) but are harmless -- confirm before removing.
        for i in xrange(self.__SPLIT_THREAD_COUNT):
            self.__SPLIT_Q.put((None, None, None, None))
        for t in self.__SPLIT_THREADS:
            t.join()
        log.debug('[LargeFileTransfer] All split and transfer tasks done')
        for i in xrange(self.__RESTORE_THREAD_COUNT):
            self.__RESTORE_Q.put((None, None))
        for t in self.__RESTORE_THREADS:
            t.join()
        log.debug('[LargeFileTransfer] All restore tasks done')


def is_mds_stopped(port):
    """Return True when the MDS service for *port* is not running."""
    svc_name = mds_service.get_mds_service_name_by_port(port)

    # only sync if mds is stopped
    try:
        # We check the running status (rather than is_svc_stopped) because
        # is_svc_stopped() mis-reports a disabled mds as running.
        is_running = service.is_svc_running(svc_name)
    except Exception:
        is_running = False

    return not is_running

def is_mds_initialized(host, port):
    """Check whether the MDS at host:port reports itself initialized.

    The egrep filters out expected lines; when something unexpected remains
    the pipeline exits 0, which means the node is NOT initialized.
    """
    cmd = "mauisvcmgr -s mauimds -c mauimds_isNodeInitialized -m %s:%s | egrep -vi 'sending|running|true|^$'" \
        % (host, port)
    status, _ = commands.getstatusoutput(cmd)
    return status != 0

def waitMDSInitialized(host, port):
    """Poll until the MDS at host:port is initialized.

    Sleeps INITED_RETRY_INTERVAL seconds between checks, for at most
    INITED_RETRY_TIMES attempts.  Returns True on success, False on timeout.
    """
    attempts = 0
    while attempts < INITED_RETRY_TIMES:
        time.sleep(INITED_RETRY_INTERVAL)
        if is_mds_initialized(host, port):
            return True
        attempts += 1
    return False

def set_mds_status(port, action):
    """Enable or disable the MDS service listening on the given port.

    action must be "enable" or "disable".  Raises on an invalid action, an
    invalid port, or a failed service command; returns True on success.
    """
    if action not in ("enable", "disable"):
        raise Exception("Invalid action")

    # ports above 10600 belong to remote replication, above 10400 to local mds
    if port > 10600:
        service_name = 'mauiremoterep'
    elif port > 10400:
        service_name = 'mauimds'
    else:
        raise Exception('Invalid port number: %s' % port)

    log.info("[Status] %s MDS: %d" % (action, port))

    cmd = "service %s %s %s" % (service_name, action, port)
    status, output = commands.getstatusoutput(cmd)
    if status == 0:
        return True
    raise Exception("Failed to %s mds %s\n%s\n%s\n" % (action, port, cmd, output))

def validateUUID(UUID, target_path):
    """Validate that UUID matches the fs uuid embedded in target_path.

    target_path looks like:
        "/mauimds-db/mds-6c1082d0-1fbd-492a-b75c-c2e8ffd51e21/master"
    The third path element carries the uuid with an "mds-" prefix.
    An empty UUID skips the comparison.  Returns True when valid.
    """
    path_elements = target_path.split('/')
    if len(path_elements) > 3:
        fsud = path_elements[2]
    else:
        log.error("[Status] could not get fsud from target path %s" % target_path)
        return False

    # BUGFIX: the previous string.lstrip(fsud, "mds-") stripped *characters*
    # from the set {m,d,s,-}, which also ate any leading 'm'/'d'/'s'/'-'
    # characters of the uuid itself.  Remove only the literal "mds-" prefix.
    if fsud.startswith("mds-"):
        extracted_uuid = fsud[len("mds-"):]
    else:
        extracted_uuid = fsud

    # Check the config file to make sure the UUID that is passed is the same
    # as the uuid extracted from bdbxmlEnvRoot; if different, skip this port.
    if UUID != "" and UUID != extracted_uuid:
        log.error("[Status] fs uuid not consist in mds config file, skip this mds")
        return False

    return True


def validateAndCreateEnv(target_path, tmp_target_path):
    """Validate the target env root and ensure the tmp env directory exists.

    Refuses to run when target_path already holds data (we never erase a
    target).  Creates tmp_target_path when it is missing.  Returns True when
    the tmp env is ready for use.
    """
    # If target directory is not empty then abort.  We do not want to erase
    # anything from the target; let the CE folks figure out what to do.
    if os.path.isdir(target_path) and len(os.listdir(target_path)) != 0:
        log.error("[TmpEnv] path %s not empty, skip this mds. Please back up the directory before perform mdssync" % (target_path))
        return False

    # We use a tmp local env for transferring files and then rename it to
    # the target env afterward.
    if os.path.exists(tmp_target_path):
        if os.path.isdir(tmp_target_path):
            return True
        else:
            # BUGFIX: the message previously lacked its % argument and
            # logged the literal '%s'.
            log.error("[TmpEnv] tmp env %s exists but is not a directory" % tmp_target_path)
            return False

    # The tmp directory does not exist; go ahead and create it.
    try:
        log.info("[TmpEnv] tmp env %s does not exist, creating a new dir" % (tmp_target_path))
        os.mkdir(tmp_target_path)
    except OSError as msg:
        log.error("[TmpEnv] Error creating the tmp env %s. Error Message:%s" % (tmp_target_path, msg))
        return False

    return True

def checkSameSegment(host):
    """Check whether host is reachable via the mgmt network interface.

    Returns (True, mgmt_host) when the mgmt alias appears in /etc/hosts
    (enables faster transfers), otherwise (False, host).
    """
    mgmt_host = host + MGMT_HOST_SUFFIX
    status, _ = commands.getstatusoutput("grep %s %s" % (mgmt_host, HOSTS_FILE))
    if status == 0:
        log.info("[Status] Use mgmt interface for transfer")
        return True, mgmt_host
    return False, host

def get_src_size(host, src_path):
    """Return the size (per 'du -s') of src_path on the remote host.

    Raises Exception when the remote command or output parsing fails.
    """
    status, output = common.ssh(host, "/usr/bin/du -s %s" % src_path)
    if status != 0:
        raise Exception("Cannot get size for %s:%s" % (host, src_path))
    try:
        # 'du -s' prints "<size>\t<path>"; the first field is the size
        return int(output.split()[0])
    except Exception:
        raise Exception("Failed to parse size for %s:%s" % (host, src_path))

def get_local_available_space(path):
    """Return the available space (in KB) of the filesystem holding path."""
    stats = os.statvfs(path)
    return stats.f_bsize * stats.f_bavail / 1024

def get_available_space(host, path):
    """Return the available space (per 'df') for path on the remote host.

    Raises Exception when the remote command or output parsing fails.
    """
    status, output = common.ssh(host, "df %s" % path)
    if status != 0:
        raise Exception("Cannot get available space for %s:%s" % (host, path))
    # the last line of the 'df' output holds the numbers; field 3 is 'Available'
    try:
        last_line_fields = output.split('\n')[-1].split()
        return int(last_line_fields[3])
    except Exception:
        raise Exception("Failed to parse available space for %s:%s" % (host, path))

def check_local_available_disk_space(host, src_path, tmp_target_path):
    """
    Check if there is enough space on the current disk for rsync and db_recover
    """

    try:
        src_size = get_src_size(host, src_path)
        # available_space = get_local_available_space(".")
        available_space = get_available_space('localhost', tmp_target_path)
    except Exception, e:
        log.error(e)
        return False

    if available_space > src_size * DISK_SPACE_THRESHOLD_RATIO_LOCAL:
        return True
    elif available_space > src_size + EXTRA_DISP_SPACE:
        log.warn("[Status] LOW DISK SPACE!")
        if IGNORE_SPACE:
            log.warn('-=================== WARNNING ===================-')
            log.warn('| You are running mdssync with --ignore-space option.')
            log.warn('| Please make sure there is enough disk space on localhost.')
            log.warn('--------------------------------------------------')
            return True
        else:
            log.warn("[Status] Use '--ignore-space' option to force mdssync run when disk space is low")
            return False
    else:
        log.error("[Status] Not enough local disk space for mds sync")
        return False

def check_src_available_disk_space(host, src_path):
    try:
        src_size = get_src_size(host, src_path)
        available_space = get_available_space(host, src_path)
    except Exception, e:
        log.error(e)
        return False

    if available_space > src_size * DISK_SPACE_THRESHOLD_RATIO_SRC:
        return True
    elif IGNORE_SPACE:
        log.warn('-=================== WARNNING ===================-')
        log.warn('| You are running mdssync with --ignore-space option.')
        log.warn('| Please make sure there is enough disk space on')
        log.warn('| %s:%s' % (host, src_path))
        log.warn('--------------------------------------------------')
        return True
    else:
        log.warn("[Status] Use '--ignore-space' option to force mdssync run when src disk space is low")
        log.error("[Status] Not enough src disk space for mds sync on %s" % host)
        return False

def checkAvailableDiskSpace(host, src_path, tmp_target_path):
    """Check disk space on both the local and the source side.

    Short-circuits: the source-side check only runs when the local one passes.
    """
    if not check_local_available_disk_space(host, src_path, tmp_target_path):
        return False
    return check_src_available_disk_space(host, src_path)

def getSrcMDSDbFileList(src_host, src_dir):
    """
      Get the MDS DB file list from the src MDS src_host node regarding with
      src_dir.

      Return: a list with the file name as each element on success.
              None on failure.
    """

    cmd = "/usr/bin/du -b %s/* --exclude='%s' | grep -v total | grep -v log\\.* | grep -v __db\\.*" % (src_dir, SRC_MDSSYNC_TMP_DIR)
    status, output = common.ssh(src_host, cmd)
    if status != 0:
        log.error("[Status] Cannot get MDS DB file list from %s:%s" % (src_host, src_dir))
        return None
    else:
        try:
            file_list = output.split('\n')
            for i in range(len(file_list)):
                # parse the output of 'du -b' command and get the file name and file size
                # e.g. '16384   /mauimds-db/mds-4385d679-a907-45b7-acd7-121969e50caf/mds/mds_10402/master/<SOME_FILE>'
                attrs = file_list[i].split()
                file_list[i] = (int(attrs[0]), os.path.basename(attrs[1]))
            return file_list
        except Exception, msg:
            log.error(msg)
            log.error("[Status] Failed to get MDS DB file list") 
            return None

def getSrcMDSLogFileList(src_host, src_dir):
    """Fetch the list of BDB log.* file paths under src_dir on the source node.

    Returns the paths as a list on success, None on failure.
    """
    status, output = common.ssh(src_host, "/bin/ls %s/log.*" % (src_dir))
    if status != 0:
        log.error("[Status] Cannot get MDS DB file list from %s:%s" % (src_host, src_dir))
        return None
    return output.split()

def getSmallAndLargeDBFileList(host, src_path):
    """Split the source DB file list into small and large files.

    Files above SPLIT_FILE_SIZE_THRESHOLD are transferred via split/merge;
    the rest go through plain rsync.  Returns (small_list, large_list) of
    basenames.  Raises Exception when the remote listing fails.
    """
    raw_file_tuple_list = getSrcMDSDbFileList(host, src_path)
    if raw_file_tuple_list is None:
        raise Exception("There was a problem. Fail to get MDS DB file from %s:%s" % (host, src_path))

    large_db_file_list = [name for (size, name) in raw_file_tuple_list
                          if size > SPLIT_FILE_SIZE_THRESHOLD]
    small_db_file_list = [name for (size, name) in raw_file_tuple_list
                          if size <= SPLIT_FILE_SIZE_THRESHOLD]
    return small_db_file_list, large_db_file_list

def rsyncWithRetry(command):
    log.debug("[Rsync] NEW RSYNC COMMAND %s" % command)
    ret_code = False
    try:
        for try_time in range(RSYNC_MAX_RETRY_COUNT):
            p = subprocess.Popen(command)
            p.wait()

            if p.returncode == 0:
                log.debug("[Rsync] Rsync succeeded")
                ret_code = True
                break
            elif p.returncode == 24:
                logging.debug("[Rsync] Ignore error code: 24 vanished file error")
                ret_code = True
                break
            else:
                log.error("[Rsync] %d time rsync failed, error code: %s" % ((try_time+1), p.returncode))

    except Exception, e:
        log.error('[Rsync] %s' % e)

    return ret_code

def transferSmallDBFiles(exclude_db_file_list, host, src_path, is_same_segment):
    """Rsync the small DB files from the source env into the current dir.

    Large DB files (handled by split/merge), log.*, __db.* and the mdssync
    tmp dir are excluded.  Bandwidth is throttled across network segments.
    """
    log.info("[SyncDbEnv] Syncing small db files")

    remote_glob = "%s:%s/*" % (host, src_path)
    command = ['rsync', '-avzc', '--block-size=16384', '--progress', '-e', 'ssh -o StrictHostKeyChecking=no', 
        '--exclude=log.*', '--exclude=__db.*', '--exclude=%s' % SRC_MDSSYNC_TMP_DIR, remote_glob, '.']
    if not is_same_segment:
        command.insert(1, RSYNC_BANDWIDTH)
    # exclude the large db files, keeping the options before the src/dest pair
    for db_file in exclude_db_file_list:
        command.insert(-2, '--exclude=%s' % os.path.basename(db_file))

    return rsyncWithRetry(command)

def trigger_check_point(host, src_tmp_dir, port):
    """Trigger a checkpoint on the source node and wait for completion.

    Starts the checkpoint helper remotely in the background, polls its
    heartbeat until it stops, then checks for the done-flag file under
    src_tmp_dir.  Returns True on success, False on failure.
    """
    cmd = "%s -o %s -p %s --dest_dir %s -v > /dev/null 2>&1 &" % \
        (SPLIT_RESTORE_FILE_UTIL, SOURCE_UTIL_OP_CHKPOINT, port, src_tmp_dir)
    log.debug('[Checkpoint] <RUN> %s' % cmd)
    status, output = common.ssh(host, cmd)
    if status == 0 or status == SingleInstance.RET_CODE_EXISTING_INSTANCE:
        log.info("[Checkpoint] Checkpoint started")
    else:
        log.error('[Checkpoint] ' + output)
        return False

    log.info('[Checkpoint] Wait for checkpoint')
    chkpoint_heartbeat = Heartbeat(host, src_tmp_dir, instance_id = HEARTBEAT_ID_CHKPOINT)
    while True:
        # wait some time for the first heartbeat
        time.sleep(CHKPOINT_CHK_INTERVAL)
        if not chkpoint_heartbeat.is_alive():
            break

    cmd = "ls %s" % os.path.join(src_tmp_dir, CHKPOINT_DONE_FLAG)
    status, _ = common.ssh(host, cmd)
    if status != 0:
        log.error('[Checkpoint] Checkpoint failed')
        return False

    log.info("[Checkpoint] Checkpoint done")
    # BUGFIX: previously fell off the end returning None on success; the
    # caller compares the result with '== False', so make success explicit.
    return True

def transferLogFiles(host, src_path, is_same_segment):
    """Transfer BDB log files: tar on source, rsync the tarball, untar here.

    The tarball lives under the source tmp dir and is cleaned up later by
    the tmp-env cleanup.  Returns True when every step succeeds.
    """
    log.info("[LogFiles] Syncing log files")

    # <MDS ENV>/mdssync.d/logs.mdssync.tgz
    tarball = os.path.join(src_path, SRC_MDSSYNC_TMP_DIR, LOG_TARBALL)

    if not tarLogFiles(host, src_path, tarball):
        return False
    if not rsyncLogFiles(host, tarball, is_same_segment):
        return False
    return untarLogFiles()

def tarLogFiles(host, src_path, log_tarball_path):
    """Kick off log-file tarring on the source node and wait for completion.

    The remote helper runs in the background; its liveness is tracked via a
    heartbeat and its result via a done-flag file next to the tarball.
    """
    log.info("[LogFiles] Tar log files")

    # start tar log file on remote side
    cmd = "%s -o %s --src_path %s --log_tarball_path %s -v > /dev/null 2>&1 &" % \
        (SPLIT_RESTORE_FILE_UTIL, SOURCE_UTIL_OP_TAR, src_path, log_tarball_path)
    log.debug('[LogFiles] <RUN> %s' % cmd)
    status, output = common.ssh(host, cmd)
    if status != 0 and status != SingleInstance.RET_CODE_EXISTING_INSTANCE:
        log.error('[LogFiles] ' + output)
        return False
    log.info("[LogFiles] Tar log files started")

    # poll the heartbeat until the remote tar process completes
    tarlog_heartbeat = Heartbeat(host, os.path.dirname(log_tarball_path), instance_id = HEARTBEAT_ID_TAR_LOG)
    while True:
        # wait some time for the first heartbeat
        time.sleep(TAR_LOG_CHK_INTERVAL)
        if not tarlog_heartbeat.is_alive():
            break

    # read the done flag to learn whether tar succeeded
    done_flag = log_tarball_path + LOG_TAR_DONE_FLAG_SUFFIX
    status, output = common.ssh(host, "cat %s" % done_flag)
    if status == 0 and output == FLAG_RESULT_SUCCESS:
        return True
    log.error('[LogFiles] ' + output)
    return False

def rsyncLogFiles(host, log_tarball_path, is_same_segment):
    """Rsync the log-file tarball from the source node to the current dir."""
    log.info("[LogFiles] Rsync log tarball")
    command = ['rsync', '-avc', '--block-size=16384', '--progress',
        "%s:%s" % (host, log_tarball_path), '.']
    if not is_same_segment:
        # throttle when crossing network segments
        command.insert(1, RSYNC_BANDWIDTH)

    return rsyncWithRetry(command)

def untarLogFiles():
    """Untar the synced log tarball in the current working directory."""
    log.info("[LogFiles] Untar log tarball")

    cmd = "tar zxf %s" % LOG_TARBALL
    log.debug('[LogFiles] <RUN> %s' % cmd)
    status, output = commands.getstatusoutput(cmd)
    if status != 0:
        log.error('[LogFiles] ' + output)
        return False
    log.info("[LogFiles] Untar log files succeeded")
    return True

def rSyncDbEnvAndLogFiles(host, src_path):
    """
      Sync Db files and log files to current directory.

      Large DB files go through the asynchronous split/transfer/restore
      pipeline (LargeFileTransfer); small DB files and BDB log files are
      rsynced.  The pipeline's async result flag is re-checked after each
      stage so a failure aborts as early as possible.  Returns True on
      success, False on any failure.
    """

    # check if the peer node is in the same IS
    is_same_segment, host = checkSameSegment(host)

    # first sync db files
    # handle large and small db files separately
    try:
        (_, large_db_file_list) = getSmallAndLargeDBFileList(host, src_path)
    
        # use split-merge for large db files
        # result = startTransferLargeDBFiles(large_db_file_list, host, src_path, is_same_segment)
        large_db_transer = LargeFileTransfer(large_db_file_list, host, src_path, is_same_segment)
        result = large_db_transer.start()
        if result == False:
            raise Exception("Failed to transfer large db files")

        # bail out early if the async split/restore pipeline already failed
        if large_db_transer.ASYNC_SPLIT_RESTORE_RESULT == False:
            raise Exception("Failed to split and restore large db files")
        # use rsync for small db files
        # the large_db_file_list is passed to be excluded
        result = transferSmallDBFiles(large_db_file_list, host, src_path, is_same_segment)
        if result == False:
            raise Exception("Failed to transfer small db files")

        # re-check the async pipeline before moving on to the log files
        if large_db_transer.ASYNC_SPLIT_RESTORE_RESULT == False:
            raise Exception("Failed to split and restore large db files")
        # use rsync for log files
        result = transferLogFiles(host, src_path, is_same_segment)
        if result == False:
            raise Exception("Failed to transfer log files")

        log.info("[SyncDbEnv] Wait for restoring file blocks")
        # block until all split/restore worker threads have drained
        large_db_transer.wait()
        if large_db_transer.ASYNC_SPLIT_RESTORE_RESULT == False:
            raise Exception("Failed to split and restore large db files")

        log.info("[SyncDbEnv] All file blocks restored successfully")

    except Exception, e:
        log.error('[SyncDbEnv] %s' % e)
        return False
    return True

def startMDSService(port):
    """Start the MDS service for the given port unless --nostart is set.

    Returns the service start result, or True when starting is skipped.
    """
    if NOSTART == False:
        # resolve the service name from the port and start it
        svc_name = mds_service.get_mds_service_name_by_port(port)
        return service.start_service(svc_name, log)

    return True

def getPeerMountPathFromHostAndPortnum(host, portnum):
    """Get the mount path of the peer MDS at host:portnum.

    Returns (True, path) on success, (False, None) on failure.
    """
    cmd = "mauisvcmgr -s mauimds -c mauimds_getmnt -m %s:%d | grep -vi 'Sending command to' | awk -F : '{print $2}'" \
        % (host, portnum)
    status, src_path = commands.getstatusoutput(cmd)
    if status == 0:
        return True, src_path
    log.error("[Status] Failed to get mount point from peer, skip this mds")
    return False, None

def createSrcTempDir(host, src_tmp_dir):
    """Create the mdssync temp directory on the peer node if it is missing."""
    cmd = "src_tmp_dir=%s; test -d \\$src_tmp_dir || mkdir \\$src_tmp_dir" % src_tmp_dir
    status, output = common.ssh(host, cmd)
    if status != 0:
        log.error('[TmpEnv] ' + output)
        return False
    log.info("[TmpEnv] Create temp dir on peer node succeeded")
    return True

def cleanup_local_tmp_env(target_path):
    """
    Clean up tmp env on localhost
    """
    # clean up local tmp data first
    log.info("[TmpEnv] Clean up %s tmp directories on local host..." % SPLIT_DIR_SUFFIX)
    tmp_dirs = glob.glob("%s/*%s" % (target_path, SPLIT_DIR_SUFFIX))
    for split_dir in tmp_dirs:
        if os.path.isdir(split_dir):
            try:
                log.debug("[TmpEnv] Clean up split directory %s..." % split_dir)
                shutil.rmtree(split_dir)
            except Exception, e:
                log.error('[TmpEnv] %s' % e)
                log.warn("[TmpEnv] Clean up local tmp dir %s failed, please remove manually later" \
                    % split_dir)

    # clean up the log tarball
    log_tarball = "%s/%s" % (target_path, LOG_TARBALL)
    if os.path.isfile(log_tarball):
        log.debug("[TmpEnv] Clean up log tarball %s..." % log_tarball)
        os.remove(log_tarball)

def cleanup_remote_tmp_env(host, src_path):
    """Remove the mdssync temp directory on the remote source node."""
    src_tmp_env = os.path.join(src_path, SRC_MDSSYNC_TMP_DIR)
    log.info("[TmpEnv] Clean up tmp directory at %s:%s..." % (host, src_tmp_env))
    status, _ = common.ssh(host, "rm -rf %s" % (src_tmp_env))
    if status != 0:
        log.warn("[TmpEnv] Clean up remote tmp directory %s:%s failed, please remove manually later" \
            % (host, src_tmp_env))

def _mdscfg_get_peer_port(peer_host, rep_port):
    """Resolve the MDS port on peer_host from its replication port.

    'mauisvcmgr -s mauimds -c mauimds_getrepport' times out on 2.1.x when
    only one mds of the set is running, so instead grep the replication
    port in /etc/maui/mds/*/mds_cfg.xml and parse the port out of the
    matching file path.  Raises on lookup failure.
    """
    cmd = "grep %s /etc/maui/mds/*/mds_cfg.xml | grep replicationPort" % rep_port
    status, output = common.ssh(peer_host, cmd)
    if status:
        raise Exception('Failed to get mds port from peer host %s' % peer_host)
    # the path looks like /etc/maui/mds/<port>/mds_cfg.xml: element 4 is <port>
    return int(output.split('/')[4])

def _mdscfg_get_mdsset_members_post_210(mds_config):
    """Parse MDS set members from mds_cfg.xml (Atmos >= 2.1.0).

    'mauisvcmgr -s mauimds -c mauimds_getMdsSet' times out when only one
    MDS is alive, so the members (including remote MDS) are derived from
    the config file instead.  Returns a list of (host, port) tuples;
    raises when this mds has not been configured yet.
    """
    members = []

    master_host = mds_config.get_value('masterHost')
    master_rep_port = mds_config.get_value('masterReplicationPort')
    if master_rep_port == "0000":
        raise Exception("This mds has not been configured yet, ignore")

    master_port = _mdscfg_get_peer_port(master_host, master_rep_port)
    members.append((master_host, master_port))
    log.debug('[Status] MDS set member: %s:%s' % (master_host, master_port))

    # 2nd slave MDS and/or remote MDS, given as "host:repport host:repport ..."
    other_replicas_str = mds_config.get_value('otherReplicas')
    if other_replicas_str:
        for replica in other_replicas_str.split():
            replica_host, replica_rep_port = replica.split(':')
            replica_port = _mdscfg_get_peer_port(replica_host, replica_rep_port)
            members.append((replica_host, replica_port))
            log.debug('[Status] MDS set member: %s:%s' % (replica_host, replica_port))

    return members

def _mdscfg_get_mdsset_members_pre_210(mds_config):
    """Placeholder for parsing MDS set members on Atmos versions < 2.1.0.

    Intentionally unimplemented; returns None.  No callers are visible in
    this part of the file.
    """
    pass

def _mdscfg_get_mdsset_members(port):
    """Return the (host, port, type) members of the MDS set that contains
    this node's mds at the given port.

    Queries 'mauimdlsutil -q -m "*"' for all sets, picks the set mentioning
    <hostname>:<port>, and classifies each member as 'master', 'remote'
    (port > 10600) or 'slave' (port > 10400).  Members with an invalid
    port (e.g. '0000') are skipped.  Raises on lookup failure.
    """
    status, output = commands.getstatusoutput('mauimdlsutil -q -m "*"')
    if status != 0:
        raise Exception('Failed to get system MDS set infomation')

    # the output lists one mds set per blank-line-separated paragraph
    mds_svc_name = '%s:%s' % (socket.gethostname(), port)
    mds_set = None
    for paragraph in output.split('\n\n'):
        if mds_svc_name in paragraph:
            mds_set = paragraph
    if not mds_set:
        raise Exception('Failed to get MDS set infomation for %s' % mds_svc_name)

    # parse mds members from a block like:
    # MdsSet ID : a1206d97051677a8222579
    # Master Mds : l4-203-r1s1-001:10401:Shanghai
    # Replica Mds : l4-203-r1s1-002:10401:Shanghai
    # Replica Mds : l4-203-r2s1-001:10601:Beijing
    mdsset_members = []
    mds_re = re.compile(r' : (.+?):(\d+?):(.+?)')
    for mds_str in mds_set.split('\n'):
        match = mds_re.search(mds_str)
        if not match:
            continue
        mds_port = int(match.group(2))
        # classify as master, slave or remote from the line and the port
        if 'master' in mds_str.lower():
            mds_type = 'master'
        elif mds_port > 10600:
            mds_type = 'remote'
        elif mds_port > 10400:
            mds_type = 'slave'
        else:
            # mds port is invalid '0000', skip this mds
            log.debug('[Status] Skip MDS set member %s:%s' % (match.group(1), mds_port))
            continue
        mdsset_members.append((match.group(1), mds_port, mds_type))

    return mdsset_members

def _mdscfg_filter_running_master(mds_list):
    """Find the running master among a list of (host, port, ...) tuples.

    Returns its (host, port), or (None, None) when no member reports
    itself as master.
    """
    for mds in mds_list:
        cmd = "mauisvcmgr -s mauimds -c mauimds_ismaster -m %s:%s | grep mauimds_ismaster=yes" % mds[:2]
        status, _ = commands.getstatusoutput(cmd)
        if status == 0:
            log.debug('[Status] Running master: %s:%s' % mds[:2])
            return mds[:2]
    return (None, None)

def _mdscfg_filter_running_mds(mds_list, mds_type='slave'):
    """Find an initialized MDS of the given type ('slave' or 'remote').

    mds_list holds (host, port, type) tuples.  Returns the first matching
    member's (host, port), or (None, None) when none is initialized.
    """
    for mds in mds_list:
        if mds[2] != mds_type:
            continue
        cmd = "mauisvcmgr -s mauimds -c mauimds_isNodeInitialized -m %s:%s | grep Initialized=true" % mds[:2]
        status, _ = commands.getstatusoutput(cmd)
        if status == 0:
            log.debug('[Status] Running %s mds: %s:%s' % (mds_type, mds[0], mds[1]))
            return mds[:2]
    return (None, None)

def _mdscfg_get_db_env_and_running_peer(port):
    """
    Get the DB env, mds peer host and port

    Returns a 6-tuple (ret_code, db_env, peer_host, peer_port, from_remote, is_3way):
      ret_code    -- False when no usable peer was found or on any error
      db_env      -- bdbxmlEnvRoot from /etc/maui/mds/<port>/mds_cfg.xml
      peer_host/peer_port -- the running master, or (with --force) a running
                     slave or remote mds as fallback
      from_remote -- True when the fallback lookup went to a remote mds
      is_3way     -- True when replicaMode is '3'
    """
    bad_ret = (False, None, None, None, None, None)
    try:
        ret_code = True
        from_remote = False
        mds_cfgfile = "/etc/maui/mds/%d/mds_cfg.xml" % port
        mds_config  = common.Properties(mds_cfgfile)
        db_env = mds_config.get_value('bdbxmlEnvRoot')
        # replicaMode '3' marks a 3-way replicated mds set
        if mds_config.get_value('replicaMode') == '3':
            is_3way = True
        else:
            is_3way = False

        set_members = _mdscfg_get_mdsset_members(port)

        (peer_host, peer_port) = _mdscfg_filter_running_master(set_members)
        if peer_host == None:
            # fallback to running mds if --force option is set
            log.warn('[Status] No running master mds')
            if FORCE_RUN:
                # log.warn('[Status] Running with --force option. Fallback to running mds')
                # prefer a local slave; only then fall back to a remote mds
                (peer_host, peer_port) = _mdscfg_filter_running_mds(set_members, 'slave')
                if not peer_host:
                    (peer_host, peer_port) = _mdscfg_filter_running_mds(set_members, 'remote')
                    from_remote = True
                if peer_host:
                    log.warn('-=================== WARNNING ===================-')
                    log.warn('| You are running mdssync with --force option.')
                    log.warn('| There is no running MDS master in the MDS set.')
                    log.warn('| Will sync form MDS %s:%s' % (peer_host, peer_port))
                    log.warn('| Please make sure MDS %s:%s is in healthy status' % (peer_host, peer_port))
                    log.warn('--------------------------------------------------')
                else:
                    log.error('[Status] No initialized mds in the current mds set')
            else:
                log.warn('[Status] Use --force option to sync from running slave/remote mds')

        if peer_host == None:
            ret_code = False
        return (ret_code, db_env, peer_host, peer_port, from_remote, is_3way)
    except Exception, e:
        log.error('[Status] %s' % e)
        return bad_ret

def do_port_sync(port, UUID):

    """
    #  Below are the steps involved in doing the port sync
    #1. Get the env root for the port and the mds peer host and port
    #2. Validate UUID
    #3. Get the peer env root
    #4. Check the env root existence. And create the local tmp env
    #5. Check and create tmp env on souce node
    #6. Check free disk space
    #7. rsync the DB from the peer to this node
    #8. Run db_recover -cv
    #9. Enable and start the service
    #10. Wait until MDS service is initialized
    #11. Clean up tmp env on both remote and local host

    Returns None in all cases; failures are reported via the log and abort
    the remaining steps for this port.
    """

    # Hold the reference for the whole call; presumably SingleInstance
    # guards against a concurrent mdssync of the same port -- confirm
    # against its definition (outside this view).
    single_instance_lock = SingleInstance(flavor_id = port)

    # First, Make sure the mds is stopped
    ret_code = is_mds_stopped(port)
    if ret_code == False:
        log.error("[Status] MDS %d is running." % (port))
        return

    #1. Get the env root for the port and the mds peer host and port
    ret_code, target_path, host, portnum, from_remote, is_3way = _mdscfg_get_db_env_and_running_peer(port)
    if ret_code == False:
        # log.error("[Status] Failed to get valid peer host and port")
        return

    #2. Validate UUID
    ret_code = validateUUID(UUID, target_path)
    if ret_code == False:
        return

    #3. Get the peer env root
    # Now, get the PEER mount path from the peer host and portnum
    ret_code, src_path = getPeerMountPathFromHostAndPortnum(host, portnum)
    if ret_code == False:
        log.error("[Status] Failed to get mount path for host %s, port %d" % (host, portnum))
        return

    # use a temp env for db files and log files transfer
    # rename to target env when transfer is done
    tmp_target_path = target_path + LOCAL_TMP_ENV_SUFFIX
    #4. Check the env root existence. And create the local tmp env
    ret_code = validateAndCreateEnv(target_path, tmp_target_path)
    if ret_code == False:
        return

    src_tmp_dir = os.path.join(src_path, SRC_MDSSYNC_TMP_DIR)

    #5. Check and create tmp env on souce node
    ret_code = createSrcTempDir(host, src_tmp_dir)
    if ret_code == False:
        return

    # start client heartbeat so the source side can tell we are still alive
    client_heartbeat = Heartbeat(host, src_tmp_dir)
    client_heartbeat.start()

    log.info("Start to sync data from %s:%s to %s" % (host, src_path, tmp_target_path))

    #disable the MDS
    set_mds_status(port, "disable")

    # We "cd" to the target direcory and do "sync" and "dbrecover"
    os.chdir(tmp_target_path)

    #6. Check free disk space
    ret_code = checkAvailableDiskSpace(host, src_path, tmp_target_path)
    if ret_code == False:
        log.error("[Status] Disk space check failed")
        return

    # skip check point if syncing from remote mds
    if from_remote:
        log.info('[Status] Skip trigger check point')
    else:
        ret_code = trigger_check_point(host, src_tmp_dir, portnum)
        if ret_code == False:
            log.error("[Status] Do checkpoint on source host failed")
            return

    #7. rsync the DB from the peer to this node
    ret_code = rSyncDbEnvAndLogFiles(host, src_path)
    if ret_code == False:
        log.error("Failed to sync DB and log files")
        return

    # rename the tmp env to target env
    try:
        shutil.move(tmp_target_path, target_path)
    except Exception, e:
        log.error(e)
        log.error("Failed to rename tmp env")
        return

    #8. Run db_recover -cv
    # runs in the target env since we chdir'ed above and the tmp env was
    # just renamed onto target_path
    log.info("Running DB recover for MDS %d" % (port))
    ret_code, console_string = commands.getstatusoutput("nice db_recover -cv")
    log.info(console_string)
    if ret_code != 0:
        log.error("db_recover failed")
        return

    #9. Enable and start the service
    # the mds disk replacement script will skip mds service start and chkconfig
    # so we need to do it here
    #enable the MDS 
    set_mds_status(port, "enable")
    # Now go ahead and start the MDS service and chkconfig it    
    ret_code = startMDSService(port)
    if ret_code == False:
        log.error("Error: Failed to start mds %s" % port)
        return

    #10. Wait until MDS service is initialized
    # in 3-way mds systems, if there is only 1 living local mds, it won't initialize
    # skip wait initialize when it is 3-way mds and there is only 1 living local mds (syncing from remote)
    if is_3way and from_remote:
        log.info('[Status] Skip waiting for MDS initialized')
    else:
        ret_code = waitMDSInitialized('localhost', port)
        if ret_code == False:
            log.error("Error: mds %s not initialized" % port)
            return

    #11. Clean up tmp env on both remote and local host
    cleanup_local_tmp_env(target_path)
    client_heartbeat.stop()

    if KEEP_SPLIT:
        # do not clean up split dir on source side
        log.warn('-=================== WARNNING ===================-')
        log.warn('| You are running mdssync with -k option.')
        log.warn('| Please manually clean up temp directory on source side:')
        log.warn('| %s:%s' % (host, os.path.join(src_path, SRC_MDSSYNC_TMP_DIR)))
        log.warn('--------------------------------------------------')
    elif not client_heartbeat.is_any_other_living_instance():
        # only clean the source tmp dir when no other mdssync client is using it
        cleanup_remote_tmp_env(host, src_path)

def do_drive_sync(UUID):
    """Run the port sync for every MDS port configured on this node.

    Ports are discovered from the 10xxx directory names under /etc/maui/mds/.
    """
    proc = os.popen("/bin/ls /etc/maui/mds/ | grep 10[0-9][0-9][0-9]")

    # run through all the ports and sync each one
    for line in proc.readlines():
        port_number = int(line.rstrip())
        log.info("=== For port %d" % (port_number))
        do_port_sync(port_number, UUID)
 
def is_mounted(FSUUID):
    """
    Check whether the given filesystem UUID appears in /proc/mounts.

    We deliberately do not mount the FSUUID ourselves; if it is not
    mounted the sync is aborted and CE folks must mount it first.

    Returns True if mounted, False otherwise.
    """
    log.info("FSUUID %s" % (FSUUID))
    # Scan /proc/mounts directly instead of shelling out to grep with
    # an unquoted argument (avoids shell-injection/glob surprises).
    mounted = False
    try:
        mounts = open("/proc/mounts")
        try:
            mounted = any(FSUUID in line for line in mounts)
        finally:
            mounts.close()
    except IOError:
        # treat an unreadable /proc/mounts the same as "not mounted"
        mounted = False
    if mounted:
        log.info("%s is mounted" % (FSUUID))
        return True
    else:
        log.error("abort: %s is not mounted" % (FSUUID))
        return False

def printVersionAndExit(program):
    print ""
    print "%s version number %s" % (program, VERSION)
    print ""

def printMdsSyncSteps():

    print ""
    print "#  Below are the steps involved in doing the port sync"
    print "#1. Get the env root for the port and the mds peer host and port"
    print "#2. Validate UUID"
    print "#3. Get the peer env root"
    print "#4. Check the env root existence. And create the local tmp env"
    print "#5. Check and create tmp env on souce node"
    print "#6. Check free disk space"
    print "#7. rsync the DB from the peer to this node"
    print "#8. Run db_recover -cv"
    print "#9. Enable and start the service"
    print "#10. Wait until MDS service is initialized"
    print "#11. Clean up tmp env on both remote and local host"
    print ""
    print ""

def deploy_split_restore_util():
    """
    Temporary helper to deploy split_restore_largefile.sh to all atmos
    nodes for QA.  Returns True when mauiscp reports success.
    """
    util_name = os.path.basename(SPLIT_RESTORE_FILE_UTIL)
    copy_cmd = "mauiscp %s %s" % (util_name, SPLIT_RESTORE_FILE_UTIL)
    log.info(copy_cmd)
    rc, _ = commands.getstatusoutput(copy_cmd)
    return rc == 0

def source_side_do_split_large_file(src_file, dest_dir):
    """
    Source-side worker: split src_file into pieces under dest_dir.

    A SingleInstance lock ensures only one split runs per source file;
    any concurrent invocation exits inside SingleInstance.  A heartbeat
    is kept alive in dest_dir for the duration of the split so the
    target side can tell the source is still working.
    """
    # keep a reference so the lock lives for the whole call
    single_instance_lock = SingleInstance(flavor_id = os.path.basename(src_file) + '.split')
    # other split instances will exit at this point
    heartbeat = Heartbeat('localhost', dest_dir, instance_id = HEARTBEAT_ID_SPLIT)
    heartbeat.start()
    splitter = SplitMerge(src_file, dest_dir)
    status = splitter.split()
    heartbeat.stop()
    return status

def source_side_do_tar_log_file(src_path, log_tarball_path):
    """
    Source-side worker: tar the MDS log.* files from src_path into
    log_tarball_path, then drop a done-flag file recording the result.

    Only one tar worker may run per src_path (SingleInstance lock); a
    heartbeat is maintained next to the tarball while we work.
    Returns 0 on success, 1 on failure.
    """
    lock_id = src_path.replace("/", "-").replace(":", "").replace("\\", "-")
    single_instance_lock = SingleInstance(flavor_id = lock_id + '.tarlog')
    # other tar log instances will exit at this point
    log.info("[SourceSide] Tar log files")

    heartbeat = Heartbeat('localhost', os.path.dirname(log_tarball_path), instance_id = HEARTBEAT_ID_TAR_LOG)
    heartbeat.start()

    done_flag = log_tarball_path + LOG_TAR_DONE_FLAG_SUFFIX

    # Remove any stale flag first so the target side never reads an old result.
    if os.path.isfile(done_flag):
        # TODO: check the timestamp of the last instance
        #       to see if we can resuse the log tarball
        os.remove(done_flag)

    cmd = "cd %s; tar zcf %s log.*" % (src_path, log_tarball_path)
    log.debug('[SourceSide] <RUN> %s' % cmd)
    status, output = commands.getstatusoutput(cmd)
    if status == 0:
        result = 0
        result_str = FLAG_RESULT_SUCCESS
        log.info("[SourceSide] Tar log files succeeded")
    else:
        result = 1
        result_str = FLAG_RESULT_FAILED
        log.error('[SourceSide] ' + output)

    # publish the outcome for the target side to poll
    flag_file = open(done_flag, 'w')
    flag_file.write(result_str)
    flag_file.close()

    heartbeat.stop()
    return result

def source_side_do_checkpoint(port, tmp_env):
    """
    Source-side worker: trigger CHKPOINT_NUM checkpoints on the local
    MDS at `port`, then write a done-flag file into tmp_env on success.

    Only one checkpoint worker may run per port (SingleInstance lock);
    a heartbeat is maintained in tmp_env while we work.
    Returns 0 on success, 1 if any trigger command failed.
    """
    single_instance_lock = SingleInstance(flavor_id = 'chkpoint.%s' % port)
    # other chkpoint instances will exit at this point

    log.info("[SourceSide] Trigger checkpoint for MDS %s" % port)

    result = 0

    heartbeat = Heartbeat('localhost', tmp_env, instance_id = HEARTBEAT_ID_CHKPOINT)
    heartbeat.start()

    # remove any stale flag so the target side never reads an old result
    done_flag = os.path.join(tmp_env, CHKPOINT_DONE_FLAG)
    if os.path.isfile(done_flag):
        os.remove(done_flag)

    cmd = 'mauisvcmgr -s mauimds -c triggercheckpoint -m localhost:%s' % port
    for attempt in xrange(1, CHKPOINT_NUM + 1):
        log.info('[SourceSide] Trigger checkpoint #%s' % attempt)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            result = 1
            log.error('[SourceSide] ' + output)
            break
        # pause after each trigger to give the checkpoint time to settle
        log.info('[SourceSide] Wait for %s seconds' % CHKPOINT_SLEEP_SEC)
        time.sleep(CHKPOINT_SLEEP_SEC)

    # write checkpoint done flag file if success
    if result == 0:
        log.info("[SourceSide] Done checkpoint for MDS %s" % port)
        flag_file = open(done_flag, 'w')
        flag_file.write(FLAG_RESULT_SUCCESS)
        flag_file.close()

    heartbeat.stop()

    return result


if __name__ == "__main__":
    # Watcher installs signal handling so the script can be interrupted cleanly.
    watcher = Watcher()

    usage = r'''
    %prog [options]

    %prog -u <UUID> [options] for drive sync
    %prog -p <PORT> [options] for port sync
    Example: %prog -p 10401
    '''

    parser = OptionParser(usage=usage)
    parser.add_option("-V", "--version", dest="version", action="store_true", help="for version")
    parser.add_option("-f", "--fast", dest="fast", action="store_true", help="for fast option.")
    parser.add_option("-s", "--stop", dest="stop", action="store_true", help="for don't start MDS option after rsync.")
    parser.add_option("-d", "--debug", dest="debug", action="store_true", help="enable debug mode")
    parser.add_option("-r", "--dryrun", dest="dryrun", action="store_true", help="for dryrun")
    parser.add_option("-u", "--UUID", dest="UUID", help="for drive sync.")
    parser.add_option("-p", "--port", dest="port", type="int", help="for syncing individual port")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="more verbose message")
    parser.add_option("-k", "--keep", dest="keep_split", action="store_true", help="Keep the splitted data")
    parser.add_option("-P", "--path", dest="path", help="path of the mdssync.py script")
    parser.add_option("--ignore-space", dest="ignore_space", action="store_true", help="force run even if disk space low")
    parser.add_option("--force", dest="force_run", action="store_true", help="force run even if peer is not master")
    # options for running as source side util
    parser.add_option("-o", "--operation", dest="operation", help="[Source Side Option] 'chkpoint', 'split' or 'tar'")
    parser.add_option("--src_file", dest="src_file", help="[Source Side Option] <split> source file")
    parser.add_option("--dest_dir", dest="dest_dir", help="[Source Side Option]\n\
                                                                    \t\t<split> dest directory\
                                                                    \t\t<chkpoint> tmp env for heartbeat and flag file")
    parser.add_option("--src_path", dest="src_path", help="[Source Side Option] <tar> src directory")
    parser.add_option("--log_tarball_path", dest="log_tarball_path", help="[Source Side Option] <tar> destination tarball path")
    (options, args) = parser.parse_args()

    if options.version:
        printVersionAndExit(sys.argv[0])
        sys.exit(0)

    # -f doubles the rsync bandwidth cap (default is set at module level)
    if options.fast:
        RSYNC_BANDWIDTH = '--bwlimit=10240'

    if options.stop:
        NOSTART = True
    if options.ignore_space:
        IGNORE_SPACE = True
    if options.force_run:
        FORCE_RUN = True

    # dryrun only prints the sync steps and exits
    if options.dryrun:
        printMdsSyncSteps()
        sys.exit(0)

    # NOTE: a duplicate "if options.dryrun: pdb.set_trace()" block used to
    # live here; it was unreachable (dryrun exits above) and looked like a
    # copy-paste of the options.debug check handled below, so it was removed.

    if options.verbose:
        newlevel = logging.DEBUG
        log.setLevel(newlevel)
        for h in log.handlers:
            h.setLevel(newlevel)

    if options.keep_split:
        KEEP_SPLIT = True

    if options.path:
        SPLIT_RESTORE_FILE_UTIL = options.path

    # Source-side operations: this script is also invoked remotely on the
    # sync source to split files, tar logs, or trigger checkpoints.
    if options.operation == SOURCE_UTIL_OP_SPLIT:
        if options.src_file and options.dest_dir:
            ret_code = source_side_do_split_large_file(options.src_file, options.dest_dir)
            sys.exit(ret_code)
        else:
            parser.print_help()
            sys.exit(1)

    elif options.operation == SOURCE_UTIL_OP_TAR:
        if options.src_path and options.log_tarball_path:
            ret_code = source_side_do_tar_log_file(options.src_path, options.log_tarball_path)
            sys.exit(ret_code)
        else:
            parser.print_help()
            sys.exit(1)

    elif options.operation == SOURCE_UTIL_OP_CHKPOINT:
        if options.port and options.dest_dir:
            ret_code = source_side_do_checkpoint(options.port, options.dest_dir)
            sys.exit(ret_code)
        else:
            parser.print_help()
            sys.exit(1)

    log.info("===== MDS SYNC STARTED =====")

    if options.debug:
        pdb.set_trace()

    # Target-side entry points: sync a whole replaced drive (-u) or a single
    # MDS port (-p).
    if options.UUID:
        UUID = options.UUID

        # the replaced drive must already be mounted (by CE) before we sync
        ret_code = is_mounted(UUID)
        if ret_code == False:
            sys.exit(1)

        do_drive_sync(UUID)
        log.info("===== END =====")
        sys.exit(0)
    elif options.port:
        port = options.port
        log.info("===== sync MDS for port %d =====" % port)

        ret_code = mds_service.is_valid_mds_port(port)
        if ret_code == False:
            sys.exit(1)

        UUID = ""
        do_port_sync(port, UUID)
        log.info("===== END =====")
        sys.exit(0)
    else:
        parser.print_help()
        sys.exit(1)
