#!/usr/bin/python
# __CR__
# Copyright (c) 2008-2014 EMC Corporation
# All Rights Reserved
#
# This software contains the intellectual property of EMC Corporation
# or is licensed to EMC Corporation from third parties.  Use of this
# software and the intellectual property contained therein is expressly
# limited to the terms and conditions of the License Agreement under which
# it is provided by or on behalf of EMC.
# __CR__

# Change history
# Ver 5.0.1:
# - Fix bug[31016, 28516, 31100, 31101, 31331, 31270]
#
# Ver 5.0.2:
# - Fix bug[29176, 31655]

# Ver 5.0.4:
# - Fix bug[31934, 31902]

"""
Author: Atmos L4 team
Purpose: This tool provides the feature to bring up an MDS that is out of sync
with its running peer. Please refer to the README file for details
Usage: python %prog --help
Version: 5.0.4
"""
import logging
import os
import sys
import re
import errno
import glob
import shutil
import commands
import pdb
import string
import tempfile
import socket
import time
import Queue
import datetime
import subprocess
import threading

from optparse import OptionParser

# Atmos modules
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), "../../lib"))
import common
import mds_service
import service
import logging_ext

import mdssyncutility
from mdssyncutility import MdssyncThread, MdssyncException, MdssyncStatus, Status_Mgr

# Tool version identification
VERSION                          = "5.0.4"
CUR_VERSION                      = ""
HOSTS_FILE                       = "/etc/hosts"
# suffix appended to a host name to form its management-network name
MGMT_HOST_SUFFIX                 = "-mgmt"
# suffix of the per-file working directory that holds split chunks
SPLIT_DIR_SUFFIX                 = ".split"
# prefixes for the temporary environments on the source / local side
SRC_TMP_ENV_PREFIX               = "mdssync.src."
LOCAL_TMP_ENV_PREFIX             = "mdssync.local."
# file listing the bdb logs handed to 'rsync --files-from'
LOG_FILE_LIST                    = 'mdssync.log.list'
STATUS_FILE                      = ".status"
# init_status for status_mgr
STATUS_LIST                      = ["last_transfer_log", "file_sync"]

# for space check
DISK_SPACE_THRESHOLD_RATIO_LOCAL = 1.5
DISK_SPACE_THRESHOLD_RATIO_SRC   = 0.2
# reserve extra 5GB disk space
EXTRA_DISK_SPACE                 = 5*1024*1024*1024

# only apply split-restore to files larger than 20GB
SPLIT_FILE_SIZE_THRESHOLD        = 20*1024*1024*1024
# default block is 16k
BLOCK_SIZE                       = 16384
# default chunk size is 5G (see CHUNK_SIZE below)
KILO_BYTE                        = 1024
MEGA_BYTE                        = 1048576
GIGA_BYTE                        = 1073741824
CHUNK_UNIT                       = GIGA_BYTE
CHUNK_SIZE                       = 5 * CHUNK_UNIT
# default source buff block count is 5
BUFF_BLOCK_COUNT                 = 5

# rsync parameter
RSYNC_CHECK_INTERVAL             = 10
RSYNC_MAX_RETRY_COUNT            = 10
# rsync bandwidth cap (KB/s) used when src and target are in different segments
RSYNC_BANDWIDTH                  = 5120

INITED_RETRY_TIMES               = 60
INITED_RETRY_INTERVAL            = 10

# ssh parameter
SSH_RETRY_COUNT                  = 5
SSH_TIMEOUT                      = 300

# ss destage parameter
# NOTE(review): "CHEAK" is a typo for "CHECK"; name kept for compatibility
SS_DESTAGE_DURATION              = 900
SS_CHEAK_INTERVAL                = 120

# runtime flags -- presumably overwritten by command-line option parsing
# (option handling is not visible in this chunk; verify against main)
# NOTE(review): "RSYC_FAST" looks like a typo for "RSYNC_FAST"; name kept
NOSTART                          = False
IGNORE_SPACE                     = False
FORCE_RUN                        = False
DEBUG_MODE                       = False
REUSE                            = False
RSYC_FAST                        = False

#init log file
LOGFILE = "/var/log/maui/mdssync.log"
LOG_ID = os.path.basename(sys.argv[0])+'full'
# route the same named logger to both a file and the console
log = logging.getLogger(LOG_ID)
log = common.get_file_logger(LOGFILE, log.name)
log = common.get_console_logger(log.name)

class LogFileTransfer(MdssyncThread):
    """
    Periodically rsync active BDB log files from the source MDS to the
    local temporary environment, checkpointing the last transferred log
    through status_mgr so a restarted run resumes where it left off.
    """
    # seconds between two log-sync rounds
    __LOG_CHECK_INTERVAL = 360

    def __init__(self, host, env, target_tmp_path, is_same_segment, status_mgr):
        """
        host            -- source MDS host name
        env             -- BDB environment path on the source host
        target_tmp_path -- local tmp env receiving the log files
        is_same_segment -- True when src/target share a network segment;
                           otherwise rsync is bandwidth limited
        status_mgr      -- persists the "last_transfer_log" checkpoint
        """
        super(LogFileTransfer, self).__init__()
        self.__host = host
        self.__src_path = env
        self.__target_tmp_path = target_tmp_path
        self.__is_same_segment = is_same_segment
        self.__status_mgr = status_mgr
        self.__bandwidth = RSYNC_BANDWIDTH

    def run(self):
        log.info("[Log] Start log file transfer")
        # transfer logs at intervals of self.__LOG_CHECK_INTERVAL until
        # stopped or a transfer round fails
        while not self.stop_event.isSet():
            result = self.transfer_log_files()
            if result == False:
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.ELOG, "Failed to do log transfer")
                break
            time.sleep(self.__LOG_CHECK_INTERVAL)

        log.info("[Log] Stop log transfer")

    def get_src_log_files(self):
        """
        Get the log file list from the src MDS host node regarding with
        env.
        Return: a list with the log file name as each element on success.
              an empty list for no log file.
              None for failure.
        """
        # We use db_archive -l to get all bdb logs.  Whether or not they are
        # involved in active transactions, we cannot simply increment the
        # highest inactive log to get the lowest active log because
        # db_archive may output nothing, so the set to transfer is computed
        # as ALL_LOG_SET minus the logs already checkpointed.

        # Get source side last archived log file before transfer db
        last_archived_log_file = self.__status_mgr.get_status("last_transfer_log")
        if last_archived_log_file == None:
            return None
        # Get all log files
        cmd = "db_archive -l -h %s" % (self.__src_path)
        status, output = common.ssh(self.__host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
        if status != 0:
            # 'db_archive -l' found no log file at all: nothing to sync.
            if "DB_ENV->log_archive: DB_NOTFOUND: No matching key/data pair found" in output:
                return []
            log.error("[LOG] Cannot get MDS log files list from %s:%s" % (self.__host, self.__src_path))
            return None
        # all_log_files has at least 1 element
        all_log_files = filter(None, output.split('\n'))
        all_log_files.sort()

        # Find the last checkpointed log's index; logs before it are already
        # transferred.  The checkpointed log itself is re-sent because it
        # may have grown since the previous round.
        try:
            if last_archived_log_file != self.__status_mgr.EMPTY:
                start_point = all_log_files.index(last_archived_log_file)
            else:
                start_point = 0
        # list.index raises ValueError when the checkpointed log has been
        # rotated out on the source side (previously a bare 'except:')
        except ValueError:
            log.error('[Log] last archived log file \'%s\' on source MDS %s was rotated out', last_archived_log_file, self.__host)
            log.error('[Log] Target MDS may have gone out of sync')
            log.info('[Log] Please increase "bdbNumLogFiles" for peer mds on %s', self.__host)
            return None

        # Ensure all log files that need to be archived will be synced.
        # E.g. all_log_file = [0, 1, 3, 5, 7, 9], start_point = 2
        # then to_transfer_log_files = all_log_file[2:] = [3, 5, 7, 9]
        to_transfer_log_files = all_log_files[start_point:]
        return to_transfer_log_files

    def transfer_log_files(self):
        """
        Transfer db logs using rsync (the plain rsync method, not the
        tar-rsync-untar method).
        Return False for failure, True for success.
        """
        log.info("[Log] Syncing log files")
        log_file_list = self.get_src_log_files()
        if log_file_list == None:
            return False
        # BUGFIX: an empty list is a documented return of
        # get_src_log_files; indexing [0] on it used to raise IndexError.
        # Nothing to sync this round is a success, not an error.
        if not log_file_list:
            log.info("[Log] No log file needs to be synced")
            return True

        log.info('[Log] First log file: %s' % log_file_list[0])
        log.info('[Log] Last log file:  %s' % log_file_list[-1])
        # we have cd'ed to target side tmp env, so LOG_FILE_LIST is in tmp env
        with open(LOG_FILE_LIST, 'w') as log_file_list_file:
            for log_file in log_file_list:
                log_file_list_file.write(log_file + '\n')
                log.debug('[Log] Log file to transfer: %s' % log_file)

        path = "%s:%s/" % (self.__host, self.__src_path)
        command = ['rsync', '-avz', '--progress', '--files-from=%s' % LOG_FILE_LIST, path, self.__target_tmp_path]
        if not self.__is_same_segment:
            # BUGFIX: the bare integer bandwidth used to be inserted into
            # argv; rsync expects a proper --bwlimit=<KB/s> option.
            command.insert(1, '--bwlimit=%s' % self.__bandwidth)
        result = rsync_with_retry(command)
        if result == True:
            # checkpoint the newest log so the next round resumes after it
            setstatus_result = self.__status_mgr.set_status("last_transfer_log", log_file_list[-1])
            if setstatus_result == False:
                log.error("Failed to set status management status, key: %s, value: %s", "last_transfer_log", log_file_list[-1])
                return False
            return True
        return False


class FileTransfer(MdssyncThread):
    """
    Manage the split, transfer and restore of all large db files, in async way.
    """
    __Splitter_COUNT  = 2
    __TRANSFER_COUNT = 1
    __RESTORER_COUNT = 2
    __CHECK_INTERVAL = 10

    def __init__(self, src_host, src_path, src_split_tmp_dir, target_tmp_path, is_same_segment, status_mgr):
        super(FileTransfer, self).__init__()
        self.__file_sync_status_lock = threading.Lock()
        self.__split_queue = Queue.Queue()
        self.__transfer_queue = Queue.Queue()
        self.__restore_queue = Queue.Queue()

        self.__host = src_host
        self.__src_path = src_path
        self.__src_tmp_dir = src_split_tmp_dir
        self.__target_tmp_path = target_tmp_path
        self.__is_same_segment = is_same_segment

        self.__Splitter_thread_pool = []
        self.__transfer_thread_pool = []
        self.__restorer_thread_pool = []

        self.__status_mgr = status_mgr

    def start(self):
        """
        start the SplitMergeWorker
        """
        #1. get status of "file_sync"
        status = self.__status_mgr.get_status("file_sync")
        if status == self.__status_mgr.DONE:
            log.warning("[File] File sync completed, skip this step")
            self.set_thread_status(status = MdssyncStatus.SUCCESS_END)
            return

        #2. get file list
        log.info("[File] Start file sync")
        small_db_file_list, large_db_file_list = self.__get_small_and_large_db_file_list()
        if small_db_file_list == None:
            log.error("[File] Failed to get db file list")
            self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.EFILE, "File: Failed to get db file list")
            return

        #3. init work queue
        for large_db_file in large_db_file_list:
            self.__split_queue.put(large_db_file)
        for i in xrange(self.__Splitter_COUNT):
            self.__split_queue.put(None)

        for small_db_file in small_db_file_list:
            src_db_file = os.path.join(self.__src_path, small_db_file)
            dest_db_file = os.path.join(self.__target_tmp_path, small_db_file)
            self.__transfer_queue.put((src_db_file, dest_db_file, False, 1))

        #4. init Splitter transfer and restorer, and start them
        for i in xrange(self.__Splitter_COUNT):
            Splitter = Splitter(self.__split_queue, self.__transfer_queue, self.__host,
                self.__src_path, self.__src_tmp_dir, self.__target_tmp_path, self.__status_mgr)
            self.__Splitter_thread_pool.append(Splitter)
            Splitter.start()

        for i in xrange(self.__TRANSFER_COUNT):
            transfer = Transfer(self.__transfer_queue, self.__restore_queue, self.__host, self.__is_same_segment, self.__status_mgr)
            self.__transfer_thread_pool.append(transfer)
            transfer.start()

        for i in xrange(self.__RESTORER_COUNT):
            restorer = Restorer(self.__restore_queue, self.__host, self.__target_tmp_path, self.__status_mgr, self.__file_sync_status_lock)
            self.__restorer_thread_pool.append(restorer)
            restorer.start()
        super(FileTransfer, self).start()

    def run(self):
        """
        FileTransfer do file Transfer
        """
        while not self.stop_event.isSet():
            #1. Check Splitter status
            any_Splitter_alive = False
            Splitter_error = False
            for Splitter in self.__Splitter_thread_pool:
                Splitterstatus = Splitter.get_thread_status()
                if Splitterstatus.status == MdssyncStatus.RUNNING:
                    any_Splitter_alive = True
                # Error happen, break
                elif Splitterstatus.status == MdssyncStatus.ERR:
                    log.error("[File] Error occur in Splitter %s", Splitter)
                    self.set_thread_status(MdssyncStatus.ERR, Splitterstatus.errcode, Splitterstatus.errmessage)
                    Splitter_error = True
                    break
            if Splitter_error == True:
                break
            # If no Splitter is alive, add transfer end condition
            if any_Splitter_alive == False:
                log.debug("[File] All Splitter completed task")
                self.__transfer_queue.put((None, None, None, None))

            #2. Check transfer status
            any_transfer_alive = False
            transfer_error = False
            for transfer in self.__transfer_thread_pool:
                transferstatus = transfer.get_thread_status()
                if transferstatus.status == MdssyncStatus.RUNNING:
                    any_transfer_alive = True
                # Error happen, break
                if transferstatus.status == MdssyncStatus.ERR:
                    log.error("[File] Error occur in transfer %s", transfer)
                    self.set_thread_status(MdssyncStatus.ERR, transferstatus.errcode, transferstatus.errmessage)
                    transfer_error = True
                    break
            if transfer_error == True:
                break
            # If no transfer is alive, add restorer end condition
            if any_transfer_alive == False:
                log.debug("[File] All transfer completed task")
                for i in xrange(self.__RESTORER_COUNT):
                    self.__restore_queue.put((None, None, None))

            #3 Check restore status
            any_restorer_alive = False
            restore_error = False
            for restorer in self.__restorer_thread_pool:
                restorerstatus = restorer.get_thread_status()
                if restorerstatus.status == MdssyncStatus.RUNNING:
                    any_restorer_alive = True
                # Error happen, break
                if restorerstatus.status == MdssyncStatus.ERR:
                    log.error("[File] Error occur in restorer %s", restorer)
                    self.set_thread_status(MdssyncStatus.ERR, restorerstatus.errcode, restorerstatus.errmessage)
                    restore_error = True
                    break
            if restore_error == True:
                break
            # No restorer is alive. File transfer complete successfully
            if any_restorer_alive == False:
                log.info("[File] File transfer Completed")
                self.set_thread_status(MdssyncStatus.SUCCESS_END)
                setstatus_result = self.__status_mgr.set_status("file_sync", self.__status_mgr.DONE)
                if setstatus_result == False:
                    log.error("Failed to set status management status, key: %s, value: %s", "file_sync", self.__status_mgr.DONE)
                    self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.EFILE, "File: Failed to set status_mgr status")
                    break
            time.sleep(self.__CHECK_INTERVAL) ## TODO isSet()

    def stop(self):
        """
        stop thread
        """
        for Splitter in self.__Splitter_thread_pool:
            Splitter.stop()
        for transfer in self.__transfer_thread_pool:
            transfer.stop()
        for restorer in self.__restorer_thread_pool:
            restorer.stop()
        self.stop_event.set()

    def __get_src_mds_db_file_list(self):
        """
          Get the MDS DB file list from the src MDS src_host node regarding with
          src_dir.

          Return: a list with the file name as each element on success.
                  None on failure.
        """
        # ignore subdirectories, log files and __db* files
        # use 'du -b' to get size in bytes
        log.info('[SyncDbEnv] Src dir: %s:%s', self.__host, self.__src_path)
        cmd = "find %s -maxdepth 1 -not -type d -and -not -path '*__db*' -and -not -path '*log*' | xargs -r du -b" % (self.__src_path)
        status, output = common.ssh(self.__host, cmd, retry_count = SSH_RETRY_COUNT, timeout =SSH_TIMEOUT)
        if status != 0:
            log.error("[Status] Cannot get MDS DB file list from %s:%s" % (self.__host, self.__src_path))
            return None

        else:
            try:
                # if the output is None, file_list is []
                file_list = filter(None, output.split('\n'))
                for i in xrange(len(file_list)):
                    # parse the output of 'du -b' command and get the file name and file size
                    # e.g. '16384   /mauimds-db/mds-4385d679-a907-45b7-acd7-121969e50caf/mds/mds_10402/master/<SOME_FILE>'
                    attrs = file_list[i].split()
                    file_list[i] = (int(attrs[0]), os.path.basename(attrs[1]))
                return file_list
            except Exception, msg:
                log.error("[Status] Failed to get MDS DB file list")
                log.debug("error message: %s", msg)
                return None

    def __get_small_and_large_db_file_list(self):
        # get db file names and their sizes
        # handle large and small db files separately
        raw_file_tuple_list = self.__get_src_mds_db_file_list()
        if raw_file_tuple_list is None:
            log.error("There was a problem. Fail to get MDS DB file from %s:%s" % (self.__host, self.__src_path))
            return (None, None)
        # saperate small and large db files
        large_db_file_list = []
        small_db_file_list = []
        for db_file_tuple in raw_file_tuple_list:
            if db_file_tuple[0] > SPLIT_FILE_SIZE_THRESHOLD:
                large_db_file_list.append(db_file_tuple[1])
            else:
                small_db_file_list.append(db_file_tuple[1])
        # record the large db list
        return small_db_file_list, large_db_file_list


class Splitter(MdssyncThread):
    """
    Splitter to split large file
    """
    BLOCK_BASE_NAME = 'block'
    BLOCK_TMP_NAME  = '.blocktmp'
    __Splitter_WAIT_SECONDS  = 20

    def __init__(self, input_queue, output_queue, host, src_path, src_split_tmp_dir, target_tmp_path, status_mgr):
        super(Splitter, self).__init__()
        self.__input_queue = input_queue
        self.__output_queue = output_queue
        self.__host = host
        self.__src_path = src_path
        self.__src_tmp_dir = src_split_tmp_dir
        self.__target_tmp_path = target_tmp_path
        self.__status_mgr = status_mgr
        self.__count = CHUNK_SIZE / BLOCK_SIZE

    def run(self):
        log.info("[Splitter] Splitter is running")
        while not self.stop_event.isSet():
            db_file = self.__input_queue.get(True)
            if db_file == None:
                log.info("[Splitter] Completed all task")
                self.set_thread_status(status = MdssyncStatus.SUCCESS_END)
                break

            cur_sts = self.__status_mgr.get_status(db_file)
            if cur_sts != self.__status_mgr.DONE:
                log.info("[Splitter] Will split %s", db_file)
                self.__split_file(db_file, cur_sts)
            else:
                log.info("[Splitter] %s sync completed, skip this file", db_file)

    def __split_file(self, db_file, cur_sts):
        """
        split one db_file
        """
        offset = 0
        last_chunk = False
        src_file = os.path.join(self.__src_path, db_file)
        dest_file = os.path.join(self.__target_tmp_path, db_file)
        split_dir_name = db_file + SPLIT_DIR_SUFFIX
        src_split_tmp_dir = os.path.join(self.__src_tmp_dir, split_dir_name)
        target_block_tmp_dir = os.path.join(self.__target_tmp_path, split_dir_name)

        #create the work temp directory
        create_tmp_dir(self.__host, src_split_tmp_dir)
        create_tmp_dir('localhost', target_block_tmp_dir)

        if cur_sts != self.__status_mgr.NO_KEY:
            log.info("[Splitter] %s synced incompletely, will continue", db_file)
            map_bits = cur_sts.split(',')
            split_status = [int(map_bit) for map_bit in map_bits]
            chunk_count = len(split_status)
        else:
            log.info("[Splitter] Will sync %s", db_file)
            db_size = get_src_size(self.__host, src_file)
            if db_file == None:
                log.error("[Splitter] Failed to get size of %s:%s", self.__host, src_file)
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.ESPLIT, "Fail to split file %s"% db_file)
                self.stop()
                return
            chunk_count = db_size / CHUNK_SIZE
            if chunk_count * CHUNK_SIZE < db_size:
                chunk_count = chunk_count + 1
            split_status = [0 for i in xrange(chunk_count)]
            status_string = ','.join([str(i) for i in split_status])
            setstatus_result = self.__status_mgr.set_status(db_file, status_string)
            if setstatus_result == False:
                log.error("Failed to set status management status, key: %s, value: %s", db_file, status_string)
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.ESPLIT, "Split: Fail to set status")
                return

        while not last_chunk:
            # Danimic check db_size
            # If current sync size is smaller then the db_size, add more chunk
            db_size = get_src_size(self.__host, src_file)
            if db_file == None:
                log.error("[Splitter] Failed to get size of %s:%s", self.__host, src_file)
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.ESPLIT, "Fail to split file %s"% db_file)
                self.stop()
                return
            while chunk_count * CHUNK_SIZE <= db_size:
                split_status.append(0)
                chunk_count += 1

            # last chunk check
            if offset * CHUNK_SIZE > db_size:
                last_chunk = True

            # 1 in split_status[offset] means this chunk has been successfully restored. Needn't to sync again
            if offset < chunk_count and split_status[offset] == 0:
                # Check the buff count in the src_split_tmp_dir
                # If the block count is bigger then the BUFF_BLOCK_COUNT, wait and recheck
                while (self.__get_block_count(src_split_tmp_dir) >= BUFF_BLOCK_COUNT):
                    log.info("[Splitter] Buff block count is larger then %s, wait for %s seconds and recheck", BUFF_BLOCK_COUNT, self.__Splitter_WAIT_SECONDS)
                    time.sleep(self.__Splitter_WAIT_SECONDS)

                tmp_block = os.path.join(src_split_tmp_dir, self.BLOCK_TMP_NAME + '-' +str(offset))
                blockname = '%s-%s.gz' % (self.BLOCK_BASE_NAME, offset)
                log.info("[Splitter] Spliting file %s, offset %s into %s", src_file, offset, blockname)

                # last_chunk, need to transfer the all left file blocks
                if last_chunk == True:
                    cmd = 'dd if=%s bs=%s skip=%s | gzip > %s' \
                        % (src_file, BLOCK_SIZE, offset*self.__count, tmp_block)
                else:
                    cmd = 'dd if=%s bs=%s skip=%s count=%s | gzip > %s' \
                        % (src_file, BLOCK_SIZE, offset*self.__count, self.__count, tmp_block)

                log.debug("[Splitter] <Run> %s", cmd)
                # Run the command
                (status, output) = common.ssh(self.__host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
                if status != 0:
                    log.error("[Splitter] Failed to split file %s", db_file)
                    log.debug(output)
                    self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.ESPLIT, "Fail to split file %s"% db_file)
                    break

                src_block = os.path.join(src_split_tmp_dir, blockname)
                dest_block = os.path.join(target_block_tmp_dir, blockname)
                cmd = "mv %s %s"% (tmp_block, src_block)
                status, output = common.ssh(self.__host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
                self.__output_queue.put((src_block, dest_block, True, chunk_count))
            offset += 1

        log.info("[Splitter] Finish split file %s", db_file)

    def __get_block_count(self, src_split_tmp_dir):
        """
        get split block count on src work directory
        """
        cmd = "ls -l %s 2>/dev/null | grep '%s*\|%s*' | wc -l"% (self.BLOCK_BASE_NAME, self.BLOCK_TMP_NAME, src_split_tmp_dir) ##TOTO 2 > /dev/null
        log.debug("[__get_block_count] cmd %s", cmd)
        status, output = common.ssh(self.__host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
        if status != 0:
            log.error("Failed to get block number on %s:%s", self.__host, src_split_tmp_dir)
            return  BUFF_BLOCK_COUNT
        else:
            log.debug("[__get_block_count] output %s", output)
            try:
                count = int(output)
            except Exception, e:
                log.debug("[__get_block_count] error message: %s", e)
                count = 0
            return count


class Transfer(MdssyncThread):
    """
    Transfer file
    input_queue unit format: (src_block, dest_block, islarge_file, chunk_count)
    islarge_file: True for a split chunk (forwarded to the restore queue
                  after transfer), False for a whole small db file.
    """
    def __init__(self, input_queue, output_queue, host, is_same_segment, status_mgr):
        super(Transfer, self).__init__()
        self.__input_queue = input_queue
        self.__output_queue = output_queue
        self.__host = host
        self.__is_same_segment = is_same_segment
        self.__status_mgr = status_mgr
        self.__bandwidth = RSYNC_BANDWIDTH

    def run(self):
        log.info("[Transfer] transfer start")
        while not self.stop_event.isSet():
            (src_block, dest_block, islarge_file, chunk_count) = self.__input_queue.get(True)
            # None is the termination sentinel
            if src_block == None:
                self.set_thread_status(status = MdssyncStatus.SUCCESS_END)
                log.info("[Transfer] All task completed")
                break
            file_name = os.path.basename(dest_block)

            if self.__status_mgr.get_status(file_name) != self.__status_mgr.DONE:
                log.info("[Transfer] rsync %s:%s to %s", self.__host, src_block, dest_block)
                command = ['rsync', '-avz', '--progress', '-e', 'ssh -o StrictHostKeyChecking=no', '%s:%s'
                        % (self.__host, src_block), '%s'% os.path.dirname(dest_block)]
                log.debug("[Transfer] <Run> %s", command)
                if not self.__is_same_segment:
                    # BUGFIX: 'command + self.__bandwidth' added a list and
                    # an int, raising TypeError; pass a real rsync
                    # --bwlimit=<KB/s> option instead.
                    command.insert(1, '--bwlimit=%s' % self.__bandwidth)

                result = rsync_with_retry(command)
                if not result:
                    log.error("[Transfer] Failed to transfer split blocks from %s:%s", self.__host, src_block)
                    self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.ETRANS, "Transfer: Failed to transfer split blocks from %s"% self.__host)
                    break

                if islarge_file:
                    # chunks still need to be restored into the db file
                    self.__output_queue.put((src_block, dest_block, chunk_count))
                else:
                    # small file is complete once transferred
                    setstatus_result = self.__status_mgr.set_status(file_name, self.__status_mgr.DONE)
                    if setstatus_result == False:
                        log.error("Failed to set status management status, key: %s, value: %s", file_name, self.__status_mgr.DONE)
                        self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.ETRANS, "Failed to set status_mgr status")
                        break
            else:
                log.info("[Transfer] %s has been successfully transfered, skip this file", file_name)


class Restorer(MdssyncThread):
    """
    Restorer: reassemble transferred chunks into db files.
    input_queue unit format: (src_block, dest_block, chunk_count)
    """
    def __init__(self, input_queue, host, target_tmp_path, status_mgr, mutux_lock):
        super(Restorer, self).__init__()
        self.__input_queue = input_queue
        self.__host = host
        self.__target_tmp_path = target_tmp_path
        self.__status_mgr = status_mgr
        # __restore_dict keeps the per-db_file restore status; key is the
        # db_file name, value is a dict with "chunk_count" and "status_map"
        self.__restore_dict = {}
        # shared across all Restorer threads; guards bitmap read/update/write
        self.__restore_mutux_lock = mutux_lock

    def run(self):
        log.info("[Restorer] restorer start")
        while True:
            (src_block, dest_block, chunk_count) = self.__input_queue.get(True)
            # None is the termination sentinel
            if dest_block == None:
                log.info("[Restorer] Finshed restore blocks")
                self.set_thread_status(status = MdssyncStatus.SUCCESS_END)
                break
            blockname = os.path.basename(dest_block)

            # blockname format: block-<block_num>.gz
            block_num_str = blockname.rsplit('-', 1)[-1].split('.', 1)[0]
            block_num = int(block_num_str)
            offset = block_num * CHUNK_SIZE
            seek = int(offset / BLOCK_SIZE)

            # dest_block format:
            # src_tmp_dir/db_filename.dbpostfix.split/blockname
            # pathsplit_list gets [db_filename, dbpostfix, split]
            pathsplit_list = os.path.basename(os.path.dirname(dest_block)).split('.')
            db_file = '.'.join([pathsplit_list[0], pathsplit_list[1]])
            dest_file = os.path.join(self.__target_tmp_path, db_file)

            # use 'conv=notrunc' so that dd will not truncate db file
            # when it is filling a block in the middle of the file
            log.info("[Restorer] Restore block %s into file %s", dest_block, db_file)
            cmd = 'gunzip -c %s | dd conv=notrunc bs=%s seek=%s of=%s' \
                % (dest_block, BLOCK_SIZE, seek, dest_file)
            log.debug('[Restorer] <RUN> %s', cmd)
            status, _ = commands.getstatusoutput(cmd)
            if status != 0:
                log.error('[Restorer] Failed to restore block %s to file %s', dest_block, dest_file)
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.EMERGE, "Failed to restore block for %s"% db_file)
                break
            # remove the src block to free the source-side buffer slot
            cmd = "rm %s -f"% src_block
            status, output = common.ssh(self.__host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
            if status != 0:
                self.result = mdssyncutility.EMERGE
                # BUGFIX: only self.result used to be set here, so the
                # supervisor never saw the error and waited forever; report
                # through the common thread-status channel as well.
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.EMERGE, "Failed to remove block %s:%s"% (self.__host, src_block))
                log.error("[Restorer] Failed to remove the block %s:%s", self.__host, src_block)
                break

            log.info("[Restorer] Restore block %s successfully", dest_block)

            # Update the shared restore bitmap under the mutex.
            # BUGFIX: the lock used to stay held when a status update failed
            # mid-critical-section (break with the lock acquired), which
            # deadlocked every other Restorer; try/finally guarantees release.
            self.__restore_mutux_lock.acquire()
            try:
                updated = self.__update_restore_status(db_file, block_num, chunk_count)
            finally:
                self.__restore_mutux_lock.release()
            if not updated:
                break

    def __update_restore_status(self, db_file, block_num, chunk_count):
        """
        Record that chunk block_num of db_file has been restored and persist
        the bitmap (or DONE when complete) via status_mgr.  The caller must
        hold the restore mutex.  Return False on failure after setting this
        thread's error status, True on success.
        """
        value = self.__status_mgr.get_status(db_file)
        if value == self.__status_mgr.NO_KEY:
            log.debug("[Restorer] Failed to get value in status_file")
            self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.EMERGE, "progress error")
            return False
        map_bits = value.split(',')
        status_map = [int(map_bit) for map_bit in map_bits]
        self.__restore_dict[db_file] = {"chunk_count": len(status_map),
                                        "status_map": status_map}
        # the splitter may have appended chunks since the bitmap was written;
        # grow the local view to the newest chunk count
        if self.__restore_dict[db_file]["chunk_count"] < chunk_count:
            self.__restore_dict[db_file]["chunk_count"] = chunk_count
            # BUGFIX: the comparison used to be inside len(), i.e.
            # len(status_map < chunk_count), which raises TypeError
            while len(self.__restore_dict[db_file]["status_map"]) < chunk_count:
                self.__restore_dict[db_file]["status_map"].append(0)

        self.__restore_dict[db_file]["status_map"][block_num] = 1
        if sum(self.__restore_dict[db_file]["status_map"]) == self.__restore_dict[db_file]["chunk_count"]:
            # every chunk restored: mark the whole file DONE
            setstatus_result = self.__status_mgr.set_status(db_file, self.__status_mgr.DONE)
            if setstatus_result == False:
                log.error("Failed to set status management status, key: %s, value: %s", db_file, self.__status_mgr.DONE)
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.EMERGE, "Restorer: Failed to set status")
                return False
        else:
            status_string = ','.join([str(i) for i in self.__restore_dict[db_file]["status_map"]])
            setstatus_result = self.__status_mgr.set_status(db_file, status_string)
            if setstatus_result == False:
                log.error("Failed to set status management status, key: %s, value: %s", db_file, status_string)
                self.set_thread_status(MdssyncStatus.ERR, mdssyncutility.EMERGE, "Restorer: Failed to set status")
                return False
        return True

class SystemChecker(object):
    """
    Run registered check commands periodically on background threads.

    Each job added via add_check_job() gets its own worker thread that
    invokes the callable every `interval` seconds, retrying up to
    __RETRY_COUNT times per round, until stop() is called.
    """
    # attempts per round before the round is declared failed
    __RETRY_COUNT = 5
    # seconds to sleep between retries within one round
    __RETRY_INTERVAL = 2
    def __init__(self):
        self.transfer_restore_thread_pool = []
        self.stop_event = threading.Event()

    def add_check_job(self, do, interval, *arg):
        """Register `do(*arg)` to be executed every `interval` seconds once start() is called."""
        log.debug('[SystemChecker] Add job do %s args %s', do, arg)
        check_thread = threading.Thread(target = self.__check_worker, args = (do,  interval, arg))
        self.transfer_restore_thread_pool.append(check_thread)

    def start(self):
        """Start all registered worker threads."""
        log.info('[SystemChecker] Start')
        for check_thread in self.transfer_restore_thread_pool:
            check_thread.start()

    def __check_worker(self, do, interval, args):
        """Worker loop: run `do(*args)` with retries every `interval` seconds until stop()."""
        result = True
        while not self.stop_event.isSet():
            for retry in xrange(self.__RETRY_COUNT):
                log.debug('[SystemChecker] %s times try do %s with args%s', retry, do, args)
                (status, output) = do(*args)
                if status == 0:
                    result = True
                    break
                else:
                    # BUG FIX: the original passed `status` for the
                    # "output %s" placeholder; log the command output.
                    log.warn('[SystemChecker]<FAILED> %s times do %s with args%s, output %s', retry, do, args, output)
                    time.sleep(self.__RETRY_INTERVAL)
                    result = False

            if result == False:
                log.error("[SystemChecker] FAILED to do %s with args %s", do, args)

            self.stop_event.wait(interval)

    def stop(self):
        """Signal all workers to stop and wait for them to exit."""
        log.debug('[SystemChecker] Stop')
        self.stop_event.set()
        for check_thread in self.transfer_restore_thread_pool:
            check_thread.join()

def run_command(cmd):
    """
    Execute *cmd* through the shell and return the (exit status, output) pair.
    """
    log.debug("[SystemChecker] run cmd %s", cmd)
    return commands.getstatusoutput(cmd)

def set_log_formatter():
    """
    Install per-handler log formats so log lines from different operations
    can be told apart: file handlers get a detailed format with timestamp,
    source location and PID; all other handlers (console) get a short
    colored "LEVEL: message" format.
    """
    detailed = logging.Formatter(
        "%(asctime)s %(filename)s:%(lineno)d - PID:%(process)d: - %(levelname)s: %(message)s")
    concise = logging_ext.ColoredFormatter("%(levelname)s: %(message)s")

    for handler in log.handlers:
        is_file_handler = isinstance(handler, logging.FileHandler)
        handler.setFormatter(detailed if is_file_handler else concise)

def check_maui_version(version_str):
    """
    Return True when *version_str* is a valid maui version: exactly 4 or 5
    dot-separated fields, every field purely numeric.
    """
    fields = version_str.split(".")
    if len(fields) not in (4, 5):
        return False
    return all(field.isdigit() for field in fields)

def get_maui_version(host = "127.0.0.1"):
    """
    Return the maui version string of *host*, read over ssh from
    /etc/maui/maui_version (the "-b" build separator is normalized to a
    dot).  Raises Exception when the file cannot be read or the string
    fails check_maui_version().
    """
    cmd = "test -f /etc/maui/maui_version && cat /etc/maui/maui_version | sed -e 's/-b/\\./'"
    status, output = common.ssh(host, cmd, retry_count=3)
    if status != 0:
        raise Exception("Failed to get version on %s: %s" % (host, output))
    version_str = output.strip()
    if not check_maui_version(version_str):
        raise Exception("ERROR: Invalid Version string %s" % version_str)
    return version_str

def start_mds_service(port):
    """Start the MDS service listening on *port*; return the service start code."""
    svc_name = mds_service.get_mds_service_name_by_port(port)
    return service.start_service(svc_name, log)

def get_src_size(host, src_path):
    """
    Return the size in bytes of the db env at host:src_path,
    or None when the remote `du` or its output parsing fails.
    """
    # `du -b -s` prints "<bytes>\t<path>"
    cmd = "/usr/bin/du -b -s %s" % src_path
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error("Cannot get size for %s:%s" % (host, src_path))
        return None
    try:
        return int(output.split()[0])
    except Exception:
        log.error("Failed to parse size for %s:%s" % (host, src_path))
        return None

def get_available_space(host, path):
    """
    Return the available disk space in bytes on *host* for the filesystem
    holding *path*.  Raises Exception on ssh failure or unparsable output.
    """
    cmd = "df -B1 %s" % path
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        raise Exception("Cannot get available space for %s:%s" % (host, path))
    # last line of `df` output: Filesystem Size Used Avail Use% Mounted-on
    fields = output.split('\n')[-1].split()
    try:
        return int(fields[3])
    except Exception:
        raise Exception("Failed to parse available space for %s:%s" % (host, path))

#TODO merge function check_local_available_disk_space and check_src_available_disk_space
def check_local_available_disk_space(host, src_path, tmp_target_path, ignore_space = False):
    """
    Check if there is enough space on the current disk for rsync and db_recover.

    Returns True when space is sufficient (or low but --ignore-space is set).

    NOTE(review): available space is measured for host:src_path and the
    tmp_target_path parameter is never used -- presumably the local target
    filesystem was meant to be checked; confirm with callers before changing.
    """
    src_size = get_src_size(host, src_path)
    if src_size is None:
        log.error("Failed to get size of %s:%s", host, src_path)
        return False

    available_space = get_available_space(host, src_path)
    log.info('[Status] Source size: %s bytes' % src_size)
    log.info('[Status] Local available space: %s bytes' % available_space)

    # comfortable headroom: ratio-based threshold
    if available_space > src_size * DISK_SPACE_THRESHOLD_RATIO_LOCAL:
        return True

    # not even src size plus the fixed reserve: hard failure
    if available_space <= src_size + EXTRA_DISK_SPACE:
        log.error("[Status] Not enough local disk space for mds sync")
        return False

    # low but workable: require the explicit --ignore-space override
    log.warn("[Status] LOW DISK SPACE!")
    if not ignore_space:
        log.warn("[Status] Use '--ignore-space' option to force mdssync run when disk space is low")
        return False
    log.warn('-=================== WARNNING ===================-')
    log.warn('| You are running mdssync with --ignore-space option.')
    log.warn('| Please make sure there is enough disk space on localhost.')
    log.warn('--------------------------------------------------')
    return True

def check_src_available_disk_space(host, src_path, ignore_space = False):
    """
    Check if there is enough space on the source node's disk for the sync.

    Returns True when space is sufficient or --ignore-space is set.
    """
    src_size = get_src_size(host, src_path)
    if src_size is None:
        log.error("Failed to get size of %s:%s", host, src_path)
        return False

    available_space = get_available_space(host, src_path)
    log.info('[Status] Source size: %s bytes' % src_size)
    log.info('[Status] Source available space: %s bytes' % available_space)

    if available_space > src_size * DISK_SPACE_THRESHOLD_RATIO_SRC:
        return True

    if ignore_space:
        # user explicitly accepted the low-space risk
        log.warn('-=================== WARNNING ===================-')
        log.warn('| You are running mdssync with --ignore-space option.')
        log.warn('| Please make sure there is enough disk space on')
        log.warn('| %s:%s' % (host, src_path))
        log.warn('--------------------------------------------------')
        return True

    log.warn("[Status] Use '--ignore-space' option to force mdssync run when src disk space is low")
    log.error("[Status] Not enough src disk space for mds sync on %s" % host)
    return False


def validateUUID(UUID, target_path):

    """
    From target_path extract the UUID and validate the UUID
    Example of env root path is here :
    "/mauimds-db/mds-6c1082d0-1fbd-492a-b75c-c2e8ffd51e21/master"
    """

    path_elements = target_path.split('/')
    if len(path_elements) > 3:
        fsud = path_elements[2]
    else:
        log.error("[Status] could not get fsud from target path %s" % target_path)
        return False

    # Check the config file to make sure if the UUID that is passed is the
    # same as the extracted UUID value in bdbxmlEnvRoot
    # If they are different then go to the next port
    if  UUID != "" and UUID != string.lstrip(fsud, "mds-"):
        log.error("[Status] fs uuid not consist in mds config file, skip this mds")
        return False

    return True

def check_same_segment(host):
    """
    Return (True, mgmt_host) when *host* has a management-network alias in
    /etc/hosts (faster transfer path), otherwise (False, host).
    """
    mgmt_host = host + MGMT_HOST_SUFFIX
    cmd = "grep -e ' %s$' %s" % (mgmt_host, HOSTS_FILE)
    status, _ = commands.getstatusoutput(cmd)
    if status == 0:
        log.info("[Status] Use mgmt interface for transfer")
        return True, mgmt_host
    return False, host

def is_mds_stopped(port):

    """
    Return True when the MDS service on *port* is not running.
    """
    mds_svc_name = mds_service.get_mds_service_name_by_port(port)
    # Query the *running* state rather than is_svc_stopped(): when the mds
    # is disabled, is_svc_stopped() can mis-report the service as running.
    # A failed query is treated as "not running".
    try:
        return not service.is_svc_running(mds_svc_name)
    except Exception:
        return True

def is_mds_initialized(host, port):
    """
    Return True when mauimds reports the node host:port as initialized.

    The egrep drops the expected chatter; any surviving line means the node
    reported something other than initialized (grep exits 0 => not ready).
    """
    cmd = "mauisvcmgr -s mauimds -c mauimds_isNodeInitialized -m %s:%s | egrep -vi 'sending|running|true|^$'" \
        % (host, port)
    status, _ = commands.getstatusoutput(cmd)
    return status != 0

def wait_mds_initialized(host, port):
    """
    Poll until the MDS at host:port reports initialized.  Returns True on
    success, False after INITED_RETRY_TIMES attempts.
    """
    log.info('[Status] Wait for MDS %s to be initialized' % port)
    attempts = 0
    while attempts < INITED_RETRY_TIMES:
        attempts += 1
        time.sleep(INITED_RETRY_INTERVAL)
        if is_mds_initialized(host, port):
            return True
    return False

def set_mds_status(port, action):

    """
    Enable or disable the MDS on *port* via its owning service
    (mauiremoterep for remote ports, mauimds otherwise).

    `action` must be "enable" or "disable".  Returns True on success;
    raises Exception on invalid input or command failure.
    """
    if action not in ["enable", "disable"]:
        raise Exception("Invalid action")

    if mds_service.is_remote_mds_port(port):
        service_name = 'mauiremoterep'
    elif mds_service.is_valid_mds_port(port):
        service_name = 'mauimds'
    else:
        raise Exception('Invalid port number: %s' % port)

    log.info("[Status] %s MDS: %d" % (action, port))

    cmd = "service %s %s %s" % (service_name, action, port)
    status, output = commands.getstatusoutput(cmd)
    if status == 0:
        return True
    raise Exception("Failed to %s mds %s\n" % (action, port) + cmd + "\n" + output + "\n")

def get_db_env_by_port(host, portnum):
    """
    Return (True, env_root) for the db env configured for MDS host:portnum,
    or (False, None) when the config cannot be read or the path is missing.
    """
    # The EnvRoot line carries the path as the second quoted attribute value
    cmd = "grep EnvRoot /etc/maui/mds/%s/mds_cfg.xml "% portnum
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error("[Status] Failed to get mount point from peer, skip this mds")
        return False, None
    src_path = output.split('"')[3]
    log.info("%s:%s, db env %s", host, portnum, src_path)

    # Verify the env directory actually exists on the peer
    cmd = "test -d %s"% src_path
    status, output = common.ssh(host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error("[Status] Db env %s for %s:%s does not exist", src_path, host, portnum)
        return False, None
    return True, src_path

def _mdscfg_get_peer_port(peer_host, rep_port):
    """
    Map a replication port to its MDS port on *peer_host*.

    `mauisvcmgr -c mauimds_getrepport` cannot be used on 2.1.x: it times
    out when only one mds in the set is running.  Instead, grep the
    replication port in all mds_cfg.xml files and take the <port> component
    of the matching path /etc/maui/mds/<port>/mds_cfg.xml.
    """
    cmd = "grep %s /etc/maui/mds/*/mds_cfg.xml | grep replicationPort" % rep_port
    status, output = common.ssh(peer_host, cmd, retry_count=SSH_RETRY_COUNT, timeout=SSH_TIMEOUT)
    if status != 0:
        log.error(output)
        raise Exception('Failed to get mds port from peer host %s' % peer_host)
    return int(output.split('/')[4])

def _mdscfg_get_mdsset_members(mds_config):
    """
    Parse and return a list of MDS members (host, port) of the mds set
    or an empty list [] if failed to parse any MDS member

    mds_config: parsed mds_cfg.xml properties object.
    Raises Exception when the config still carries the unconfigured
    placeholder replication port "0000".
    """
    mdsset_members = []

    # master of the set, as seen from this node's config
    peer_host = mds_config.get_value('masterHost')
    rep_port = mds_config.get_value('masterReplicationPort')

    if  rep_port == "0000":
        raise Exception("This mds has not been configured yet, ignore")

    try:
        peer_port = _mdscfg_get_peer_port(peer_host, rep_port)
        mdsset_members.append((peer_host, peer_port))
        log.info('[Status] MDS set member: %s:%s' % (peer_host, peer_port))
    except Exception, e:
        # this node may be down
        log.error(e)

    # 2nd slave MDS and/or remote MDS
    # 'otherReplicas' is a whitespace-separated list of "host:repport" pairs
    other_replicas_str = mds_config.get_value('otherReplicas')
    if other_replicas_str:
        for other_replica in other_replicas_str.split():
            [peer_host, rep_port] = other_replica.split(':')
            # only 105xx replication ports are taken here; 107xx marks a
            # remote MDS (handled by the branch below)
            if rep_port.startswith('105'):
                try:
                    peer_port = _mdscfg_get_peer_port(peer_host, rep_port)
                    mdsset_members.append((peer_host, peer_port))
                    log.info('[Status] MDS set member: %s:%s' % (peer_host, peer_port))
                except Exception, e:
                    # this node may be down
                    log.error(e)

    # for Atmos before version 2.1.0, there is no 'otherReplicas' field in mds_cfg.xml
    # if we are on a remote node, we can only see the master host from mds_cfg.xml
    # we should parse the mds_cfg.xml of the master node
    # to get the 3rd (slave) MDS
    replication_port = mds_config.get_value('replicationPort')
    if replication_port.startswith('107'):
        # it is a remote MDS
        try:
            if get_maui_version() < '2.1':
                # NOTE: peer_host/peer_port are whatever was last assigned
                # above -- i.e. the master parsed from this config
                mds_cfgfile = '/etc/maui/mds/%s/mds_cfg.xml' % peer_port
                cmd = 'cat %s' % mds_cfgfile
                status, output = common.ssh(peer_host, cmd)
                if status == 0:
                    # this recursion will only happen on a remote node
                    # the recursion will stop when called for a local MDS
                    # so the max recursion depth is 2
                    xmlconfObj = mdssyncutility.Properties(output)
                    mdsset_members.extend(_mdscfg_get_mdsset_members(xmlconfObj))
                else:
                    raise Exception('Failed to parse mds cfg file from %s:%s' % (peer_host, peer_port))
        except Exception, e:
            log.error(e)

    # de-duplicate members collected via multiple paths
    return list(set(mdsset_members))

def _mdscfg_filter_running_master(mds_list):
    """
    Scan a list of (host, port) MDS tuples and return the first member that
    reports itself as master, or (None, None) when none does.
    """
    for mds in mds_list:
        cmd = "mauisvcmgr -s mauimds -c mauimds_ismaster -m %s:%s | grep mauimds_ismaster=yes" % mds
        status, _ = commands.getstatusoutput(cmd)
        if status == 0:
            log.info('[Status] Running master: %s:%s' % mds)
            return mds
    return (None, None)

def _mdscfg_filter_running_mds(mds_list):
    """
    Scan a list of (host, port) MDS tuples and return the first member that
    reports itself as initialized, or (None, None) when none does.
    """
    for mds in mds_list:
        cmd = "mauisvcmgr -s mauimds -c mauimds_isNodeInitialized -m %s:%s | grep Initialized=true" % mds
        status, _ = commands.getstatusoutput(cmd)
        if status == 0:
            log.info('[Status] Running mds: %s:%s' % mds)
            return mds
    return (None, None)

def _mdscfg_get_db_env_and_running_peer(port, forcerun = False):
    """
    Resolve the db env root of local MDS *port* and pick a running peer to
    sync from.  A running master is preferred; with *forcerun* a running
    slave is accepted as a fallback.

    Returns (ok, db_env, peer_host, peer_port); ok is False when no usable
    peer was found or the config could not be parsed.
    """
    try:
        mds_config = common.Properties("/etc/maui/mds/%d/mds_cfg.xml" % port)
        db_env = mds_config.get_value('bdbxmlEnvRoot')

        set_members = _mdscfg_get_mdsset_members(mds_config)

        (peer_host, peer_port) = _mdscfg_filter_running_master(set_members)
        if peer_host is None:
            log.warn('[Status] No running master mds')
            if not forcerun:
                log.warn('[Status] Use --force option to sync from running slave MDS')
            else:
                # --force: fall back to any initialized (slave) MDS
                (peer_host, peer_port) = _mdscfg_filter_running_mds(set_members)
                if peer_host:
                    log.warn('-=================== WARNNING ===================-')
                    log.warn('| You are running mdssync with --force option.')
                    log.warn('| There is no running MDS master in the MDS set.')
                    log.warn('| Will sync form MDS %s:%s' % (peer_host, peer_port))
                    log.warn('| Please make sure MDS %s:%s is in healthy status' % (peer_host, peer_port))
                    log.warn('--------------------------------------------------')
                else:
                    log.error('[Status] No initialized local MDS in the current MDS set')
                    log.error('[Status] Please escalate to L4')

        return (peer_host is not None, db_env, peer_host, peer_port)
    except Exception as e:
        log.error('[Status] %s' % e)
        return (False, None, None, None)

def rsync_with_retry(command):
    """
    Run the rsync *command* (argv list) up to RSYNC_MAX_RETRY_COUNT times.

    Exit code 24 ("vanished source files") is tolerated and counts as
    success.  Returns True on success, False otherwise.
    """
    log.info("[Rsync] New rsync command %s" % command)
    try:
        for attempt in range(1, RSYNC_MAX_RETRY_COUNT + 1):
            proc = subprocess.Popen(command)
            proc.wait()

            if proc.returncode == 0:
                log.info("[Rsync] Rsync succeeded")
                return True
            if proc.returncode == 24:
                log.warn("[Rsync] Ignore error code: 24 vanished file error")
                return True
            log.error("[Rsync] %d time rsync failed, error code: %s" % (attempt, proc.returncode))
    except Exception as e:
        log.error('[Rsync] %s' % e)
    return False

def check_instance_exist(host, pid):
    """
    Check whether process *pid* is alive on *host* (via ps over ssh).

    Returns True when the process exists, False when it does not,
    None when the remote check itself failed.
    """
    cmd = "ps -ef | awk '\$2 == %s'"% pid
    (status, output) = common.ssh(host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
    if status != 0:
        log.error("Failed to check instance %s on %s", pid, host)
        log.debug("Error message %s: ", output)
        return None
    # empty awk output => no process with that pid
    return output != ""

def get_disk_path(disk_uuid):
    """
    Resolve the mount path of the disk identified by *disk_uuid* via fstab.

    Returns (code, path): (EUNMOUNED, "") when the fs is not mounted,
    (ECOMMON, "") when the fstab lookup fails, (SUCCESS, path) otherwise.
    NOTE: This return code is used by DEV's code. Before redefine the return code, please contact them.
    """
    if not is_mounted(disk_uuid):
        return(mdssyncutility.EUNMOUNED, "")
    cmd = 'test -f /etc/fstab && grep \'%s\' /etc/fstab'% disk_uuid
    (status, output) = commands.getstatusoutput(cmd)
    if status != 0:
        log.error("Failed to get disk path with FSUUID %s", disk_uuid)
        return(mdssyncutility.ECOMMON, "")
    # fstab line: <device> <mountpoint> <fstype> ...
    return(mdssyncutility.SUCCESS, output.split()[1])

def is_mounted(FSUUID):

    """
    Check /proc/mounts to see whether *FSUUID* is mounted.

    We deliberately do not mount it ourselves: if it is not mounted the
    sync aborts so CE folks can mount the FSUUID first.
    """

    log.info("FSUUID %s" % (FSUUID))
    mounted = os.system("grep -q '%s ' /proc/mounts" % (FSUUID)) == 0
    if mounted:
        log.info("%s is mounted" % (FSUUID))
    else:
        log.error("abort: %s is not mounted" % (FSUUID))
    return mounted

def create_tmp_dir(host, tmp_dir):
    """
    Create *tmp_dir* on *host* over ssh if it does not already exist.
    Returns True on success, False on failure.
    """
    cmd = "src_tmp_dir=%s; test -d \\$src_tmp_dir || mkdir \\$src_tmp_dir" % tmp_dir
    status, output = common.ssh(host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
    if status != 0:
        log.error('[TmpEnv] Failed to create temp dir on peer node')
        log.error('[TmpEnv] ' + output)
        return False
    log.info("[TmpEnv] Create temp dir on peer node succeeded")
    return True

def add_ss_destage(checker, host, src_path, target_path):
    """
    add both src side and target side ss_destage cmd to checker
    @return: False for failure
             True  for success
    """
    #1 get src side uuid
    UUID_PATTERN = re.compile('(\w{8}-\w{4}-\w{4}-\w{4}-\w{12})')
    match = UUID_PATTERN.search(src_path)
    if not match:
        log.error("[SS_DESTAGE] Failed to get src side UUID from %s", src_path)
        return False
    src_UUID = match.group(1)

    #2 get target side uuid
    match = UUID_PATTERN.search(target_path)
    if not match:
        log.error("[SS_DESTAGE] Failed to get target side UUID from %s", target_path)
        return False
    target_UUID = match.group(1)

    log.info("%s %s", src_UUID, target_UUID)

    #3 add job to checker
    # src_ss_destage_cmd and target_ss_destage_cmd will alway return 0 whether run the cmd successful or failed.
    src_ss_destage_cmd = 'ssh %s "mauisvcmgr -s mauiss -c mauiss_disk_destage_set -a \'fsuuid=%s, duration=%s\'";echo $?'% (host, src_UUID, SS_DESTAGE_DURATION)
    target_ss_destage_cmd = "mauisvcmgr -s mauiss -c mauiss_disk_destage_set -a 'fsuuid=%s, duration=%s';echo $?"% (target_UUID, SS_DESTAGE_DURATION)
    checker.add_check_job(run_command, SS_CHEAK_INTERVAL, src_ss_destage_cmd)
    checker.add_check_job(run_command, SS_CHEAK_INTERVAL, target_ss_destage_cmd)
    # BUG FIX: the documented success value was missing -- the function
    # used to fall off the end and return None on success.
    return True

def validate_and_create_env(target_path, tmp_target_path):
    """
    Validate the target env root and prepare the temporary transfer env.

    Returns True when the tmp env is usable (empty, reusable with -R, or
    freshly created); False when the target is non-empty, the tmp env is
    unusable, or creation fails.
    """
    # Never touch a non-empty target: the operator must back it up first.
    if os.path.isdir(target_path) and len(os.listdir(target_path)) != 0:
        log.error("[TmpEnv] path %s not empty, skip this mds. Please back up the directory before perform mdssync" % (target_path))
        return False

    # Files are first transferred into a tmp env and renamed into the
    # target env afterward.
    if os.path.exists(tmp_target_path):
        if not os.path.isdir(tmp_target_path):
            log.error("[TmpEnv] tmp env %s exists but is not a directory", tmp_target_path)
            return False
        if len(os.listdir(tmp_target_path)) == 0:
            return True
        if REUSE == True:
            log.warning("[TmpEnv] REUSE option specified. Will reuse current work directory %s", tmp_target_path)
            return True
        log.error("[TmpEnv] Directory %s already exists, please clean up the directory or use -R option to reuse the directory", tmp_target_path)
        return False

    # tmp env does not exist: create it
    try:
        log.info("[TmpEnv] tmp env %s does not exist, creating a new dir" % (tmp_target_path))
        os.mkdir(tmp_target_path)
    except OSError as msg:
        log.error("[TmpEnv] Error creating the tmp env %s. Error Message:%s"%(tmp_target_path, msg) )
        return False
    return True

def get_src_tmp_env(src_path):
    """
    Return the mdssync temp env path that sits beside *src_path* on the
    peer node: <dir>/<SRC_TMP_ENV_PREFIX><basename>.
    """
    parent, base = os.path.split(src_path)
    return os.path.join(parent, SRC_TMP_ENV_PREFIX + base)

def get_local_tmp_env(db_env):
    """
    Return the mdssync temp env path that sits beside *db_env* on the
    local host: <dir>/<LOCAL_TMP_ENV_PREFIX><basename>.
    """
    parent, base = os.path.split(db_env)
    return os.path.join(parent, LOCAL_TMP_ENV_PREFIX + base)

def get_src_checklsn(host, src_path, status_mgr):
    """
    Record the last archived BDB log file on the source side as the
    "last_transfer_log" marker in *status_mgr* (EMPTY when there is none).
    Skipped when a marker is already recorded.  Returns False only when
    the remote log listing fails.
    """
    log_sn = status_mgr.get_status("last_transfer_log")
    if log_sn != status_mgr.NO_START:
        # a previous run already recorded the checkLSN marker
        log.warning("CheckLSN has already been done, skip this step")
        log.debug("last_log_file: %s", log_sn)
        return True

    last_log_file = get_last_archived_log_file(host, src_path)
    if last_log_file is None:
        return False
    if last_log_file == "":
        status_mgr.set_status("last_transfer_log", status_mgr.EMPTY)
    else:
        status_mgr.set_status("last_transfer_log", last_log_file)
    log.debug("last_log_file: %s", last_log_file)
    return True

def get_last_archived_log_file(host, env):

    """
    Return the archived BDB log file with the largest log sequence at
    host:env, "" when no log has been archived, or None on failure.
    TODO: refine the failure return value.
    """
    log.info('[Log] Get last archived BDB log file at %s:%s' % (host, env))

    # db_archive lists every archived bdb log, one name per line
    cmd = "db_archive -h %s" % (env)
    status, output = common.ssh(host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
    if status != 0:
        log.error("[Log] Cannot get MDS log files list from %s:%s" % (host, env))
        return None
    if not output:
        return ""
    # lexically greatest name == highest sequence (as the original relied on)
    return max(output.split('\n'))

def rsync_db_env_and_log_files(host, src_path, src_split_tmp_dir, target_tmp_path, status_mgr):
    """
      Sync Db files and log files to current directory.

    Starts a LogFileTransfer thread and a FileTransfer thread, then polls
    both until the file transfer completes or either thread reports an
    error.  Returns mdssyncutility.SUCCESS or the failing thread's errcode.
    """

    # check if the peer node is in the same IS
    # (when reachable via the mgmt network, `host` is swapped for its mgmt alias)
    is_same_segment, host = check_same_segment(host)

    # first sync db files
    # handle large and small db files separately

    log_transfer = LogFileTransfer(host, src_path, target_tmp_path, is_same_segment, status_mgr)
    log_transfer.start()
    # use split-merge for large db files
    # result = startTransferLargeDBFiles(large_db_file_list, host, src_path, is_same_segment)
    file_transer = FileTransfer(host, src_path,
        src_split_tmp_dir, target_tmp_path, is_same_segment, status_mgr)
    file_transer.start()

    # Poll both workers; loop exits on the first error from either thread,
    # or when the file transfer reports SUCCESS_END (the log transfer keeps
    # running until stop() below).
    result = mdssyncutility.SUCCESS
    while True:
        logstatus = log_transfer.get_thread_status()
        if logstatus.status == logstatus.ERR:
            log.error("[SyncDbEnv] Failed to sync dbENV")
            log.debug("[SyncDbEnv] Error message: %s, %s", logstatus.errcode, logstatus.errmessage)
            result = logstatus.errcode
            break

        filestatus = file_transer.get_thread_status()
        if filestatus.status == filestatus.ERR:
            log.error("[SyncDbEnv] Failed to sync dbENV")
            log.debug("[SyncDbEnv] Error message: %s, %s", filestatus.errcode, filestatus.errmessage)
            result = filestatus.errcode
            break

        if filestatus.status == filestatus.SUCCESS_END:
            log.info("[SyncDvEnv] Do db env sync successfully")
            break
        time.sleep(RSYNC_CHECK_INTERVAL)

    # stop both workers regardless of outcome
    log_transfer.stop()
    file_transer.stop()
    return result

def create_src_temp_dir(host, src_tmp_dir):
    """
    Check and create the temp directory on peer node

    Returns True when the directory is ready (freshly created, already
    empty, or cleaned up after an abandoned run); False on failure or when
    another live mdssync instance owns the directory.
    """
    # Check if tmp dir exists on src node
    cmd = "ls -A %s" % src_tmp_dir
    status, output = common.ssh(host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
    if status == 0:
        if output:
            # tmp dir exists
            # need to make sure reuse option is set by user
            log.warning('[TmpEnv] %s:%s already exists and is not empty' % (host, src_tmp_dir))
            # look for the "<host>.<pid>.work" marker dropped by create_work_file()
            cmd = "find %s -name '*work'"% src_tmp_dir
            sta, out = common.ssh(host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
            if sta != 0:
                log.error("[TmpEnv] Failed to check %s status", src_tmp_dir)
                return False
            else:
                if out:
                    # marker basename encodes its owner: <host>.<pid>.work
                    host_pid = out.split('/')[-1].split('.')
                    host_src = host_pid[0].strip()
                    pid = host_pid[1].strip()
                    result = check_instance_exist(host_src, pid)
                    if result == None:
                        return False
                    if result == True:
                        log.error("Another instance already running on this mdsset, quiting")
                        return False
            # owner process is gone: the directory is leftover from a dead run
            log.info("[TmpEnv] No instance running on this directory%s:%s. Will clean up it", host, src_tmp_dir)
            #clean the tmpEnv
            cmd = "rm %s/* -rf"% src_tmp_dir
            sta, out = common.ssh(host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
            if sta != 0:
                log.error("[TmpEnv] Failed to clean up %s ", src_tmp_dir)
                return False
            else:
                return True

    elif not 'No such file or directory' in output:
        # `ls` failed for a reason other than the dir being absent
        log.error('[TmpEnv] Failed to check temp dir on peer node')
        log.error('[TmpEnv] ' + output)
        return False

    # directory is absent (or exists but empty): (re)create it
    cmd = "src_tmp_dir=%s; test -d \\$src_tmp_dir || mkdir \\$src_tmp_dir" % src_tmp_dir
    status, output = common.ssh(host, cmd, retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
    if status == 0:
        log.info("[TmpEnv] Create temp dir on peer node succeeded")
        return True
    else:
        log.error('[TmpEnv] Failed to create temp dir on peer node')
        log.error('[TmpEnv] ' + output)
        return False


def cleanup_local_tmp_env(target_path):
    """
    Best-effort removal of mdssync working files under *target_path*:
    the transferred-log list file and every *.split chunk directory.
    Individual failures are logged and skipped so cleanup keeps going.
    """
    # clean up local tmp data first
    log.info("[TmpEnv] Clean up %s tmp directories on local host..." % SPLIT_DIR_SUFFIX)

    log.info('[TmpEnv] Clean up log file list')
    log_list_path = os.path.join(target_path, LOG_FILE_LIST)
    try:
        os.remove(log_list_path)
    except OSError as e:
        # BUG FIX: a missing log-list file used to raise out of the cleanup
        # path; treat removal as best-effort like the split dirs below.
        if e.errno != errno.ENOENT:
            log.warn("[TmpEnv] Clean up %s failed: %s, please remove manually later"
                % (log_list_path, e))

    tmp_dirs = glob.glob("%s/*%s" % (target_path, SPLIT_DIR_SUFFIX))
    for split_dir in tmp_dirs:
        if os.path.isdir(split_dir):
            try:
                log.info("[TmpEnv] Clean up split directory %s..." % split_dir)
                shutil.rmtree(split_dir)
            except Exception as e:
                log.error('[TmpEnv] %s' % e)
                log.warn("[TmpEnv] Clean up local tmp dir %s failed, please remove manually later" \
                    % split_dir)

def cleanup_remote_tmp_env(host, src_path):
    """
    Remove the mdssync temp directory created next to *src_path* on the
    peer node.  Failure is only logged (manual cleanup required).
    """
    src_tmp_env = get_src_tmp_env(src_path)
    log.info("[TmpEnv] Clean up tmp directory at %s:%s..." % (host, src_tmp_env))
    status, _ = common.ssh(host, "rm -rf %s" % (src_tmp_env),
                           retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
    if status != 0:
        log.warn("[TmpEnv] Clean up remote tmp directory %s:%s failed, please remove manually later" \
            % (host, src_tmp_env))

def move_tmp_env(tmp_target_path, target_path):
    """
    create target directory and move the content of the temp directory into target

    Hidden entries (e.g. the .status bookkeeping file) are intentionally
    left behind -- matching the `ls` behavior of the previous shell
    implementation -- and are deleted together with the tmp directory.
    Returns True on success, False on failure.
    """
    result = True
    try:
        if os.path.isfile(target_path):
            raise Exception('%s is a regular file' % target_path)
        if not os.path.isdir(target_path):
            os.mkdir(target_path)

        # BUG FIX: the old pipeline `ls tmp | xargs -i -r mv {} target`
        # handed bare basenames to mv, so it only worked when the current
        # working directory happened to be tmp_target_path.  Move each
        # entry explicitly instead.
        for entry in os.listdir(tmp_target_path):
            if entry.startswith('.'):
                # `ls` (without -A) skipped dotfiles; keep that behavior
                continue
            shutil.move(os.path.join(tmp_target_path, entry),
                        os.path.join(target_path, entry))
        shutil.rmtree(tmp_target_path)
    except Exception as e:
        result = False
        log.error(e)
        log.error("Failed to create target MDS env")
    return result

def create_work_file(host, src_split_tmp_dir):
    """
    Create a work file named <localhostname>.<pid>.work inside the
    temporary split directory on the source node (via ssh touch).
    return: True for mdssyncutility.SUCCESS
            False for Failed
    """
    marker = "%s.%s.work" % (socket.gethostname(), os.getpid())
    workfile = os.path.join(src_split_tmp_dir, marker)

    rc, _ = common.ssh(host, "touch %s" % workfile,
                       retry_count = SSH_RETRY_COUNT, timeout = SSH_TIMEOUT)
    if rc != 0:
        log.error("Fail to create work file")
        return False
    log.info("Create work file%s on %s:%s", workfile, host, src_split_tmp_dir)
    return True

def create_status_file(tmp_target_path):
    """
    Build the status manager backed by the hidden status file inside the
    temporary target env; it tracks progress of the sync steps in
    STATUS_LIST so an interrupted run can be detected/resumed.
    """
    return mdssyncutility.Status_Mgr(STATUS_LIST,
                                     os.path.join(tmp_target_path, STATUS_FILE))

def do_port_sync(port, UUID):

    """
    #  Below are the steps involved in doing the port sync
    #1. Get the env root for the port and the mds peer host and port, along with check mdssync version
    #2. Validate UUID
    #3. Get the peer env root
    #4. Check the env root existence. And create the local tmp env
    #5. Check and create tmp env on souce node
    #6. Check free disk space
    #7. rsync the DB from the peer to this node
    #8. Run db_recover -cv
    #9. Enable and start the service
    #10. Wait until MDS service is initialized
    #11. Clean up tmp env on both remote and local host
    """

    single_instance_lock = mdssyncutility.SingleInstance('mdssync', port)

    try:
        system_checker = None
        # First, Make sure the mds is stopped
        ret_code = is_mds_stopped(port)
        if ret_code == False:
            raise Exception('MDS %d is running' % (port))

        #1. Get the env root for the port and the mds peer host and port, check the mdssync version
        ret_code, target_path, host, portnum = _mdscfg_get_db_env_and_running_peer(port, FORCE_RUN)
        if ret_code == False:
            raise MdssyncException(mdssyncutility.ENPEER, 'Failed to get valid peer host and port')

        #2. Validate UUID
        ret_code = validateUUID(UUID, target_path)
        if ret_code == False:
            raise MdssyncException(mdssyncutility.EUNMOUNED, 'Invalic UUID')

        #3. Get the peer env root
        # Now, get the PEER mount path from the peer host and portnum
        ret_code, src_path = get_db_env_by_port(host, portnum)

        if ret_code == False:
            raise Exception('Failed to get mount path for host %s, port %d' % (host, portnum))

        # use a temp env for db files and log files transfer
        # rename to target env when transfer is done
        tmp_target_path = get_local_tmp_env(target_path)
        tmp_src_path = get_src_tmp_env(src_path)

        #4. Check the env root existence. And create the local tmp env
        ret_code = validate_and_create_env(target_path, tmp_target_path)
        if ret_code == False:
            raise Exception('Failed to create local tmp env')
        #5. Check and create tmp env on souce node
        ret_code = create_src_temp_dir(host, tmp_src_path)
        if ret_code == False:
            raise Exception('Failed to create source tmp env')
        # create work file
        ret_code = create_work_file(host, tmp_src_path)
        if ret_code == False:
            raise Exception('Failed to create work file')
        # create status
        status_mgr = create_status_file(tmp_target_path)

        #system checker
        system_checker = SystemChecker()
        add_ss_destage(system_checker, host, src_path, target_path)
        system_checker.start()

        log.info("Start to sync data from %s:%s to %s" % (host, src_path, tmp_target_path))
        # disable the MDS
        set_mds_status(port, "disable")

        # We "cd" to the target direcory and do "sync" and "dbrecover"
        os.chdir(tmp_target_path)

        #6. Check free disk space
        ret_code = check_local_available_disk_space(host, src_path, tmp_target_path, IGNORE_SPACE)
        if ret_code == False:
            raise MdssyncException(mdssyncutility.ENSPCTS, 'Target Disk space check failed')

        ret_code = check_src_available_disk_space(host, src_path, IGNORE_SPACE)
        if ret_code == False:
            raise MdssyncException(mdssyncutility.ENSPCSS, 'Src Disk space check failed')

        #7. rsync the DB from the peer to this node
        ret_code = get_src_checklsn(host, src_path, status_mgr)
        if ret_code == False:
            raise Exception('Failed to checkLSN on source node')

        ret_code = rsync_db_env_and_log_files(host, src_path, tmp_src_path, tmp_target_path, status_mgr)
        if ret_code != mdssyncutility.SUCCESS:
            raise MdssyncException(ret_code, 'Failed to sync DB and log files')

        # move the tmp env to target env
        ret_code = move_tmp_env(tmp_target_path, target_path)
        if ret_code == False:
            raise Exception('Failed to move tmp env to target env')

        os.chdir(target_path)

        #8. Run db_recover -cv
        log.info("Running DB recover for MDS %d" % (port))
        ret_code, console_string = commands.getstatusoutput("nice db_recover -cv")
        log.info(console_string)
        if ret_code != 0:
            log.error(console_string)
            raise Exception('Failed to recover DB')

        #9. Enable and start the service
        # the mds disk replacement script will skip mds service start and chkconfig
        # so we need to do it here
        # enable the MDS
        set_mds_status(port, "enable")
        # Now go ahead and start the MDS service and chkconfig it
        if not NOSTART:
            ret_code = start_mds_service(port)
            if ret_code == False:
                log.error('Failed to start mds %s', port)
                log.info('You may need to restart the mds service manually')
                raise Exception('Failed to start mds %s' % port)

            #10. Wait until MDS service is initialized
            ret_code = wait_mds_initialized('localhost', port)
            if ret_code == False:
                raise Exception('MDS %s not initialized' % port)

        #11. Clean up tmp env on both remote and local host
        cleanup_local_tmp_env(target_path)
        system_checker.stop()
        cleanup_remote_tmp_env(host, src_path)
        log.info('=== MDSSync result for port %s: SUCCEEDED', port)
        return mdssyncutility.SUCCESS

    except MdssyncException, e:
        log.error('=== MDSSync result for port %s: FAILED', port)
        log.debug("Error message: %s", e.errstr)
        if 'system_checker' != None:
            system_checker.stop()
        return e.errcode

    except Exception, e:
        log.error('=== MDSSync result for port %s: FAILED', port)
        log.error(e)
        # if the client syschecker start
        if 'system_checker' != None:
            system_checker.stop()
        return mdssyncutility.ECOMMON

def do_drive_sync(UUID):
    """
    #Get all the ports for the node
    """
    proc = os.popen("/bin/ls /etc/maui/mds/ | grep 10[0-9][0-9][0-9]")

    # Run thru all the ports and do sync
    try:
        for line in proc.readlines():
            port = int(line.rstrip())
            log.info("=== For port %d" % (port))
            ret_code = do_port_sync(port, UUID)
            if ret_code != mdssyncutility.SUCCESS:
                log.error('=== Drive sync result: FAILED')
                break
        if ret_code == mdssyncutility.SUCCESS:
            log.info('=== Drive sync result: SUCCEEDED')
        return ret_code
    except Exception, e:
        log.error(e)
        return ret_code

def do_precheck(port, dest_disk):
    """
    Dry-run validation for a port sync: confirm a running peer exists,
    resolve the source env, and verify free disk space on both the target
    and the source side — without transferring any data.
    NOTE: This return code is used by DEV's code. Before redefine the return code, please contact them.
    """
    log.info("[PRECHECK] Start for port: %s", port)

    #1 Check peer availability
    ok, target_path, peer_host, peer_port = _mdscfg_get_db_env_and_running_peer(port)
    if ok == False:
        log.error("[PRECHECK] Failed to get mdsset")
        return mdssyncutility.ENPEER

    #2 Get src mds ENV
    ok, src_path = get_db_env_by_port(peer_host, peer_port)
    if ok == False:
        log.error("[PRECHECK] Failed to get src mds ENV")
        return mdssyncutility.ECOMMON

    #3 Get target disk path
    if dest_disk:
        rc, target_path = get_disk_path(dest_disk)
        if rc != 0:
            return rc
        log.info("[PRECHECK] Will use dest_disk specified, FSUUID: %s, path: %s", dest_disk, target_path)
        check_path = target_path
    else:
        log.info("[PRECHECK] No dest_disk specified. Will use the configured path: %s", target_path)
        # For many instance, target_path is missing, so its directory name is used
        check_path = os.path.dirname(target_path)

    #4 Check target disk availability
    if check_local_available_disk_space(peer_host, src_path, check_path) == False:
        log.error("[PRECHECK] No enough space available in target path: %s", check_path)
        return mdssyncutility.ENSPCTS
    log.info("[PRECHECK] Enough space to run mdssync in target path: %s", check_path)

    #5 Check src disk availability
    if check_src_available_disk_space(peer_host, src_path) == False:
        log.error("[PRECHECK] No enough space available in src path %s:%s", peer_host, src_path)
        return mdssyncutility.ENSPCSS
    log.info("[PRECHECK] Enough space to run mdssync in src path %s:%s", peer_host, src_path)
    log.info("[PRECHECK] mdssyncutility.SUCCESS")
    return mdssyncutility.SUCCESS

def print_version_and_exit(program):
    # Display the version banner; despite the name, the *caller* performs
    # the sys.exit(). Parenthesized print is valid on both py2 and py3.
    print("")
    print("%s version number %s" % (program, VERSION))
    print("")

def print_mdssync_steps():
    """Print the numbered list of steps a port sync performs."""
    steps = (
        "",
        "#  Below are the steps involved in doing the port sync",
        "#1. Get the env root for the port and the mds peer host and port",
        "#2. Validate UUID",
        "#3. Get the peer env root",
        "#4. Check the env root existence. And create the local tmp env",
        "#5. Check and create tmp env on souce node",
        "#6. Check free disk space",
        "#7. rsync the DB from the peer to this node",
        "#8. Run db_recover -cv",
        "#9. Enable and start the service",
        "#10. Wait until MDS service is initialized",
        "#11. Clean up tmp env on both remote and local host",
        "",
        "",
    )
    for line in steps:
        print(line)

if __name__ == "__main__":
    try:
        # NOTE(review): Watcher is defined in mdssyncutility; kept as the
        # very first action, before any logging or option parsing.
        watcher = mdssyncutility.Watcher()
        set_log_formatter()

        # Record the installed maui version; failure to read it is fatal.
        try:
            CUR_VERSION = get_maui_version()
            log.info("maui_version: %s", CUR_VERSION)
        except Exception, e:
            raise e

        usage = r'''
        %prog [options]

        %prog -r -p <PORT> [options] for dryrun
        %prog -u <UUID> [options] for drive sync
        %prog -p <PORT> [options] for port sync
        Example: %prog -p 10401
        '''

        parser = OptionParser(usage=usage)
        parser.add_option("-V", "--version", dest="version", action="store_true", help="for version")
        parser.add_option("-f", "--fast", dest="fast", action="store_true", help="for fast option.")
        parser.add_option("-s", "--stop", dest="stop", action="store_true", help="for don't start MDS option after rsync.")
        parser.add_option("-d", "--debug", dest="debug", action="store_true", help="enable debug mode")
        parser.add_option("-r", "--dryrun", dest="dryrun", action="store_true", help="for dryrun")
        parser.add_option("-u", "--UUID", dest="UUID", help="for drive sync.")
        parser.add_option("-p", "--port", dest="port", type="int", help="for syncing individual port")
        parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="more verbose message")
        parser.add_option("-P", "--path", dest="path", help="path of the mdssync.py script")
        parser.add_option("--ignore-space", dest="ignore_space", action="store_true", help="force run even if disk space low")
        parser.add_option("--force", dest="force_run", action="store_true", help="force run even if peer is not master")
        parser.add_option("-R", "--reuse-split-data", dest="reuse_split", action="store_true", help="re-use the splitted data on peer node")
        parser.add_option("--dest-disk", dest="dest_disk", help="dest disk FSUUID")
        #TODO remove option path??

        (options, args) = parser.parse_args()

        # version indication
        log.info('Run with %s' % ' '.join(sys.argv))
        log.info("%s version: %s", sys.argv[0], VERSION)

        if options.version:
            print_version_and_exit(sys.argv[0])
            sys.exit(mdssyncutility.SUCCESS)

        # fast option will have no effect when peer node is in the same IS
        # because data will be transfered via eth0
        if options.fast:
            RSYC_FAST = True
        #TODO implement fast option

        # Map the command-line switches onto the module-level behavior
        # flags consumed by do_port_sync() and friends.
        if options.stop:
            NOSTART = True
        if options.ignore_space:
            IGNORE_SPACE = True
        if options.force_run:
            FORCE_RUN = True

        if options.verbose:
            DEBUG_MODE = True

        # always set FileHandler to DEBUG level
        log.setLevel(logging.DEBUG)
        for h in log.handlers:
            if isinstance(h, logging.FileHandler) or DEBUG_MODE:
                h.setLevel(logging.DEBUG)
            else:
                h.setLevel(logging.INFO)

        if options.reuse_split:
            REUSE = True

        if options.path:
            SPLIT_RESTORE_FILE_UTIL = options.path
            # running the script as a source side tool for split db files,
            # trigger split large db files

        if options.debug:
            pdb.set_trace()
        # dryrun mode
        # -r requires -p; it only runs do_precheck() and never moves data.
        if options.dryrun:
            if options.port:
                port = options.port
                if not mds_service.is_valid_mds_port(port):
                    log.error("[PRECHECK] Invalid port number%s", port)
                    sys.exit(mdssyncutility.ECOMMON)
                dest_disk = ""
                if options.dest_disk:
                    dest_disk = options.dest_disk
                sys.exit(do_precheck(port, dest_disk))
            else:
                parser.print_help()
                sys.exit(mdssyncutility.ECOMMON)

        log.info("===== MDS SYNC STARTED =====")
        # -u <UUID>: sync every port on the drive; -p <PORT>: one port only.
        if options.UUID:
            UUID = options.UUID

            ret_code = is_mounted(UUID)
            if ret_code == False:
                sys.exit(mdssyncutility.ECOMMON)

            sys.exit(do_drive_sync(UUID))
        elif options.port:
            port = options.port
            log.info("===== Sync MDS for port %d =====" % port)

            ret_code = mds_service.is_valid_mds_port(port)
            if ret_code == False:
                sys.exit(mdssyncutility.ECOMMON)

            UUID = ""
            sys.exit(do_port_sync(port, UUID))
            # NOTE(review): the two statements below are unreachable —
            # sys.exit() above always raises SystemExit.
            log.info("===== END =====")
            sys.exit(mdssyncutility.SUCCESS)
        else:
            parser.print_help()
            sys.exit(mdssyncutility.ECOMMON)

        sys.exit(mdssyncutility.SUCCESS)

    except (KeyboardInterrupt):
        log.info("KeyboardInterrupt")
    except (SystemExit):
        # re-raise so the chosen exit code actually reaches the shell
        raise
    except Exception, e:
        log.debug("Exception: %s", e)
        log.debug("Detailed exc_info: ", exc_info = True)
        sys.exit(mdssyncutility.ECOMMON)
