# default block is 16k (dd block size used for split/restore)
BLOCK_SIZE  = 16384

# default chunk size is 5G (5 * CHUNK_UNIT, CHUNK_UNIT = 1 GiB)
KILO_BYTE   = 1024
MEGA_BYTE   = 1048576
GIGA_BYTE   = 1073741824
CHUNK_UNIT  = GIGA_BYTE
CHUNK_SIZE  = 5 * CHUNK_UNIT
class LargeFileTransfer(object):
    """
    Manage the split, transfer and restore of all large db files, in async way.

    Each db file is divided into CHUNK_SIZE chunks; a per-file bitmap
    (one int per chunk, 0 = pending, 1 = restored) is persisted through
    status_mgr so an interrupted transfer can be resumed.
    """
    __WORKER_COUNT = 2
    ASYNC_SPLIT_RESTORE_RESULT = True

    def __init__(self, file_list, src_host, src_path, target_tmp_path, is_same_segment, status_mgr):
        """
        :param file_list:       db file names (relative to src_path) to transfer
        :param src_host:        host the files are pulled from
        :param src_path:        directory holding the files on src_host
        :param target_tmp_path: local directory the files are restored into
        :param is_same_segment: True when src and target share a network
                                segment (then no rsync bandwidth limit is used)
        :param status_mgr:      persistence layer for the per-file chunk bitmaps
        """
        super(LargeFileTransfer, self).__init__()
        self.__WORK_Q = Queue.Queue()

        self.__file_list = file_list
        self.__src_host = src_host
        self.__src_path = src_path
        self.__target_tmp_path = target_tmp_path
        self.__is_same_segment = is_same_segment
        self.__worker_pool = []
        self.status_mgr = status_mgr
        self.status_dict = {}
        self.__init_status()

    def __init_status(self):
        """Build or restore the chunk bitmap of every file and queue pending ones."""
        for db_file in self.__file_list:
            cur_sta = self.status_mgr.get_status(db_file)
            if cur_sta and cur_sta != "done":
                # resume: reuse the persisted bitmap
                map_bits = cur_sta.split(',')
                self.status_dict[db_file] = [int(map_bit) for map_bit in map_bits]
                self.__WORK_Q.put((db_file, self.status_dict[db_file]))
            if not cur_sta:
                # first run: size the bitmap from the file size (ceil division)
                src_db_file = os.path.join(self.__src_path, db_file)
                db_size = os.path.getsize(src_db_file)
                chunk_count = db_size // CHUNK_SIZE
                if chunk_count * CHUNK_SIZE < db_size:
                    chunk_count = chunk_count + 1

                self.status_dict[db_file] = [0 for i in xrange(chunk_count)]
                status_string = ','.join([str(i) for i in self.status_dict[db_file]])
                self.status_mgr.set_status(db_file, status_string)
                self.__WORK_Q.put((db_file, self.status_dict[db_file]))
        # one sentinel per worker so every worker sees end-of-work
        for i in xrange(self.__WORKER_COUNT):
            self.__WORK_Q.put((None, None))

    def start(self):
        """Spawn the worker threads; returns True once all are started."""
        for i in xrange(self.__WORKER_COUNT):
            worker = SplitMergeWorker(self.__src_host,
                self.__src_path, self.__target_tmp_path, self.__WORK_Q,
                not self.__is_same_segment, self.status_mgr)
            self.__worker_pool.append(worker)
            # start() runs the worker asynchronously; run() would block here
            worker.start()
        return True

    def wait(self):
        """Block until every worker thread has finished; False on error."""
        try:
            for worker in self.__worker_pool:
                worker.join()
        except Exception as e:
            log.error(e)
            return False
        return True

class SplitMergeWorker(threading.Thread):
    """
    Worker thread: for each db file pulled from the work queue, split it
    into gzip-compressed blocks on the source host, rsync the blocks over,
    and restore them with dd into the destination file.

    Pipeline per file: spliter threads -> transfer thread -> restorer thread,
    connected by queues; (None, ...) tuples are end-of-stream sentinels.
    """
    __SPLIT_THREAD_COUNT = 2
    __RESTORE_THREAD_COUNT = 2

    BLOCK_BASE_NAME = 'block'
    BLOCK_TMP_NAME  = '.blocktmp'

    RESULT_OK                          = 0
    RESULT_SPLIT_OUTPUT_DIR_INCOMPLETE = 1
    RESULT_SPLIT_OUTPUT_DIR_WRONG_SRC  = 2
    RESULT_SPLIT_OUTPUT_DIR_EXPIRED    = 3
    RESULT_SPLIT_SRC_FILE_NOT_EXIST    = 4
    RESULT_SPLIT_FAILED_CREATE_DIR     = 5
    RESULT_SPLIT_FAILED_SPLIT          = 6
    RESULT_RESTORE_SPLIT_DIR_NOT_EXIST = 7
    RESULT_RESTORE_FAILED_RESTORE      = 8
    RESULT_RESTORE_INVALID_SRC_BLOCK   = 9

    def __init__(self, hostname, _src_path, _target_tmp_path, work_Q, __bw_limit_flag, sta_mgr):
        """
        :param hostname:         source host the blocks are split on
        :param _src_path:        directory holding the db files on the source host
        :param _target_tmp_path: local directory files are restored into
        :param work_Q:           queue of (db_file, chunk_bitmap) tuples
        :param __bw_limit_flag:  True to append a bandwidth limit to rsync
        :param sta_mgr:          persistence layer for the chunk bitmaps
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.work_Q = work_Q
        self.split_queue = Queue.Queue()
        self.transfer_queue = Queue.Queue()
        self.restore_queue = Queue.Queue()

        self.thread_pool = []
        self.block_num = 0

        self.host = hostname
        self.src_path = _src_path
        self.target_tmp_path = _target_tmp_path
        self.bw_limit_flag = __bw_limit_flag
        self.db_file = ""
        self.src_file = ""
        self.dest_file = ""
        self.dest_tmp_dir = ""
        self.src_tmp_dir = ""
        self.status = []
        self.status_mgr = sta_mgr

        self.split_done = False
        # number of dd blocks that make up one chunk
        self.count = CHUNK_SIZE // BLOCK_SIZE
        self.mutux_lock = threading.Lock()

    def run(self):
        while True:
            (db_file, sta) = self.work_Q.get(True)
            if db_file is None:
                break
            self.db_file = db_file
            self.status = sta
            self.src_file = os.path.join(self.src_path, db_file)
            self.dest_file = os.path.join(self.target_tmp_path, db_file)
            self.split_tmp_dir = os.path.basename(self.src_file) + SPLIT_DIR_SUFFIX
            self.src_tmp_dir = os.path.join(get_src_tmp_env(self.src_path), self.split_tmp_dir)
            self.dest_tmp_dir = os.path.join(self.target_tmp_path, self.split_tmp_dir)
            # create the work temp directory on both ends
            create_tmp_dir(self.host, self.src_tmp_dir)
            create_tmp_dir('localhost', self.dest_tmp_dir)

            chunk_count = len(self.status)
            self.block_num = 0
            # queue only the chunks not restored yet (resume support)
            for i in xrange(chunk_count):
                if self.status[i] == 0:
                    self.split_queue.put((self.src_file, self.src_tmp_dir, self.dest_tmp_dir, i))

            # one sentinel per split thread
            for i in xrange(self.__SPLIT_THREAD_COUNT):
                self.split_queue.put((None, None, None, None))

            # threads cannot be restarted, so build a fresh pool per file
            self.thread_pool = []
            self.__init_thread()

            for th in self.thread_pool:
                th.start()
            # wait for the whole pipeline to drain for this file
            for th in self.thread_pool:
                th.join()

    def __init_thread(self):
        """Create (but do not start) the split / transfer / restore threads."""
        for i in xrange(self.__SPLIT_THREAD_COUNT):
            split_th = threading.Thread(target=self.spliter)
            split_th.daemon = True
            self.thread_pool.append(split_th)
        tran_th = threading.Thread(target=self.transfer)
        tran_th.daemon = True
        self.thread_pool.append(tran_th)
        restore_th = threading.Thread(target=self.restorer)
        restore_th.daemon = True
        self.thread_pool.append(restore_th)

    def spliter(self):
        """Split queued chunks into gzip blocks on the source host."""
        while True:
            (src_file, src_tmp_dir, dest_tmp_dir, offset) = self.split_queue.get(True)
            if src_file is None:
                break
            # throttle: never buffer more than BUFF_BLOCK_COUNT blocks remotely
            while self.__get_block_num() >= BUFF_BLOCK_COUNT:
                time.sleep(5)

            # per-offset temp name so the split threads never clobber each other
            tmp_block = os.path.join(src_tmp_dir, '%s-%s' % (self.BLOCK_TMP_NAME, offset))
            cmd = 'dd if=%s bs=%s skip=%s count=%s | gzip > %s' \
                    % (src_file, BLOCK_SIZE, offset * self.count, self.count, tmp_block)

            # run the command
            (status, output) = common.ssh(self.host, cmd)
            if status != 0:
                log.error("[Split Worker]Failed to split file %s", self.db_file)
                log.debug(output)
                # stop producing but still fall through to the sentinel below,
                # otherwise the transfer thread would wait forever
                break
            blockname = '%s-%s.gz' % (self.BLOCK_BASE_NAME, offset)
            src_block = os.path.join(src_tmp_dir, blockname)
            dest_block = os.path.join(dest_tmp_dir, blockname)
            # atomically publish the finished block under its final name
            cmd = "mv %s %s" % (tmp_block, src_block)
            status, output = common.ssh(self.host, cmd)
            self.transfer_queue.put((src_block, dest_block))

        # sentinel: each split thread announces its completion
        self.transfer_queue.put((None, None))

    def transfer(self):
        """
        rsync the split file blocks to the local temp dir, retry if failed.
        Stops only after every split thread has sent its sentinel.
        """
        import shlex
        finished_spliters = 0
        while finished_spliters < self.__SPLIT_THREAD_COUNT:
            (src_block, dest_block) = self.transfer_queue.get(True)
            if src_block is None:
                finished_spliters += 1
                continue
            command = "rsync -avc --progress -e 'ssh -o StrictHostKeyChecking=no' %s:%s %s" \
                    % (self.host, src_block, dest_block)
            if self.bw_limit_flag:
                command = command + RSYNC_BANDWIDTH
            result = rsync_with_retry(shlex.split(command))
            if not result:
                log.error("Failed to transfer split blocks from %s:%s" % (self.host, self.src_tmp_dir))
                break
            self.restore_queue.put((src_block, dest_block))
        for i in xrange(self.__RESTORE_THREAD_COUNT):
            self.restore_queue.put((None, None))

    def restorer(self):
        """
        Restore worker: gunzip each transferred block and dd it into place,
        then mark the chunk done in the persisted bitmap.

        :return: a RESULT_* code (only meaningful when called directly).
        """
        result = self.RESULT_OK
        while True:
            (src_block, dest_block) = self.restore_queue.get(True)
            if dest_block is None:
                break
            blockname = os.path.basename(dest_block)
            try:
                # blockname is block-<block_num>.gz
                block_num_str = blockname.rsplit('-', 1)[-1].split('.', 1)[0]
                block_num = int(block_num_str)
                # byte offset of this chunk inside the db file
                offset = block_num * CHUNK_SIZE

                seek = int(offset // BLOCK_SIZE)
                # use 'conv=notrunc' so that dd will not truncate db file
                # when it is filling a block in the middle of the file
                cmd = 'gunzip -c %s | dd conv=notrunc bs=%s seek=%s of=%s' \
                    % (dest_block, BLOCK_SIZE, seek, self.dest_file)
                log.debug('[SplitMerge] <RUN> %s' % cmd)
                status, _ = commands.getstatusoutput(cmd)
                if status != 0:
                    log.error('[SplitMerge] Failed to restore block %s to file %s' % (dest_block, self.dest_file))
                    return self.RESULT_RESTORE_FAILED_RESTORE
                else:
                    result = self.RESULT_OK

                # remove the consumed source block to free remote buffer space
                cmd = "rm -f %s" % src_block
                status, output = common.ssh(self.host, cmd)
                if status != 0:
                    log.error("Failed to remove the block %s:%s", self.host, src_block)
                    return self.RESULT_RESTORE_FAILED_RESTORE
                # persist the updated bitmap; lock released even on error
                self.mutux_lock.acquire()
                try:
                    self.status[block_num] = 1
                    status_string = ','.join([str(i) for i in self.status])
                    self.status_mgr.set_status(self.db_file, status_string)
                finally:
                    self.mutux_lock.release()

            except Exception:
                result = self.RESULT_RESTORE_INVALID_SRC_BLOCK
        return result

    def __get_block_num(self):
        """Count buffered block files in the remote source temp dir."""
        # prefix the globs with the directory, otherwise they expand in $HOME
        cmd = "ls -al %s/%s* %s/%s* 2>/dev/null | wc -l" \
                % (self.src_tmp_dir, self.BLOCK_BASE_NAME,
                   self.src_tmp_dir, self.BLOCK_TMP_NAME)
        status, output = common.ssh(self.host, cmd)
        if status != 0:
            log.error("Failed to get block number on %s:%s", self.host, self.src_tmp_dir)
            # pessimistic fallback: report a full buffer so the spliter waits
            output = BUFF_BLOCK_COUNT
        return int(output)

def create_tmp_dir(host, tmp_dir):
    """Ensure *tmp_dir* exists on *host* via ssh; True on success."""
    # shell-level test-or-create; the escaped $ survives the ssh quoting layer
    cmd = "src_tmp_dir=%s; test -d \\$src_tmp_dir || mkdir \\$src_tmp_dir" % tmp_dir
    status, output = common.ssh(host, cmd,
                                retry_count = mdssyncutility.SSH_RETRY_COUNT,
                                timeout = mdssyncutility.SSH_TIMEOUT)
    if status != 0:
        log.error('[TmpEnv] Failed to create temp dir on peer node')
        log.error('[TmpEnv] ' + output)
        return False
    log.info("[TmpEnv] Create temp dir on peer node succeeded")
    return True