#encoding: utf-8

'''
    file_sync_fod.py
    ----------------
    file sync by fragment over directory
    :copyright: (c) 2018 by tizzybec.
    :license: MIT/X11, see LICENSE for more details.

    A few details:

    filename rule:
        name: filename_md5_total_from_to_partMd5_chunkSize.part
        content: chunk data

    eg:
        filename_md5_5_1_1_partMd5_3000.part
        filename_md5_5_2_2_partMd5_3000.part
        filename_md5_5_3_3_partMd5_3000.part
        filename_md5_5_4_4_partMd5_3000.part
        filename_md5_5_5_5_partMd5_3000.part

    partial merge:
        filename_md5_5_1_3_partMd5_9000.part
        filename_md5_5_4_5_partMd5_6000.part

    check merge result:
        send:
            {
                filename: filename,
                md5: merged_file_md5
                missing_parts: [] //empty means succeed
            }

        receive:
            {
                filename: filename,
                status: "succeed/failed"
            }

    usage:
        from file_sync_fod import Config as FileSyncFODConfig, FileSyncFOD

        cfg = FileSyncFODConfig()
        instance = FileSyncFOD(cfg)

'''

import hashlib
import os
import datetime
import shutil
import json
import re
from collections import OrderedDict

import gevent
from watchdog_gevent import Observer
from watchdog.events import FileSystemEventHandler
import sqlalchemy
from sqlalchemy.sql import select

import logging

# constant configuration
# part file name template:
# {basename}-{md5}-{total}-{from}-{end}-{part_md5}-{part_size}-{part_file_size}.part
# The two %s slots are later filled with the digit width of `total` so part
# indices come out zero-padded (keeps part files lexicographically ordered).
_PART_NAME_TEMPLATE = '{}-{}-{}-{:0%s}-{:0%s}-{}-{}-{}.part'
# ack file name template: {basename}-{size}.ack, where size is the byte length
# of the JSON payload (lets the reader detect a partially written ack file)
_ACK_NAME_TEMPLATE = '{}-{}.ack'
_PART_SUFFIX = '.part'
_ACK_SUFFIX = '.ack'
# NOTE(review): name is misspelled ("FORMATE") but kept as-is — it is
# referenced throughout this module.
_DATETIME_FORMATE = '%Y-%m-%d %H:%M:%S.%f'

# task status enum values (persisted in the fod_file_record table)
_TASK_STATUS_QUEUED = 'queued'
_TASK_STATUS_PROCESSING = 'processing'
_TASK_STATUS_FINISHED = 'finished'
_TASK_STATUS_FAILED = 'failed'
_TASK_STATUS_MERGED = 'merged'
_TASK_STATUS_MERGING = 'merging'

# ack status from remote
_ACK_STATUS_SUCCEED = 'succeed'
_ACK_STATUS_FAILED = 'failed'

logging.basicConfig(format='[%(asctime)s][%(levelname)s][%(filename)s][%(lineno)s]:%(message)s', level=logging.WARNING)

class Config():
    # Runtime configuration for FileSyncFOD.  Override the attributes (or
    # subclass) before handing an instance to FileSyncFOD.
    data_dir = 'data'  # working root; also holds the sqlite database file
    part_inbox = data_dir + '/part_inbox' # published parts, picked up by the transport
    tmp_part_inbox = data_dir + '/_tmp_part_inbox'  # staging area for freshly split parts and acks
    inbox = data_dir + '/_inbox' # whole files waiting to be split and sent
    part_outbox = data_dir + '/part_outbox' # parts/acks received from the remote side
    outbox = data_dir + '/_outbox' # fully merged files end up here
    part_size = 3 * 1000 # default split by 3k
    queue_size = 100 # default handle 100 parts
    cost_per_part = 1000 # default wait 1s for per part (ms)
    min_cost_time = 10000 # minimal cost time per file (ms)
    max_cost_time = 300000 # maximal cost time per file (ms)
    tick = 1000  # tick timer interval (ms)
    retry_interval = 3000  # retry timer interval (ms)
    high_priority_pattern = '.*_netTest.xml$'  # files matching this regex jump the queue
    auto_clear_dir = ''

def _makedirs(path):
    try:
        if not os.path.exists(path):
            os.makedirs(path)
    except Exception as ex:
        logging.error(ex)

def _move(src_path, dst_dir):
    try:
        shutil.move(src_path, dst_dir)
    except Exception as ex:
        logging.error(ex)

class FileSyncFOD():
    def __init__(self, cfg = Config(), db_engine=None):
        """Wire up directories, the sqlite task table, in-memory queues,
        watchdog observers and the gevent timers that drive the sync loop.

        :param cfg: Config with directory/size/timing settings.
            NOTE(review): the default is a single shared mutable instance
            (evaluated once at import time) — prefer passing an explicit
            Config per FileSyncFOD.
        :param db_engine: optional SQLAlchemy engine; defaults to a sqlite
            database file inside cfg.data_dir.
        """
        self.cfg = cfg

        # make sure every working directory exists before anything runs
        _makedirs(self.cfg.data_dir)
        _makedirs(self.cfg.inbox)
        _makedirs(self.cfg.outbox)
        _makedirs(self.cfg.part_inbox)
        _makedirs(self.cfg.tmp_part_inbox)
        _makedirs(self.cfg.part_outbox)

        self.metadata = sqlalchemy.MetaData()

        self.engine = db_engine
        if not self.engine:
            self.engine = sqlalchemy.create_engine(
                'sqlite:///{0}/file-sync-fod.db'.format(os.path.abspath(self.cfg.data_dir)))
        self.conn = self.engine.connect()

        # persisted per-file transfer state (status/offset/md5/timestamps)
        self.file_record_table = sqlalchemy.Table( \
            'fod_file_record',
            self.metadata, \
            sqlalchemy.Column('file_name', sqlalchemy.String(100), primary_key=True), \
            sqlalchemy.Column('size', sqlalchemy.Integer), \
            sqlalchemy.Column('md5', sqlalchemy.String(50)), \
            sqlalchemy.Column('status', sqlalchemy.String(10)), \
            sqlalchemy.Column('offset', sqlalchemy.Integer), \
            sqlalchemy.Column('create_t', sqlalchemy.String(50)), \
            sqlalchemy.Column('modify_t', sqlalchemy.String(50)))

        self.metadata.create_all(self.conn)

        # in-memory task queues, each kept sorted by create_t
        self.queued_tasks = OrderedDict()
        self.failed_tasks = OrderedDict()
        self.handling_tasks = OrderedDict()

        # store failed calls to be retried
        self.failed_calls = []

        self.loop = gevent.get_hub().loop

        self.handling_split_task = False
        # NOTE(review): initialised to True and never set to False anywhere
        # visible in this file, so retry() will never call
        # schedule_merge_task(); tick() still calls it unconditionally —
        # confirm this is intended.
        self.handling_merge_task = True

        self.merging_tasks = {}

        # first scan inbox
        gevent.spawn(self.scan_inbox_dir)

        # then scan outbox for ack file
        gevent.spawn(self.scan_part_outbox_dir)

        class PartOutboxChangedHandler(FileSystemEventHandler):
            # reacts to newly received part files; once the final part of a
            # file arrives, schedule the ack/merge callback on the hub loop
            def __init__(self, parent):
                self.parent = parent

            def on_created(self, event):
                if not event.is_directory:
                    file_path = event.src_path
                    file_name = os.path.basename(file_path)
                    if file_name.endswith(_PART_SUFFIX):
                        basename, md5, part_md5, total, from_n, to_n, part_size, part_file_size = FileSyncFOD.parse_part_file_name(file_name)
                        if total == to_n:
                            self.parent.loop.run_callback(self.parent.ack_with_retry, basename)

        class InboxChangedHandler(FileSystemEventHandler):
            # registers files dropped into the inbox as new sync tasks
            def __init__(self, parent):
                self.parent = parent

            def on_created(self, event):
                if not event.is_directory:
                    file_path = event.src_path
                    file_name = os.path.basename(file_path)
                    if self.parent.is_valid_inbox_file(file_name):
                        logging.debug('found %s' % file_path)
                        self.parent.loop.run_callback(self.parent.handle_inbox_file_with_retry, file_name)

        class PartInboxChangedHandler(FileSystemEventHandler):
            # placeholder: deletions in tmp_part_inbox are currently ignored
            def __init__(self, parent):
                self.parent = parent

            def on_deleted(self, event):
                pass

        self.observer = Observer()
        part_outbox_changed_handler = PartOutboxChangedHandler(self)
        self.observer.schedule(part_outbox_changed_handler, self.cfg.part_outbox, recursive=False)

        # NOTE(review): inbox_observer and tmp_part_inbox_observer below are
        # created but every handler is scheduled on self.observer, so those
        # two observers are never scheduled or started — probably leftovers;
        # confirm whether they can be removed.
        self.inbox_observer = Observer()
        inbox_changed_handler = InboxChangedHandler(self)
        self.observer.schedule(inbox_changed_handler, self.cfg.inbox, recursive=False)

        self.tmp_part_inbox_observer = Observer()
        tmp_part_inbox_changed_handler = PartInboxChangedHandler(self)
        self.observer.schedule(tmp_part_inbox_changed_handler, self.cfg.tmp_part_inbox, recursive=False)

        self.observer.start()

        # timers take seconds; cfg intervals are in milliseconds
        self.retry_timer = self.loop.timer(3, self.cfg.retry_interval / 1000)
        self.retry_timer.start(self.retry)

        self.tick_timer = self.loop.timer(0, self.cfg.tick / 1000)
        self.tick_timer.start(self.tick)

    def ack_with_retry(self, file_name):
        """Run the ack handler for *file_name*, queueing a retry on failure."""
        self.invoke_with_retry(self._ack_with_retry, file_name)

    def _ack_with_retry(self, file_name):
        """Merge the received parts of *file_name* if needed, then write an ack.

        Already-merged files only get their leftover part files removed;
        otherwise a merge is attempted and its missing-part list is reported
        back through the ack file.
        """
        if self.is_task_merged(file_name):
            self.remove_related_files(self.cfg.part_outbox, file_name)
            missing_parts = []
        else:
            missing_parts = self.merge_file(self.cfg.part_outbox, file_name)
        self.ack_file(file_name, missing_parts)

    def sort(self, ordered_dict):
        """Return a new OrderedDict with entries ordered by task 'create_t'."""
        by_create_time = sorted(ordered_dict.items(), key=lambda item: item[1]['create_t'])
        return OrderedDict(by_create_time)

    def get_task_status(self, file_name):
        """Return the persisted status for *file_name*, or 'queued' if unknown."""
        record = self.get_file_task_record(file_name)
        return record['status'] if record else _TASK_STATUS_QUEUED

    def scan_part_outbox_dir(self):
        """Startup scan of part_outbox: process pending ack files first, then
        trigger an ack/merge for every file whose final part has arrived."""
        for file_name in os.listdir(self.cfg.part_outbox):
            if file_name.endswith(_ACK_SUFFIX):
                self.handle_part_outbox_ack_file(file_name)

        # re-list: ack handling above may have changed the directory contents
        for file_name in os.listdir(self.cfg.part_outbox):
            if not file_name.endswith(_PART_SUFFIX):
                continue
            basename, _, _, total, _, to_n, _, _ = self.parse_part_file_name(file_name)
            if total == to_n:
                self.ack_with_retry(basename)

    @staticmethod
    def now():
        """Current local time rendered with the module's datetime format."""
        return datetime.datetime.now().strftime(_DATETIME_FORMATE)

    @staticmethod
    def epoch():
        """1970-01-01 00:00:00 in the module format — used as an artificially
        old create_t to push high-priority tasks to the queue front."""
        epoch_start = datetime.datetime(1970, 1, 1, 0, 0, 0)
        return epoch_start.strftime(_DATETIME_FORMATE)

    @staticmethod
    def to_time_str(time):
        """Serialize a datetime with the module's canonical string format."""
        return time.strftime(_DATETIME_FORMATE)

    @staticmethod
    def from_time_str(time_str):
        """Parse a string produced by now()/to_time_str() back to a datetime.

        Made a @staticmethod for consistency with to_time_str(); existing
        instance-style calls (self.from_time_str(...)) keep working unchanged.
        """
        return datetime.datetime.strptime(time_str, _DATETIME_FORMATE)

    # for failed calls retry
    def retry(self):
        if not self.handling_merge_task:
            self.schedule_merge_task()

        failed_calls, self.failed_calls = self.failed_calls, []

        for failed_call in failed_calls:
            self.invoke_with_retry(failed_call[0], *failed_call[1], **failed_call[2])

    def get_timeout_task(self, tasks):
        """Return the tasks whose last activity is older than the predicted
        worst-case finish time for a file of their size."""
        now = datetime.datetime.now()
        overdue = []
        for task in tasks.values():
            allowed_ms = self.predicate_latest_finish_time_by_size(task['size'])
            allowance = datetime.timedelta(milliseconds=allowed_ms)
            last_activity = self.from_time_str(task['modify_t'])
            if (now - last_activity) > allowance:
                overdue.append(task)
        return overdue

    @staticmethod
    def remove_dir(dir, keep_dir=True):
        """Delete the contents of *dir*; with keep_dir=False delete *dir* too.

        A missing directory is a no-op; any other failure is only logged.
        """
        try:
            if not os.path.isdir(dir):
                return
            if keep_dir is not True:
                shutil.rmtree(dir, ignore_errors=True)
                return
            for entry in os.listdir(dir):
                entry_path = os.path.join(dir, entry)
                if os.path.isfile(entry_path):
                    os.remove(entry_path)
                elif os.path.isdir(entry_path):
                    shutil.rmtree(entry_path, ignore_errors=True)
        except Exception as ex:
            logging.warning(ex)

    def move_dir(self, src_dir, dst_dir):
        """Move every file found under *src_dir* (recursively) into the flat
        *dst_dir*, skipping names that already exist at the destination."""
        for root, _dirs, file_names in os.walk(src_dir):
            for name in file_names:
                target = os.path.join(dst_dir, name)
                if os.path.exists(target):
                    continue
                _move(os.path.join(root, name), target)

    # for logical tick
    def tick(self):
        """Periodic heartbeat: publish staged parts, schedule splitting,
        re-kick timed-out tasks, re-sort the queue and run merge scheduling."""
        # publish staged parts into the directory the transport watches
        self.move_dir(self.cfg.tmp_part_inbox, self.cfg.part_inbox)

        if not self.handling_split_task:
            self.schedule_split_task()

        timeout_tasks = []

        timeout_tasks += self.get_timeout_task(self.handling_tasks)
        timeout_tasks += self.get_timeout_task(self.failed_tasks)

        for timeout_task in timeout_tasks:
            file_name = timeout_task['file_name']
            timeout_task['modify_t'] = self.now()
            md5 = timeout_task['md5']
            # re-send the final part so the receiver re-evaluates completeness
            self.invoke_with_retry(self.send_last_part, file_name, md5)
            # status is written back unchanged — this only bumps the DB's
            # modify_t so the task is not immediately re-flagged as timed out
            self.update_file_task_record(file_name,
                                         status=timeout_task['status'])

        self.queued_tasks = self.sort(self.queued_tasks)

        self.schedule_merge_task()

    def send_last_part(self, file_name, md5):
        """Re-split and stage only the final part of *file_name* — used to
        nudge the receiver when a transfer looks stalled."""
        file_path = os.path.join(self.cfg.inbox, file_name)
        if not os.path.exists(file_path):
            return

        size = os.path.getsize(file_path)
        if size <= 0:
            return

        # size of the trailing fragment; a full part when size divides evenly
        tail = size % self.cfg.part_size or self.cfg.part_size

        self._split_file(file_path, md5, size - tail, 1,
                         self.cfg.part_size, self.cfg.tmp_part_inbox)

    def is_file_task_record_exists(self, file_name):
        # NOTE(review): dead code — this definition is shadowed by the
        # redefinition of the same name further down in this class (which
        # delegates to get_file_task_record()).  Kept byte-identical here;
        # consider deleting one of the two.
        stmt = select([self.file_record_table]).where(self.file_record_table.c.file_name == file_name)
        val = self.conn.execute(stmt).fetchone()
        return val != None

    def get_file_task_record(self, file_name):
        """Load the persisted task row for *file_name* as a dict, or None.

        'missing_parts' is not persisted and always comes back empty.
        """
        stmt = select([self.file_record_table]).where(self.file_record_table.c.file_name == file_name)
        row = self.conn.execute(stmt).fetchone()
        if not row:
            return None
        cols = self.file_record_table.c
        return {
            'file_name': row[cols.file_name],
            'size': row[cols.size],
            'md5': row[cols.md5],
            'status': row[cols.status],
            'offset': row[cols.offset],
            'missing_parts': [],
            'create_t': row[cols.create_t],
            'modify_t': row[cols.modify_t]
        }

    def is_file_task_record_exists(self, file_name):
        """True when a persisted task row exists for *file_name*."""
        return self.get_file_task_record(file_name) is not None

    def get_file_task_offset(self, file_name):
        """Byte offset already split for *file_name*; 0 when no record exists."""
        record = self.get_file_task_record(file_name)
        return record['offset'] if record else 0

    def insert_file_task_record(self,
                                file_name,
                                md5='',
                                offset=0,
                                size=0,
                                status=_TASK_STATUS_QUEUED,
                                create_t=None):
        """Insert a new fod_file_record row; create_t defaults to the current
        time and modify_t is always set to now."""
        now = self.now()
        stmt = self.file_record_table.insert().values(
            file_name=file_name,
            status=status,
            md5=md5,
            offset=offset,
            size=size,
            create_t=create_t if create_t is not None else now,
            modify_t=now)
        self.conn.execute(stmt)

    def update_file_task_record(self, file_name, status=None, md5=None, offset=None):
        """Update the row for *file_name*; modify_t is always refreshed.

        Only a truthy *status* and non-None *md5*/*offset* are written, so
        callers can update fields independently (offset=0 is still persisted).
        """
        stmt = self.file_record_table.update(). \
            where(self.file_record_table.c.file_name == file_name). \
            values(modify_t=self.now())
        if status:
            stmt = stmt.values(status=status)
        # identity comparison with None (PEP 8) instead of `!= None`
        if offset is not None:
            stmt = stmt.values(offset=offset)
        if md5 is not None:
            stmt = stmt.values(md5=md5)
        self.conn.execute(stmt)

    @staticmethod
    def file_md5(file_path_arr, chunk_size=5 * 1000 * 1024):
        """MD5 hex digest of the concatenated contents of the files in
        *file_path_arr*; missing files are silently skipped."""
        digest = hashlib.md5()
        for file_path in file_path_arr:
            if not os.path.exists(file_path):
                continue
            with open(file_path, "rb") as f:
                while True:
                    chunk = f.read(chunk_size)
                    if not chunk:
                        break
                    digest.update(chunk)
        return digest.hexdigest()

    @staticmethod
    def file_md5_with_offset(file_path_arr, chunk_size=5 * 1000 * 1024):
        """MD5 hex digest over (path, offset) pairs: each existing file is
        hashed from its offset through EOF; missing files are skipped."""
        digest = hashlib.md5()
        for item in file_path_arr:
            file_path, offset = item[0], item[1]
            if not os.path.exists(file_path):
                continue
            with open(file_path, "rb") as f:
                f.seek(offset)
                while True:
                    chunk = f.read(chunk_size)
                    if not chunk:
                        break
                    digest.update(chunk)
        return digest.hexdigest()

    @staticmethod
    def data_md5(data_arr):
        """MD5 hex digest of the concatenation of the byte strings in *data_arr*."""
        digest = hashlib.md5()
        for chunk in data_arr:
            digest.update(chunk)
        return digest.hexdigest()

    @staticmethod
    def is_valid_inbox_file(file_name):
        """Inbox entries are valid unless hidden (dot-prefixed)."""
        return not file_name.startswith('.')

    @staticmethod
    def is_valid_part_outbox_file(file_name):
        """part_outbox entries are valid unless hidden (dot-prefixed)."""
        return not file_name.startswith('.')

    def scan_inbox_dir(self):
        """Startup scan of the inbox: (re)build the in-memory task queues.

        For each visible file the task comes from, in order of preference:
        an existing in-memory task (removed from whatever queue held it),
        the persisted DB record, or a brand-new task.  The task is then
        re-queued by status and all queues re-sorted by create_t.
        """
        for file_name in os.listdir(self.cfg.inbox):
            if self.is_valid_inbox_file(file_name):
                task = self.fetch_from_all_task_queue(file_name)

                if task is None:
                    task = self.get_file_task_record(file_name)

                if task is None:
                    task = self.make_inbox_file_task(file_name)
                    # new tasks are QUEUED, so add_to_proper_queue() below
                    # re-inserts them here anyway; the double insert is harmless
                    self.queued_tasks[file_name] = task

                task['modify_t'] = self.now()

                self.add_to_proper_queue(task)

        # sort all task queue
        self.queued_tasks = self.sort(self.queued_tasks)
        self.handling_tasks = self.sort(self.handling_tasks)
        self.failed_tasks = self.sort(self.failed_tasks)

    def _invoke_split_task(self, tasks, avail_parts, split_method):
        """Run *split_method* over *tasks* until the part budget is exhausted.

        :param tasks: OrderedDict of file_name -> task; mutated in place —
            entries whose inbox file has vanished are dropped.
        :param avail_parts: budget of parts that may still be generated.
        :param split_method: callable(file_name, task, avail_parts) -> number
            of parts actually generated.
        :return: (total parts generated, list of tasks that were processed)
        """
        count = 0
        split_tasks = []

        invalid_tasks = []

        for file_name, task in tasks.items():
            try:
                file_path = os.path.join(self.cfg.inbox, file_name)
                if not os.path.exists(file_path):
                    # source file disappeared; drop the task after the loop
                    invalid_tasks.append(file_name)
                    continue

                parts = split_method(file_name, task, avail_parts)
                count += parts
                avail_parts -= parts
                if parts > 0:
                    task['modify_t'] = self.now()
                split_tasks.append(task)
            except Exception as ex:
                logging.error(ex)

            if avail_parts <= 0:
                break

        # deletion deferred so the dict is not mutated while iterating;
        # plain `del` instead of the non-idiomatic __delitem__() call
        for invalid_task in invalid_tasks:
            del tasks[invalid_task]

        return count, split_tasks

    def schedule_split_task(self):
        """Split files into parts, draining the part budget in priority order:
        in-flight (handling) tasks, then failed tasks, then queued tasks.

        Queued tasks that had parts generated are promoted to handling_tasks
        with status PROCESSING.
        """
        self.handling_split_task = True

        avail_parts = self.available_parts()

        # handling_tasks first
        if avail_parts > 0:
            count, _ = self._invoke_split_task(self.handling_tasks,
                                            avail_parts,
                                            self.split_file)
            avail_parts -= count

        # then failed_tasks
        if avail_parts > 0:
            count, _ = self._invoke_split_task(self.failed_tasks,
                                            avail_parts,
                                            self.split_file)
            avail_parts -= count

        # then queue_tasks
        if avail_parts > 0:
            count, affected_tasks = self._invoke_split_task(self.queued_tasks,
                                            avail_parts,
                                            self.split_file)
            for task in affected_tasks:
                task['status'] = _TASK_STATUS_PROCESSING
                file_name = task['file_name']
                self.handling_tasks[file_name] = task
                # plain `del` instead of the non-idiomatic __delitem__() call
                del self.queued_tasks[file_name]
            avail_parts -= count

        self.handling_split_task = False

    def add_merging_task(self, file_name, md5, status=_TASK_STATUS_PROCESSING):
        """Start tracking *file_name* as being merged and persist its status,
        inserting the DB row if this is the first time the file is seen."""
        self.merging_tasks[file_name] = {
            'file_name': file_name,
            'md5': md5,
            'status': status
        }
        if not self.is_file_task_record_exists(file_name):
            self.insert_file_task_record(file_name=file_name, md5=md5, status=status)
        else:
            self.update_file_task_record(file_name, status=status)

    def update_merging_task(self, file_name, status):
        """Persist *status* and stop tracking the file once it reaches MERGED;
        any other status is ignored."""
        if status != _TASK_STATUS_MERGED:
            return
        self.update_file_task_record(file_name=file_name, status=status)
        self.merging_tasks.pop(file_name, None)

    def is_task_merged(self, file_name):
        """True when the persisted status marks the file merged (or the whole
        transfer already finished)."""
        record = self.get_file_task_record(file_name)
        if not record:
            return False
        return record['status'] in (_TASK_STATUS_MERGED, _TASK_STATUS_FINISHED)

    def schedule_merge_task(self):
        """Scan part_outbox and drive merging for every file seen there.

        Part files register their basename/md5; ack files that are fully
        written (on-disk size matches the size encoded in the name) are
        processed immediately.  Each registered file is then merged unless
        the DB already marks it merged/finished.
        """
        files = {}

        for file_name in os.listdir(self.cfg.part_outbox):
            if file_name.endswith(_PART_SUFFIX):
                basename, md5, part_md5, total, from_n, to_n, part_size, part_file_size = FileSyncFOD.parse_part_file_name(file_name)
                if len(basename) > 0:
                    files[basename] = (md5, total)
            elif file_name.endswith(_ACK_SUFFIX):
                _, file_size = self.parse_ack_file_name(file_name)
                file_path = os.path.join(self.cfg.part_outbox, file_name)
                # the size suffix guards against reading a half-written ack
                if os.path.getsize(file_path) == file_size:
                    self.handle_part_outbox_ack_file(file_name)

        for file_name, file_info in files.items():
            md5 = file_info[0]
            if self.is_task_merged(file_name):
                # already merged: just clean up the leftover part files
                self.remove_related_files(self.cfg.part_outbox, file_name)
                continue
            elif file_name not in self.merging_tasks:
                self.add_merging_task(file_name, md5=md5, status=_TASK_STATUS_MERGING)

            self.merge_task(file_name)

    def merge_task(self, file_name):
        """Attempt to merge *file_name*; write the (success) ack only when no
        parts are missing."""
        missing = self.merge_file(self.cfg.part_outbox, file_name)
        if not missing:
            self.ack_file(file_name, missing)

    def add_to_proper_queue(self, task):
        """Insert *task* into the queue matching its status and keep that
        queue sorted by create_t.  Unknown statuses are ignored."""
        assert 'status' in task
        queue_attr = {
            _TASK_STATUS_PROCESSING: 'handling_tasks',
            _TASK_STATUS_QUEUED: 'queued_tasks',
            _TASK_STATUS_FAILED: 'failed_tasks',
        }.get(task['status'])
        if queue_attr is None:
            return
        queue = getattr(self, queue_attr)
        queue[task['file_name']] = task
        setattr(self, queue_attr, self.sort(queue))

    def handle_inbox_file(self, file_name):
        """Register a newly seen inbox file as a task.

        No-op when the file is already gone or a task for it is already held
        in one of the in-memory queues; otherwise the persisted record is
        reused or a fresh task is created and persisted.
        """
        file_path = os.path.join(self.cfg.inbox, file_name)
        if not os.path.exists(file_path):
            return

        if self.get_from_all_task_queue(file_name) is not None:
            return  # already tracked somewhere

        task = self.get_file_task_record(file_name)
        if not task:
            task = self.make_inbox_file_task(file_name)
            self.insert_file_task_record(file_name, md5=task['md5'], size=task['size'])

        self.add_to_proper_queue(task)

    def handle_inbox_file_with_retry(self, file_name):
        """handle_inbox_file() wrapped so failures are queued for retry."""
        self.invoke_with_retry(self.handle_inbox_file, file_name)

    def make_inbox_file_task(self, file_name, missing_parts=None):
        """Build a fresh in-memory task dict for an inbox file.

        Files matching cfg.high_priority_pattern get an epoch create_t so
        they sort to the front of every queue.

        :param missing_parts: optional list of (from, to) part ranges still
            needed by the receiver; defaults to an empty list.  (Previously a
            mutable default argument shared between calls — fixed.)
        """
        file_path = os.path.join(self.cfg.inbox, file_name)
        assert os.path.exists(file_path)

        size = os.path.getsize(file_path)
        md5 = self.file_md5([file_path])

        if re.match(self.cfg.high_priority_pattern, file_name):
            create_t = self.epoch()
        else:
            create_t = self.now()

        return {
            'file_name': file_name,
            'status': _TASK_STATUS_QUEUED,
            'md5': md5,
            'size': size,
            'offset': 0,
            'missing_parts': [] if missing_parts is None else missing_parts,
            'create_t': create_t,
            'modify_t': self.now()
        }

    def remove_from_all_task_queue(self, file_name):
        """Drop *file_name* from every in-memory queue that holds it."""
        for queue in (self.queued_tasks, self.handling_tasks, self.failed_tasks):
            # pop() with a default replaces the membership test plus the
            # non-idiomatic __delitem__() call
            queue.pop(file_name, None)

    def fetch_from_all_task_queue(self, file_name):
        """Remove and return the task for *file_name* from the first queue
        holding it; None when no queue does."""
        for queue in (self.queued_tasks, self.handling_tasks, self.failed_tasks):
            if file_name in queue:
                return queue.pop(file_name)
        return None

    def get_from_all_task_queue(self, file_name):
        """Return (without removing) the task for *file_name*, or None."""
        for queue in (self.queued_tasks, self.handling_tasks, self.failed_tasks):
            if file_name in queue:
                return queue[file_name]
        return None

    def move_to_failed_task_queue(self, file_name):
        """Relocate the task for *file_name*, wherever it lives, into failed_tasks."""
        task = self.fetch_from_all_task_queue(file_name)
        if task:
            self.failed_tasks[file_name] = task

    def schedule_split_failed_parts_task(self, file_name, missing_parts):
        # Rebuild a task that re-sends only the receiver's missing part ranges.
        # NOTE(review): self.missing_parts_tasks is never initialised in
        # __init__ (nor anywhere else visible in this file), so this call
        # would raise AttributeError — confirm where this attribute is
        # created, or whether this method is dead code.
        self.missing_parts_tasks[file_name] = self.make_inbox_file_task(file_name, missing_parts)

    def handle_part_outbox_ack_file(self, file_name):
        """Consume an ack file the remote side dropped into part_outbox.

        A 'succeed' ack finishes the task; any other ack records the missing
        part ranges, marks the task FAILED and moves it to failed_tasks.
        The ack file is deleted afterwards; errors are logged and swallowed.
        """
        file_path = os.path.join(self.cfg.part_outbox, file_name)

        if not os.path.exists(file_path):
            return

        try:
            with open(file_path, 'rb') as fp:
                ack_data = fp.read().decode()
                ack_json = json.loads(ack_data)

                ack_file_name = ack_json['file_name']

                # late/duplicate acks for already-finished tasks are ignored
                task_status = self.get_task_status(ack_file_name)
                if task_status != _TASK_STATUS_FINISHED:
                    ack_status = ack_json['status']

                    if ack_status == _ACK_STATUS_SUCCEED:
                        self.finish_task(ack_file_name)
                    else:
                        missing_parts = ack_json['missing_parts']

                        if self.is_file_task_record_exists(ack_file_name):
                            self.update_file_task_record(
                                ack_file_name,
                                status=_TASK_STATUS_FAILED)

                        # prefer the live in-memory task, fall back to the DB row
                        task = self.fetch_from_all_task_queue(ack_file_name)

                        if task is None:
                            task = self.get_file_task_record(ack_file_name)

                        if task is not None:
                            task['status'] = _TASK_STATUS_FAILED
                            task['modify_t'] = self.now()
                            task['missing_parts'] = missing_parts
                            self.failed_tasks[ack_file_name] = task

            self.invoke_with_retry(self.remove_file, file_path)
        except Exception as ex:
            logging.error(ex)

    def remove_file(self, file_path):
        """Delete *file_path* when present (a missing file is a no-op)."""
        if os.path.exists(file_path):
            os.remove(file_path)

    def retry_later(self, fn, *args, **kargs):
        """Record a call so the retry timer replays it on its next pass."""
        self.failed_calls.append((fn, args, kargs))

    def invoke_with_retry(self, fn, *args, **kargs):
        """Call fn(*args, **kargs); on any exception log it and queue the call
        for a later retry instead of propagating."""
        try:
            fn(*args, **kargs)
        except Exception as err:
            logging.error(err)
            self.retry_later(fn, *args, **kargs)

    def remove_related_files(self, dir, basename):
        """Queue deletion of every entry in *dir* whose name starts with *basename*."""
        for entry in os.listdir(dir):
            if entry.startswith(basename):
                self.retry_later(self.remove_file, os.path.join(dir, entry))

    def finish_task(self, file_name):
        """Mark a transfer finished: forget the in-memory task, persist
        FINISHED, then clean up the source file and its staged parts."""
        self.remove_from_all_task_queue(file_name)
        self.update_file_task_record(file_name, status=_TASK_STATUS_FINISHED)

        inbox_path = os.path.join(self.cfg.inbox, file_name)
        self.invoke_with_retry(self.remove_file, inbox_path)
        self.remove_related_files(self.cfg.tmp_part_inbox, file_name)

    def ack_file(self, file_name, missing_parts=None):
        """Write an ack file for *file_name* into tmp_part_inbox.

        With no missing parts the merged file is published to the outbox,
        leftover parts are removed and a 'succeed' ack is written; otherwise
        a 'failed' ack listing the missing part ranges is written.

        :param missing_parts: list of missing (from, to) ranges; defaults to
            an empty list.  (Previously a mutable default argument shared
            between calls — fixed.)
        """
        if missing_parts is None:
            missing_parts = []

        if len(missing_parts) == 0:
            merged_file_path = os.path.join(self.cfg.part_outbox, file_name)

            if os.path.exists(merged_file_path):
                shutil.move(merged_file_path, os.path.join(self.cfg.outbox, file_name))

            self.remove_related_files(self.cfg.part_outbox, file_name)

            self.update_merging_task(file_name, status=_TASK_STATUS_MERGED)

        d = {
            "file_name": file_name,
            "status": _ACK_STATUS_FAILED if len(missing_parts) > 0 else _ACK_STATUS_SUCCEED,
            "missing_parts": missing_parts
        }
        d_json = json.dumps(d).encode()

        # the payload size is encoded in the name so the reader can detect a
        # partially written ack file
        ack_file_name = _ACK_NAME_TEMPLATE.format(file_name, len(d_json))
        with open(os.path.join(self.cfg.tmp_part_inbox, ack_file_name), 'wb') as fp:
            fp.write(d_json)

    def get_inbox_file_md5(self, file_name):
        """MD5 of the inbox file, preferring the value cached on a queued task.

        Bug fix: file_md5() expects a *list* of paths; the fallback previously
        passed a bare string, which iterated its characters as (nonexistent)
        paths and silently returned the MD5 of empty input.
        """
        task = self.get_from_all_task_queue(file_name)
        if task:
            return task['md5']
        return self.file_md5([os.path.join(self.cfg.inbox, file_name)])

    def split_file(self, file_name, task, parts = 1000):
        """Generate up to *parts* part files for *task*: only the receiver's
        recorded missing ranges when present, else the next sequential chunk."""
        assert 'missing_parts' in task
        if task['missing_parts']:
            return self.split_file_missing_parts(file_name, task, parts)
        return self.split_file_complete(file_name, task, parts)

    def split_file_complete(self, file_name, task, parts = 1000):
        """Continue the sequential split of *file_name* from its stored offset,
        producing at most *parts* new part files; returns how many were made."""
        md5 = self.get_inbox_file_md5(file_name)
        file_path = os.path.join(self.cfg.inbox, file_name)

        self.update_file_task_record(file_name, status=_TASK_STATUS_PROCESSING)

        offset = self.get_file_task_offset(file_name)
        if offset >= os.path.getsize(file_path):
            return 0  # everything already split

        offset, part_number = self._split_file(
            file_path,
            md5,
            offset,
            parts,
            self.cfg.part_size,
            self.cfg.tmp_part_inbox)

        # persist the new offset and mirror it on the in-memory task
        self.update_file_task_record(file_name, offset=offset)
        if task:
            task['offset'] = offset

        return part_number

    def split_missing_parts(self, missing_parts, count):
        '''
        Take at most *count* part indices off the front of *missing_parts*.

        :param missing_parts: list of inclusive (from, to) part ranges
        :param count: total number of parts that may be handled now
        :return: (handled_parts, unhandled_parts), both lists of (from, to)

        example:
        input: missing_parts = [(1, 1), (3, 5)], count=2
        output [(1, 1), (3, 3)], [(4, 5)]
        '''
        handled_parts = []
        unhandled_parts = []
        budget = count
        for part in missing_parts:
            start, end = part[0], part[1]
            span = end - start + 1
            if budget <= 0:
                unhandled_parts.append(part)
            elif span <= budget:
                # bug fix: the old strict `span < budget` test sent a range
                # that consumed the budget exactly down the split branch,
                # which appended an empty, inverted range (end+1, end) to
                # unhandled_parts; also dropped a stray dead `pass`
                handled_parts.append(part)
                budget -= span
            else:
                handled_parts.append((start, start + budget - 1))
                unhandled_parts.append((start + budget, end))
                budget = 0
        return handled_parts, unhandled_parts

    def split_file_missing_parts(self, file_name, task, parts = 1000):
        """Regenerate up to *parts* part files covering the receiver's missing
        ranges; the still-uncovered ranges stay on task['missing_parts'].

        Returns the number of part files written.
        """
        missing_parts = task['missing_parts']
        handled_parts, unhandled_parts = self.split_missing_parts(missing_parts, parts)

        task['missing_parts'] = unhandled_parts

        if not handled_parts:
            return 0

        # loop-invariant lookups hoisted out of the per-range loop (the md5
        # in particular re-hashed the whole file on every iteration)
        md5 = self.get_inbox_file_md5(file_name)
        file_path = os.path.join(self.cfg.inbox, file_name)

        part_total_number = 0
        for part in handled_parts:
            start = part[0]
            count = part[1] - start + 1
            offset = (start - 1) * self.cfg.part_size
            _, part_number = FileSyncFOD._split_file(
                file_path,
                md5,
                offset,
                count,
                self.cfg.part_size,
                self.cfg.tmp_part_inbox)
            part_total_number += part_number
        return part_total_number

    @staticmethod
    def _split_file(file_path, md5, offset, parts, part_size, target_dir):
        '''
        Split a slice of *file_path* into part files inside *target_dir*.

        args:
            file_path: file to split
            md5: md5 of the whole file, embedded in every part name
            offset: byte offset to resume splitting from
            parts: maximum number of parts to generate in this call
            part_size: size of each split part
            target_dir: target directory

        Returns:
            (offset, part_number): new resume offset and parts written

        Bug fix: `total` was computed as int(size / part_size) + 1, which
        over-counts by one when the size is an exact multiple of part_size
        (a one-part file was labelled total=2, so the receiver's
        total == to_n completeness check could never fire).  Ceiling
        division now matches the naming examples in the module docstring.
        '''
        file_name = os.path.basename(file_path)
        file_size = os.path.getsize(file_path)
        # ceiling division: number of part_size chunks covering file_size
        total = (file_size + part_size - 1) // part_size
        count = 0
        with open(file_path, 'rb') as fp:
            index = int(offset / part_size)
            fp.seek(offset)
            while count < parts and index < total:
                chunk_data = fp.read(part_size)
                if chunk_data is None or len(chunk_data) == 0:
                    break
                # zero-pad part indices to the digit width of `total`
                total_bits = len(str(total))
                part_name_tmpl = _PART_NAME_TEMPLATE % (total_bits, total_bits)
                frag_file_name = part_name_tmpl.format(file_name, md5, total, index + 1, index + 1, FileSyncFOD.data_md5([chunk_data]), part_size, len(chunk_data))
                with open(os.path.join(target_dir, frag_file_name), 'wb') as frag_fp:
                    frag_fp.write(chunk_data)
                index += 1
                offset += len(chunk_data)
                count += 1
        return (offset, count)

    @staticmethod
    def parse_part_file_name(file_name):
        '''
        Returns:
            part infomation

            example:
                (basename, md5, part_md5, total, from, end, part_size, part_file_size)
        '''
        m = re.match('(.*)-([^-]+)-([^-]+)-([^-]+)-([^-]+)-([^-]+)-([^-]+)-([^-]+).part$', file_name)
        if m:
            return (m[1], m[2], m[6], int(m[3]), int(m[4]), int(m[5]), int(m[7]), int(m[8]))
        return ('', '', '', 0, 0, 0, 0, 0)

    @staticmethod
    def parse_ack_file_name(file_name):
        '''(file_name, file_size)'''
        m = re.match('(.*)-([^-]+).ack', file_name)
        if m:
            return (m[1], int(m[2]))
        return ('', 0)

    @staticmethod
    def parse_file_name(file_name: str):
        d = FileSyncFOD.parse_part_file_name(file_name)
        if len(d) > 0:
            return d[0]
        else:
            return file_name

    @staticmethod
    def count_parts(dir):
        return len(os.listdir(dir))

    def available_parts(self):
        return self.cfg.queue_size - FileSyncFOD.count_parts(self.cfg.part_inbox)

    @staticmethod
    def copy_part_to_fp(part_path, offset, fp):
        chunk_size = 5 * 1000 * 1024
        with open(part_path, 'rb') as r_fp:
            r_fp.seek(offset)
            while True:
                chunk_data = r_fp.read(chunk_size)
                if chunk_data and len(chunk_data) > 0:
                    fp.write(chunk_data)
                else:
                    break

    def merge_two_parts(self, dir, file_name, part1, part2):
        '''Merge two ordered, adjacent-or-overlapping parts into one part file.

        Args:
            dir: directory containing both part files
            file_name: original (un-fragmented) file name
            part1: (file_name, md5, part_md5, total, from, to, part_size, part_file_size)
            part2: same tuple layout; must start at or after part1 and be
                adjacent to or overlapping it (asserted below)

        Returns:
            (merged part name, whole-file md5, merged size in bytes)
        '''
        part1_path = os.path.join(dir, part1[0])
        part2_path = os.path.join(dir, part2[0])

        # 1-based fragment index ranges covered by each part
        part1_from = part1[4]
        part2_from = part2[4]
        part1_to = part1[5]
        part2_to = part2[5]

        part_size = part2[6]

        # actual byte sizes of the two part files
        part1_size = part1[7]
        part2_size = part2[7]

        # caller must supply parts ordered by start, with no gap between them
        assert part2_from >= part1_from
        assert part1_to + 1 >= part2_from

        # offset = how many leading bytes of part2 are already covered by part1
        offset = 0
        if part2_to <= part1_to:
            # part2 lies entirely inside part1: skip all of its bytes
            offset = part2_size
        elif part2_from <= (part1_to + 1):
            # partial overlap (exact adjacency yields offset 0)
            offset = (part1_to + 1 - part2_from) * part_size

        # md5 of the merged content — presumably hashes all of part1 plus
        # part2 from `offset` onward, per the helper's name; TODO confirm
        part_md5 = FileSyncFOD.file_md5_with_offset([(part1_path, 0), (part2_path, offset)])

        total = part1[3]
        md5 = part1[1]
        mrege_from = min(part1_from, part2_from)
        merge_end = max(part1_to, part2_to)

        merged_size = part1_size + part2_size - offset

        # zero-pad the fragment indices so lexicographic name order matches
        # numeric order (merge_file relies on sorting by file name)
        total_bits = len(str(total))
        part_name_tmpl = _PART_NAME_TEMPLATE % (total_bits, total_bits)
        merged_name = part_name_tmpl.format(file_name, md5, total, mrege_from, merge_end, part_md5, part_size, merged_size)
        merged_path = os.path.join(dir, merged_name)
        # rename part1 into the merged name, then append part2's uncovered tail
        shutil.move(part1_path, merged_path)

        with open(merged_path, 'ab') as fp:
            FileSyncFOD.copy_part_to_fp(part2_path, offset, fp)

        # NOTE(review): part1_path should no longer exist after shutil.move,
        # so this removal looks redundant/best-effort — confirm intent
        self.invoke_with_retry(self.remove_file, part1_path)
        self.invoke_with_retry(self.remove_file, part2_path)
        return merged_name, md5, merged_size

    def merge_file(self, file_dir, file_name):
        '''Merge all received parts of *file_name* found in *file_dir*.

        Scans *file_dir* for part files belonging to *file_name*, drops
        corrupt parts (size or md5 mismatch), merges adjacent/overlapping
        parts, and, once every fragment is present and the whole-file md5
        checks out, moves the finished file into the outbox.

        Returns:
            list of (from, to) fragment ranges still missing; an empty list
            means the file was fully merged.
        '''

        total = 0
        part_size = 0
        missing_parts = []
        parts = []
        for fname in os.listdir(file_dir):
            if fname.startswith(file_name):
                try:
                    # BUGFIX: join with file_dir — the directory that was just
                    # listed — not self.cfg.part_outbox; the original built a
                    # wrong path whenever the two directories differed.
                    file_path = os.path.join(file_dir, fname)

                    basename, md5, part_md5, total, from_n, to_n, part_size, part_file_size = FileSyncFOD.parse_part_file_name(fname)

                    # size mismatch: part still being written, skip for now
                    if os.path.getsize(file_path) != part_file_size:
                        continue

                    file_md5 = self.file_md5([file_path])

                    # corrupt part: delete it so the range gets re-requested
                    if file_md5 != part_md5:
                        self.invoke_with_retry(self.remove_file, file_path)
                        continue

                    parts.append((fname, md5, part_md5, total, from_n, to_n, part_size, part_file_size))
                except Exception as ex:
                    logging.error(ex)

        # no parseable part files at all: nothing we can report yet
        if total == 0:
            return missing_parts

        # fragment indices are zero-padded, so name order == numeric order
        parts.sort(key=lambda x: x[0])

        pre_part = ()
        for part in parts:
            if len(pre_part) == 0:
                pre_part = part
                from_n = part[4]
                if from_n > 1:
                    missing_parts.append((1, from_n - 1))
                continue

            # support complex merge even if the two parts are not siblings
            if pre_part[5] + 1 >= part[4]:
                merged_name, merged_md5, merged_size = self.merge_two_parts(file_dir, file_name, pre_part, part)
                merged_from = min(pre_part[4], part[4])
                merged_end = max(pre_part[5], part[5])
                pre_part = (merged_name,
                            pre_part[1],
                            merged_md5,
                            pre_part[3],
                            merged_from,
                            merged_end,
                            part_size,
                            merged_size)
            elif part[4] >= pre_part[4] and part[5] <= pre_part[5]:
                # part fully contained in pre_part: drop the smaller one
                self.invoke_with_retry(self.remove_file, os.path.join(file_dir, part[0]))
            elif part[4] <= pre_part[4] and part[5] >= pre_part[5]:
                # pre_part fully contained in part: keep the bigger one
                self.invoke_with_retry(self.remove_file, os.path.join(file_dir, pre_part[0]))
                pre_part = part
            else:
                # gap between the two parts: record the missing range
                missing_parts.append((pre_part[5] + 1, part[4] - 1))
                pre_part = part

        if len(pre_part) == 0:
            # BUGFIX: every valid part was discarded, so the whole file is
            # missing. The original `[0, total]` was not a list of (from, to)
            # tuples and used 0 where fragment indices are 1-based.
            missing_parts = [(1, total)]
        else:
            if pre_part[5] < total:
                missing_parts.append((pre_part[5] + 1, total))

        if len(pre_part) > 0:
            merged_name = pre_part[0]
            basename, md5, part_md5, total, from_n, to_n, part_size, part_file_size = FileSyncFOD.parse_part_file_name(pre_part[0])
            if from_n == 1 and to_n == total:
                # all fragments merged: verify against the whole-file md5
                merged_file_path = os.path.join(file_dir, merged_name)
                merged_md5 = FileSyncFOD.file_md5([merged_file_path])
                if md5 != merged_md5:
                    # merged result is corrupt: request the entire file again
                    missing_parts = [(1, total)]
                    self.invoke_with_retry(self.remove_file, merged_file_path)
                else:
                    missing_parts = []
                    shutil.move(merged_file_path, os.path.join(self.cfg.outbox, basename))

        return missing_parts

    def normalize_time(self, t):
        if t < self.cfg.min_cost_time:
            return self.cfg.min_cost_time
        elif t > self.cfg.max_cost_time:
            return self.cfg.max_cost_time
        else:
            return t

    def predicate_latest_finish_time_by_size(self, file_size):
        '''
        Returns:
             milliseconds to merge
        '''
        t = file_size / self.cfg.part_size * self.cfg.cost_per_part
        return self.normalize_time(t)

    def predicate_latest_finish_time_by_parts(self, parts):
        '''
        Returns:
             milliseconds to merge
        '''
        t = parts * self.cfg.cost_per_part
        return self.normalize_time(t)




