from tqdm import tqdm as tqdm
from time import sleep
import multiprocessing as mp
from random import random, shuffle
from multiprocessing import get_context
from typing import Callable
from common.logger import logger
import operator


def test_function(var1, var2):
    """Example worker illustrating the signature required by `pool_apply_async`.

    Emulates a job of variable duration by sleeping for a random fraction
    of ``var2`` seconds before returning.

    Parameters
    ----------
    var1, var2
        Example input arguments (any two numbers).

    Returns
    -------
    Example job result: the sum of the two inputs.
    """
    # Random sleep in [0, var2) seconds to simulate uneven task runtimes.
    t = random() * var2
    logger.info(f"Task {var1}, sleeping {t} seconds...")
    sleep(t)
    return var1 + var2


def _wrapper_process_function(**kwargs):
    """The internal function to wrap the function for parallel processing"""
    _idx = kwargs["idx"]
    _process_function = kwargs["process_function"]
    del kwargs["idx"]
    del kwargs["process_function"]
    result = _process_function(**kwargs)
    return (_idx, result)


def pool_apply_async(
    process_function: Callable,
    function_args: list,
    time_wait_seconds: int = 120,
    process_num: int = 20,
    retry_max: int = None,
    quit_if_stuck_n: int = 5,
    timeout_cnt_n: int = 1,
    sleep_while_retry: int = 2,
):
    """Wrapper function to execute a function in multi-processing mode.
    This wrapper is designed with the following functionality in mind.
    1. execute a function in pool.apply_async mode over queue_tasks.
    2. when a function took longer than expected to complete, drop all on-going processes by terminating the pool.
    3. if pool is terminated, check what tasks have been successfully executed, updating queue_tasks while collecting
       processed information.
    4. retry by executing a function in pool.apply_async mode over the updated queue_tasks.
    5. quit this function and return progress result, if multi-processing got stuck for `quit_if_stuck_n` times.
    6. wait for (retry_# * sleep_while_retry) seconds before re-initiating the pool.

    Parameters
    ----------
    process_function : Callable
        select a callable function that takes `job` input and return `job` and `job_result` outputs,
        see `test_function` for better understanding,
        by default test_function
    function_args : list of dictionary
        list of `job` input arguments to be feed into `process_function`.
        E.g., [{arg1, arg2, ...}, {arg1, arg2, ...}] See `test_function` for better understanding,
    time_wait_seconds : int, optional
        how many seconds to wait for each task to process, by default 120
    process_num : int, optional
        how many cpu to use for multi-processing, by default 20
    retry_max : int, optional
        maximum times allowed for retry, by default None, which sets
        retry_max = max(1, number of tasks // 10) so at least one pool pass runs.
    quit_if_stuck_n : int, optional
        if no progress made (e.g. 0 tasks done) for consecutive `quit_if_stuck_n` times, quit multi-processing,
        by default 5
    timeout_cnt_n : int, optional
        if the counts of timeout jobs exceed the threshold, reset the thread pool. By default: 1
    sleep_while_retry : int, optional
        wait for (retry_# * sleep_while_retry) seconds before re-initiating the pool.
        by default 2, so the process will wait for 2, 4, 6, ... seconds for each additional retry.

    Returns
    -------
    job_result_list
        A list of job results in-order mapping to the inputs (function_args).
        Tasks that never completed (timeouts / stuck quit) are simply absent,
        so the list may be shorter than `function_args`.

    Raises
    ------
    RuntimeError
        If internal bookkeeping produced mismatched job/result lists
        (should never happen; indicates a bug).
    """

    if retry_max is None:
        # Bug fix: the previous default `len // 10` was 0 for fewer than 10
        # tasks, so the pool loop never ran and an empty list was returned.
        retry_max = max(1, len(function_args) // 10)

    # Success callback for apply_async: advance the progress bar and stash the
    # (idx, result) pair. `pbar` and `tmp_result` are rebound per retry pass.
    def _update_pbar(insert_value, **kwargs):
        pbar.update(1)
        tmp_result.append(insert_value)

    # Initiate bookkeeping state.
    retry_count, stuck_count, done_count = 0, 0, 0
    job_list, job_result_list = [], []
    # Assign each task a key (_idx) to record the order of return.
    job_dict = {_idx: args for _idx, args in enumerate(function_args)}

    # NOTE: counts timed-out get() calls, not tasks still queued when
    # retry_max is exhausted — the warning below is a lower bound.
    unprocessed_tasks = 0
    for retry_count in range(retry_max):
        queue_len = len(job_dict)
        if queue_len == 0:
            break
        # Linear back-off: rest for retry_count * sleep_while_retry seconds.
        if sleep_while_retry > 0 and retry_count > 0:
            seconds_to_wait = retry_count * sleep_while_retry
            logger.info(
                f"Retry = {retry_count} : waiting for {seconds_to_wait}s before restarting pool."
            )
            sleep(seconds_to_wait)
        # Fresh progress bar and result buffer for this pass.
        pbar = tqdm(total=queue_len)
        tmp_result = []
        logger.info(f"{queue_len} tasks are processing...")

        # "spawn" start method: children do not inherit parent state, which
        # keeps pool restarts clean after terminate().
        with get_context("spawn").Pool(min(process_num, queue_len)) as pool:
            multi_res = []
            _idxes = list(job_dict.keys())
            # Shuffle all tasks in each iteration to avoid sticking on specific tasks.
            shuffle(_idxes)
            for _idx in _idxes:
                input_dict = {"process_function": process_function, "idx": _idx}
                input_dict.update(job_dict[_idx])
                multi_res.append(
                    pool.apply_async(
                        _wrapper_process_function,
                        kwds=input_dict,
                        callback=_update_pbar,
                        error_callback=print,
                    )
                )

            timeout_cnt = 0
            for res in multi_res:
                try:
                    res.get(timeout=time_wait_seconds)
                except mp.TimeoutError:
                    # Bug fix: increment before logging so the first timeout
                    # reports "1 jobs" instead of "0 jobs".
                    timeout_cnt += 1
                    logger.warning(f"== Timeout found: {timeout_cnt} jobs. ==")
                    if timeout_cnt >= timeout_cnt_n:
                        unprocessed_tasks += timeout_cnt
                        break
            # Because res.get timeout does not stop the child process,
            # our strategy here is to kill the on-going pool.
            pool.terminate()

        done_count = len(tmp_result)
        logger.info(f"Retry = {retry_count} : {done_count} tasks completed")
        # A pass with zero completions counts toward the stuck counter;
        # any progress resets it.
        stuck_count = stuck_count + 1 if done_count == 0 else 0
        if stuck_count >= quit_if_stuck_n and done_count == 0:
            logger.warning(
                f"Retry = {retry_count} : process got stuck for {stuck_count} times, "
                + f"exceeding quit_if_stuck_n {quit_if_stuck_n}, terminate process."
            )
            break
        # Remove completed tasks from the queue and accumulate results.
        if done_count > 0:
            job_list_part, job_result_list_part = zip(*tmp_result)
            job_list_part = list(job_list_part)
            job_result_list_part = list(job_result_list_part)
            for _idx in job_list_part:
                del job_dict[_idx]

            job_list += job_list_part
            job_result_list += job_result_list_part

    if unprocessed_tasks > 0:
        logger.warning(f"=== {unprocessed_tasks} tasks are left unprocessed. ===")

    # Bug fix: was an `assert` whose message called logger.error inside an
    # f-string (evaluating to "None") and which vanishes under `python -O`.
    if len(job_list) != len(job_result_list):
        msg = f"Size mismatch between jobID {len(job_list)} and its result {len(job_result_list)}."
        logger.error(msg)
        raise RuntimeError(msg)

    # Sort results by original task index to restore input order
    # (avoids shadowing the builtin `id` that the old dict build used).
    ordered = sorted(zip(job_list, job_result_list), key=operator.itemgetter(0))
    return [result for _idx, result in ordered]


if __name__ == "__main__":
    ############
    # Test case
    ############
    # Ten example jobs; each dict holds the kwargs for one test_function call.
    example_jobs = []
    for i in range(10):
        example_jobs.append({"var1": i, "var2": i + 1})
    results = pool_apply_async(
        process_function=test_function,
        function_args=example_jobs,
        time_wait_seconds=10,
        process_num=5,
        quit_if_stuck_n=5,
        sleep_while_retry=2,
    )
    # `results` lists the test_function returns in the same order as the inputs.
    logger.info(f"Results: {results}")
