import datetime as dt
import multiprocessing as mp
import time
from typing import Tuple, Any, Sequence
import sys

import numpy as np


def lin_parts(numAtoms, numThreads):
    """Linear partition of atoms into (near) equal contiguous chunks.

    Returns an int array of boundaries starting at 0 and ending at
    ``numAtoms``, with ``min(numThreads, numAtoms)`` segments, suitable
    for slicing a flat list of atoms with a single loop.
    """
    nChunks = min(numThreads, numAtoms)
    edges = np.linspace(0, numAtoms, nChunks + 1)
    return np.ceil(edges).astype(int)


def nested_parts(numAtoms, numThreads, upperTriang=False):
    """Partition of atoms with an inner (nested) loop.

    Splits ``numAtoms`` into at most ``numThreads`` contiguous chunks whose
    workloads are balanced when the cost of atom ``i`` grows linearly with
    ``i`` (a triangular, double-loop structure). Returns an int array of
    chunk boundaries starting at 0 and ending at ``numAtoms``.

    If ``upperTriang`` is True the first rows are the heaviest, so the
    chunk sizes are reversed before accumulating.

    Fixes vs. the previous version: the quadratic-root update and the
    ``append`` belong INSIDE the loop (otherwise only one boundary was ever
    produced), and the leading-zero re-prepend belongs inside the
    ``upperTriang`` branch only.
    """
    parts, numThreads_ = [0], min(numThreads, numAtoms)
    for num in range(numThreads_):
        # Next boundary solves part^2 + part = prev^2 + prev + N(N+1)/threads,
        # i.e. each chunk carries an equal share of the triangular workload.
        part = 1 + 4 * (parts[-1] ** 2 + parts[-1] + numAtoms * (numAtoms + 1.) / numThreads_)
        part = (-1 + part ** .5) / 2.
        parts.append(part)
    parts = np.round(parts).astype(int)
    if upperTriang:  # the first rows are the heaviest
        parts = np.cumsum(np.diff(parts)[::-1])
        parts = np.append(np.array([0]), parts)  # restore the 0 boundary
    return parts


def mpPandasObj(func,
                pdObj: Tuple[str, Sequence[Any]],
                numThreads=24, mpBatches=1, linMols=True, **kwargs):
    """Parallelize ``func`` over slices of ``pdObj`` and recombine the output.

    Parameters
    ----------
    func : callback run on each molecule; receives the slice under the
        keyword named by ``pdObj[0]`` plus any ``**kwargs``.
    pdObj : tuple of (argument name for the molecule, sequence of atoms to
        be grouped into molecules).
    numThreads : number of worker processes (1 runs sequentially in-process,
        which is easier to debug).
    mpBatches : number of batches per thread (more, smaller jobs).
    linMols : use a linear partition (True) or a nested-loop partition (False).
    kwargs : extra keyword arguments forwarded to ``func``.

    Returns
    -------
    The per-job outputs concatenated and index-sorted when they are pandas
    DataFrames/Series; otherwise the raw list of outputs.
    """
    import pandas as pd

    if linMols:
        parts = lin_parts(len(pdObj[1]), numThreads * mpBatches)
    else:
        parts = nested_parts(len(pdObj[1]), numThreads * mpBatches)
    name, obj = pdObj
    jobs = []
    for i in range(1, len(parts)):
        # One job per molecule: a slice of the atoms plus the callback.
        job = {
            name: obj[parts[i - 1]:parts[i]],
            "func": func,
        }
        job.update(kwargs)
        jobs.append(job)
    if numThreads == 1:
        out = process_jobs_(jobs)
    else:
        out = process_jobs(jobs, numThreads=numThreads)
    # BUG FIX: the old loop called df.append(i) without reassigning the
    # result (DataFrame.append is not in-place and is removed in pandas 2.x),
    # so an empty frame was always returned. pd.concat does this correctly.
    if isinstance(out[0], (pd.DataFrame, pd.Series)):
        return pd.concat(out).sort_index()
    return out


def process_jobs_(jobs):
    """Run jobs sequentially in the current process (useful for debugging)."""
    return [expandCall(job) for job in jobs]


def reportProgress(jobNum, numJobs, time0, task):
    """Write a one-line progress report for async jobs to stderr.

    ``jobNum`` is the 1-based count of completed jobs, ``numJobs`` the total,
    ``time0`` the epoch start time, ``task`` a label for the message.
    Uses carriage return to overwrite the line until the last job completes.
    """
    fracDone = float(jobNum) / numJobs
    elapsedMin = (time.time() - time0) / 60.
    remainingMin = elapsedMin * (1 / fracDone - 1)
    timeStamp = str(dt.datetime.fromtimestamp(time.time()))
    line = (timeStamp + ' ' + str(round(fracDone * 100, 2)) + '% ' + task +
            ' done after ' + str(round(elapsedMin, 2)) +
            ' minutes. Remaining ' + str(round(remainingMin, 2)) + ' minutes.')
    terminator = '\r' if jobNum < numJobs else '\n'
    sys.stderr.write(line + terminator)
    return


def process_jobs(jobs, task=None, numThreads=24):
    """Run jobs in parallel on a multiprocessing pool.

    Each job is a dict that must contain a 'func' callback (see expandCall);
    the remaining keys are passed to the callback as keyword arguments.
    Progress is reported to stderr as asynchronous results arrive.

    Parameters
    ----------
    task : label used in progress messages; defaults to the callback's name.
    numThreads : number of worker processes.

    Returns the list of job outputs. Note imap_unordered is used, so results
    arrive in completion order, not submission order.
    """
    if not jobs:
        return []  # avoid IndexError on jobs[0] for an empty job list
    if task is None:
        task = jobs[0]['func'].__name__
    pool = mp.Pool(processes=numThreads)
    out, time0 = [], time.time()
    try:
        # Process asynchronous output, reporting progress as results arrive.
        for i, out_ in enumerate(pool.imap_unordered(expandCall, jobs), 1):
            out.append(out_)
            reportProgress(i, len(jobs), time0, task)
    finally:
        # Always close/join, even on error — this is needed to prevent
        # memory leaks (the original leaked the pool if the loop raised).
        pool.close()
        pool.join()
    return out


def expandCall(kargs):
    """Invoke ``kargs['func']`` with the remaining entries as keyword args.

    The dict is copied first so the caller's job dict is NOT mutated
    (the previous version deleted 'func' from the caller's dict in place).
    """
    kargs = dict(kargs)
    func = kargs.pop('func')
    return func(**kargs)
