from __future__ import division

import sys, os.path, logging, itertools

import numpy, scipy.constants, scipy.optimize, scipy.stats

import common, wlc

# Module-level logger; a StreamHandler is attached here so debug/info output
# is visible even when the application does not configure logging.
logger = logging.getLogger("kinetics_opt")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)

# Extended-precision float used for all likelihood accumulations in this module.
bestfloat = numpy.longdouble
# Default search bounds for the diffuse k0 optimisation (see _estimate_k0_diffuse).
default_diffuse_k0_lower_bound = bestfloat(1e-10)
default_diffuse_k0_upper_bound = bestfloat(1e10)

def wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
    kbt_over_p, upper_bound_units, newtons_per_unit):
    """
    Tabulate wlc.wlc_inverse_derived_times_v_over_l_minus_1_over_kl on the
    force grid 0, newtons_per_unit, ..., (upper_bound_units - 1) * newtons_per_unit.

    Params:
    upper_bound_units: exclusive
    Returns: h, the tabulated values (one per grid point)
    """
    forces = numpy.arange(upper_bound_units, dtype=bestfloat) * newtons_per_unit
    return wlc.wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        kbt_over_p, forces)

def interval_covering(se):
    """
    Decompose a set of intervals into elementary segments.

    Parameters:
        se=list of tuple (start, end) with start < end
    Returns:
        tuple of sorted list of start or end values of size n, and for each segment
        a list of indices of input tuples that cover that segment. The second element has size n-1.
        n <= len(se)
        Both elements are empty when se is empty.
    """
    if not se:
        return (), ()
    for p in se:
        common.check_lt(p[0], p[1])
    # Tag every endpoint with the index of the interval it belongs to.
    soe = [
        (se[i][0], i) for i in range(len(se))] + [
        (se[i][1], i) for i in range(len(se))]
    item0 = lambda x : x[0]
    soe.sort(key=item0)
    m = tuple(map(lambda x : (x[0], tuple(x[1])), itertools.groupby(soe, key=item0)))
    n = len(m)
    # Sweep left to right, maintaining `a` = the set of currently open intervals.
    a = set(x for (_, x) in m[0][1] if m[0][0] == se[x][0])
    relevant = []
    for i in range(n - 1):
        relevant.append(a.copy())
        ending = set(x for (_, x) in m[i + 1][1] if m[i + 1][0] == se[x][1])
        assert ending <= a, (ending, a)
        a -= ending
        starting = set(x for (_, x) in m[i + 1][1] if m[i + 1][0] == se[x][0])
        a |= starting
    common.check_False(a)
    assert relevant[-1] == set(x for (_, x) in m[n - 1][1] if m[n - 1][0] == se[x][1])
    # The original `zip(*m)[0]` only works on Python 2; on Python 3 zip
    # returns an iterator, which is not subscriptable.
    return tuple(p for (p, _) in m), tuple(relevant)

def array_interval_covering(se):
    """Like interval_covering, but each relevance set becomes a mutable list."""
    positions, relevance = interval_covering(se)
    relevance_lists = [list(cover) for cover in relevance]
    return positions, relevance_lists

def number_of_ranges(relevance):
    """
    Return 1 + the largest interval index mentioned anywhere in relevance,
    or 0 when no segment references any interval.
    """
    flat = [index for segment in relevance for index in segment]
    # Some intervals may be empty.
    return (max(flat) + 1) if flat else 0

def compute_differences(kbt_over_dx, newtons_per_unit, h, a, b, only_sums):
    """
    Per-segment pieces of the survival integral over the force-unit range [a, b).

    Params:
    h: table from wlc_inverse_derived_times_v_over_l_minus_1_over_kl
    a is inclusive; b is exclusive
    only_sums: if True return the scalar pair (total diff0, total diff1 + diff2);
        otherwise return an array of shape (b - a - 1, 2) with per-segment values.
    """
    r = numpy.arange(a, b, dtype=int)
    # Exponential factor exp(force / (kbt/dx)) at each grid point.
    e = numpy.exp(r * newtons_per_unit / kbt_over_dx)
    left_r, right_r = r[:-1], r[1:]
    left_h, right_h = h[a:b - 1], h[a + 1:b]
    left_e, right_e = e[:-1], e[1:]
    # diff0 telescopes, so the summed form is just the endpoint difference.
    diff0 = e[-1] - e[0] if only_sums else right_e - left_e
    # diff1/diff2 together integrate the linear interpolation of h on each
    # segment against the exponential weight (closed form, no quadrature).
    diff1 = (
        (left_h * right_r - right_h * left_r) * (right_e - left_e))
    ez = e * (r - kbt_over_dx / newtons_per_unit)
    left_ez, right_ez = ez[:-1], ez[1:]
    diff2 = ((right_h - left_h) * (right_ez - left_ez))
    return (diff0, (diff1.sum() + diff2.sum())) if only_sums else numpy.array(
        [diff0, diff1 + diff2], dtype=bestfloat).transpose()

def partial_sums(kbt_over_dx, newtons_per_unit, h, positions, relevance):
    """
    Sum per-segment (diff0, diff1 + diff2) contributions into one row per
    interval index; every interval covering a segment receives its sums.
    Returns an array of shape (number_of_ranges(relevance), 2).
    """
    common.check_True(
        len(positions) == 1 + len(relevance) or (not positions and not relevance))
    n = number_of_ranges(relevance)
    ps = numpy.zeros((n, 2), dtype=bestfloat)
    for i in range(len(relevance)):
        if relevance[i]:
            a, b = positions[i:i+2]
            diff0, diff1 = compute_differences(
                kbt_over_dx, newtons_per_unit, h, a, b + 1, True)
            # Fancy-indexed += adds this segment's sums to all covering intervals.
            ps[relevance[i]] += [diff0, diff1]
    return ps

def full_partial_sums(kbt_over_dx, newtons_per_unit, h, y_start_list):
    """
    For each y_start, tabulate cumulative (diff0, diff1 + diff2) sums from
    y_start up to every grid force; entries strictly below y_start are inf.
    Returns an array of shape (len(y_start_list), len(h), 2).
    """
    upper_bound_units = len(h)
    # The routine works even with out of range y_start, but a check does not hurt.
    numpy.testing.assert_array_less(y_start_list, upper_bound_units)
    ps = compute_differences(
        kbt_over_dx, newtons_per_unit, h, 0, upper_bound_units, False)
    common.check_equal((upper_bound_units - 1, 2), ps.shape)
    #common.check_True(ps.all())
    n = len(y_start_list)
    range_tile = numpy.tile(numpy.arange(upper_bound_units, dtype=int), (n, 1))
    assert range_tile.shape == (n, upper_bound_units)
    y_start_arrays = numpy.asarray(y_start_list, dtype=int)[:, numpy.newaxis]
    a = numpy.empty((len(y_start_list), upper_bound_units, 2), dtype=bestfloat)
    # Column 0 is only initialised by the masked zeroing below, which assumes
    # every y_start is >= 0 -- TODO confirm callers never pass negative starts.
    a[:, 1:, :] = ps
    a[range_tile <= y_start_arrays] = 0  # fill doesn't work
    a.cumsum(axis=1, out=a)
    a[range_tile < y_start_arrays] = numpy.inf
    return a

def peak_integral_params_to_start_end_tuples(peak_integral_params_list):
    """Flatten every peak's y_units grid into consecutive (start, end) pairs."""
    # The covering stuff is inefficient for gaussian convolutions.
    pairs = []
    for pip in peak_integral_params_list:
        units = pip.y_units
        pairs.extend(zip(units[:-1], units[1:]))
    return pairs

def accumulate_one_level(ps, peak_counts):
    """
    Row-wise cumulative sums of ps within consecutive groups whose sizes are
    given by peak_counts; the groups must exactly cover the rows of ps.
    """
    common.check_equal(2, ps.shape[1])
    result = numpy.empty(ps.shape, dtype=bestfloat)
    start = 0
    for count in peak_counts:
        stop = start + count
        ps[start:stop].cumsum(axis=0, out=result[start:stop])
        start = stop
    common.check_equal(start, ps.shape[0])
    return result

def _nonempty_integral_values(kbt_over_dx, peak_value_count_list, cps):
    """
    Scale the accumulated (diff0, diff1 + diff2) pairs into integral values
    using each peak's kl and v_over_l; one output value per nonempty interval.
    """
    common.check_equal(PeakValueCount.sum_counts(peak_value_count_list), len(cps))
    i = 0
    result = numpy.empty(cps.shape[0], dtype=bestfloat)
    for pvc in peak_value_count_list:
        j = i + pvc.count
        sub_cps = cps[i:j]
        result[i:j] = kbt_over_dx / pvc.v_over_l * (
            sub_cps[:, 0] / pvc.kl + sub_cps[:, 1])
        # Empty when i == j, because count is zero.
        i = j
    common.check_True((result > 0).all())
    return result

class PeakIntegralParams(object):
    """Per-peak integration description: rate parameters plus the y grid."""

    def __init__(self, v_over_l, kl, y_units):
        """
        Params:
        y_units: Starts with y_start, followed by all y_ends.
        """
        self.v_over_l = bestfloat(v_over_l)
        self.kl = bestfloat(kl)
        self.y_units = y_units
        common.check_True(len(y_units))
        common.check_increasing(y_units)
        # When the digitised unfolding force is equal to the digitised starting
        # force, we have only one force, the starting one, and nothing to compute.

    def count(self):
        """Number of integration intervals spanned by y_units."""
        return len(self.y_units) - 1

    @staticmethod
    def sum_counts(peak_integral_params_list):
        return sum(p.count() for p in peak_integral_params_list)

    @staticmethod
    def counts(peak_integral_params_list):
        return [p.count() for p in peak_integral_params_list]

    @staticmethod
    def from_basic_peak(basic_peak):
        return PeakIntegralParams(
            basic_peak.v_over_l(), basic_peak.kl(), basic_peak.y_units())

    @staticmethod
    def from_basic_peaks(basic_peaks):
        return [PeakIntegralParams.from_basic_peak(bp) for bp in basic_peaks]

    @staticmethod
    def from_diffuse_peak(diffuse_peak):
        return PeakIntegralParams(
            diffuse_peak.v_over_l(), diffuse_peak.kl(), diffuse_peak.y_units())

    @staticmethod
    def from_diffuse_peaks(diffuse_peaks):
        return [
            PeakIntegralParams.from_diffuse_peak(dp) for dp in diffuse_peaks]


class PeakValueCount(object):
    """Per-peak (v_over_l, kl) parameters plus the number of integral values."""

    def __init__(self, v_over_l, kl, count):
        self.v_over_l = bestfloat(v_over_l)
        self.kl = bestfloat(kl)
        self.count = count

    @staticmethod
    def from_peak_integral_params(pip):
        """Mirror a PeakIntegralParams instance, materialising its count."""
        return PeakValueCount(pip.v_over_l, pip.kl, pip.count())

    @staticmethod
    def from_peak_integral_params_list(pip_list):
        return [
            PeakValueCount.from_peak_integral_params(pip)
            for pip in pip_list]

    @staticmethod
    def sum_counts(peak_value_count_list):
        return sum(pvc.count for pvc in peak_value_count_list)

class Protocol(object):
    """Pulling-protocol parameters: loading terms, start force, multiplicity m."""

    def __init__(self, v_over_l, kl, y_start_units, m):
        self.v_over_l = bestfloat(v_over_l)
        self.kl = bestfloat(kl)
        self.y_start_units = y_start_units
        common.check_ge(m, 1)
        self.m = m

    def __repr__(self):
        return "Protocol(%r, %r, %r, %r)" % (
            self.v_over_l, self.kl, self.y_start_units, self.m)

    def __eq__(self, other):
        # Returning NotImplemented (instead of blindly touching other's
        # attributes) makes comparison against non-Protocol objects return
        # False rather than raise AttributeError.
        if not isinstance(other, Protocol):
            return NotImplemented
        return (
            self.v_over_l == other.v_over_l
            and self.kl == other.kl
            and self.y_start_units == other.y_start_units
            and self.m == other.m)

    def __ne__(self, other):
        # Explicit __ne__ for consistency with DiffusePeak, which defines one.
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    @staticmethod
    def ms(protocols):
        return [protocol.m for protocol in protocols]

    @staticmethod
    def y_start_units_list(protocols):
        return [protocol.y_start_units for protocol in protocols]

    @staticmethod
    def kls(protocols):
        return [protocol.kl for protocol in protocols]

    @staticmethod
    def v_over_ls(protocols):
        return [protocol.v_over_l for protocol in protocols]


class DiffuseProtocol(object):
    """A Protocol plus measurement-noise parameters (sigma and half support)."""

    def __init__(self, protocol, sigma_units, hsu):
        self.protocol, self.sigma_units, self.hsu = protocol, sigma_units, hsu

    def y_start_units(self):
        """Delegate to the wrapped protocol."""
        return self.protocol.y_start_units


class BasicPeak(object):
    """
    A single observed unfolding event: a protocol plus one digitised
    unfolding force (y_end_units).
    """

    def __init__(self, protocol, y_end_units):
        self.protocol = protocol
        common.check_le(protocol.y_start_units, y_end_units)
        self.y_end_units = y_end_units

    def __repr__(self):
        return "BasicPeak(%r, %r)" % (self.protocol, self.y_end_units)

    def y_units(self):
        """Integration grid: [start] when start == end, else [start, end]."""
        if self.start_equals_first_end():
            return [self.y_start_units()]
        else:
            return [self.y_start_units(), self.y_end_units]

    # The original class defined y_start_units twice with identical bodies;
    # the duplicate has been removed.
    def y_start_units(self):
        return self.protocol.y_start_units

    def start_equals_first_end(self):
        return self.y_start_units() == self.y_end_units

    def len_y_end_units(self):
        # A basic peak always carries exactly one end force.
        return 1

    def v_over_l(self):
        return self.protocol.v_over_l

    def kl(self):
        return self.protocol.kl

    def m(self):
        return self.protocol.m

    @staticmethod
    def protocols(basic_peaks):
        return [basic_peak.protocol for basic_peak in basic_peaks]

    @staticmethod
    def ms(basic_peaks):
        return Protocol.ms(BasicPeak.protocols(basic_peaks))

    @staticmethod
    def y_start_units_list(basic_peaks):
        return Protocol.y_start_units_list(BasicPeak.protocols(basic_peaks))

    @staticmethod
    def kls(basic_peaks):
        return Protocol.kls(BasicPeak.protocols(basic_peaks))

    @staticmethod
    def v_over_ls(basic_peaks):
        return Protocol.v_over_ls(BasicPeak.protocols(basic_peaks))

    @staticmethod
    def y_end_units_list(basic_peaks):
        return [basic_peak.y_end_units for basic_peak in basic_peaks]

class DiffusePeak(object):
    """
    An unfolding event whose end force is noisy: instead of a single y_end it
    carries candidate y_end_units with per-candidate weights.

    weights and y_end_units may be either numpy arrays or lists.
    sum_{y=y_s}^{\inf} w_y(x) = 1, \forall x
    """
    def __init__(self, protocol, weights, y_end_units):
        self.protocol = protocol
        common.check_equal(len(weights), len(y_end_units))
        self.weights = weights
        self.y_end_units = y_end_units
        common.check_le(protocol.y_start_units, y_end_units[0])
        # NOTE(review): this second assignment is redundant; y_end_units was
        # already stored three lines above.
        self.y_end_units = y_end_units

    def allclose(self, other):
        """Equality up to floating-point tolerance on weights and y_end_units."""
        return self.equality(other, numpy.allclose)

    def __eq__(self, other):
        return self.equality(other, numpy.array_equal)

    def equality(self, other, f):
        """Compare protocols exactly and the array fields with comparator f."""
        return self.protocol == other.protocol and f(
            self.weights, other.weights) and f(self.y_end_units, other.y_end_units)

    def __ne__(self, other):
        return not self.__eq__(other)

    def y_units(self):
        """Integration grid: y_end_units, prefixed by y_start when distinct."""
        if self.start_equals_first_end():
            return self.y_end_units
        else:
            # Preserve the container type (list in, list out; array in, array out).
            if isinstance(self.y_end_units, list):
                return [self.y_start_units()] + self.y_end_units
            else:
                return numpy.concatenate([
                        numpy.asarray([self.y_start_units()], dtype=int),
                        self.y_end_units])

    def y_start_units(self):
        return self.protocol.y_start_units

    def v_over_l(self):
        return self.protocol.v_over_l

    def kl(self):
        return self.protocol.kl

    def start_equals_first_end(self):
        return self.y_start_units() == self.y_end_units[0]

    def len_y_end_units(self):
        return len(self.y_end_units)

    def __repr__(self):
        return "DiffusePeak(%r, %r, %r)" % (
            self.protocol, self.weights, self.y_end_units)

    @staticmethod
    def __norm(sigma_units):
        """
        Return an object with a pdf method: a zero-mean normal with the given
        scale, or (when sigma_units is falsy) a point mass at zero.
        """
        if sigma_units:
            # pdf only seems to work with float values.
            d = scipy.stats.norm(loc=0, scale=float(sigma_units))
        else:
            class A(object):
                def pdf(self, s):
                    # 1 at s == 0, 0 elsewhere: a degenerate distribution.
                    return numpy.logical_not(s)
            d = A()
        return d

    @staticmethod
    def normal_from_basic_peak(basic_peak, sigma_units, hsu, upper_bound_units):
        """
        Build a DiffusePeak by spreading basic_peak's end force with a normal
        noise kernel of width sigma_units, restricted to the admissible range
        [y_start, upper_bound_units).

        hsu: half-support units
        """
        d = DiffusePeak.__norm(sigma_units)
        _distrib = d.pdf(range(-hsu, hsu + 1))
        # Index 0 is a zero sentinel so out-of-support lookups contribute nothing.
        distrib = numpy.empty(2 * hsu + 2, dtype=bestfloat)
        distrib[0] = 0
        distrib[1:] = _distrib
        #distrib /= distrib.sum()  # not necessary
        #cpd = distrib.cumsum()
        #common.check_True(numpy.allclose(cpd[2 * hsu], 1))
        # Candidate real forces: within hsu of the observation, clipped to range.
        a = max(basic_peak.y_end_units - hsu, basic_peak.y_start_units())
        b = min(basic_peak.y_end_units + hsu + 1, upper_bound_units)
        logger.debug(
            "a=%d b=%d hsu=%d upper_bound_units=%d", a, b, hsu, upper_bound_units)
        x = numpy.arange(a, b, dtype=int)
        numerator = distrib[1 + hsu + x - basic_peak.y_end_units]
        # cpd is the same (one) for positive values larger than or equal to hsu.
        #denominator = cpd[hsu + numpy.minimum(x - basic_peak.y_start_units(), hsu)] - cpd[hsu + upper_bound_units - x]
        # c/d are slice bounds into distrib normalising each candidate's kernel
        # mass over the admissible range.
        c = 1 + hsu + numpy.maximum(basic_peak.y_start_units() - x, -1 - hsu)
        d = 1 + hsu + numpy.minimum(hsu, upper_bound_units - 1 - x) + 1
        logger.debug("numerator=%s distrib=%s c=%s d=%s", numerator, distrib, c, d)
        # Could do it in linear time using cumulative sums, but who cares.
        denominator = numpy.empty(b - a, dtype=bestfloat)
        for i in range(b - a):
            denominator[i] = distrib[c[i]:d[i]].sum()
        weights = numerator / denominator
        protocol = Protocol(
            basic_peak.v_over_l(), basic_peak.kl(),
            basic_peak.y_start_units(), basic_peak.m())
        return DiffusePeak(protocol, weights, x)
    
    @staticmethod
    def truncated_normal_from_basic_peak(
        basic_peak, sigma_units, hsu, upper_bound_units):
        """
        Like normal_from_basic_peak, but using a one-sided (truncated) kernel:
        only real forces at or below the observed force are candidates.
        """
        d = DiffusePeak.__norm(sigma_units)
        _distrib = d.pdf(range(hsu + 1))
        distrib = numpy.empty(hsu + 2, dtype=bestfloat)
        distrib[0] = 0
        distrib[1:] = _distrib
        #distrib /= distrib.sum()
        a = max(basic_peak.y_end_units - hsu, basic_peak.y_start_units())
        b = min(basic_peak.y_end_units + 1, upper_bound_units)
        # If real force x results in experimentally observed y, x <= y must hold.
        x = numpy.arange(a, b, dtype=int)
        numerator = distrib[1 + basic_peak.y_end_units - x]
        c = 1 + numpy.maximum(basic_peak.y_start_units() - x, -1)
        d = 1 + numpy.minimum(hsu, upper_bound_units - 1 - x) + 1
        logger.debug("numerator=%s distrib=%s c=%s d=%s", numerator, distrib, c, d)
        # Could do it in linear time using cumulative sums, but who cares.
        denominator = numpy.empty(b - a, dtype=bestfloat)
        for i in range(b - a):
            denominator[i] = distrib[c[i]:d[i]].sum()
        weights = numerator / denominator
        protocol = Protocol(
            basic_peak.v_over_l(), basic_peak.kl(),
            basic_peak.y_start_units(), basic_peak.m())
        return DiffusePeak(protocol, weights, x)

    @staticmethod
    def protocols(basic_peaks):
        return [basic_peak.protocol for basic_peak in basic_peaks]

    @staticmethod
    def ms(basic_peaks):
        return Protocol.ms(DiffusePeak.protocols(basic_peaks))

    @staticmethod
    def kls(basic_peaks):
        return Protocol.kls(DiffusePeak.protocols(basic_peaks))

    @staticmethod
    def v_over_ls(basic_peaks):
        return Protocol.v_over_ls(DiffusePeak.protocols(basic_peaks))

    @staticmethod
    def y_end_units_list(diffuse_peaks):
        # When start == first end, skip it.
        return [diffuse_peak.y_end_units for diffuse_peak in diffuse_peaks]


class PositionsRelevanceBasicPeaks(object):
    """Bundle of an interval covering (positions, relevance) and its basic peaks."""

    def __init__(self, positions, relevance, basic_peaks):
        self.positions, self.relevance, self.basic_peaks = (
            positions, relevance, basic_peaks)


class PositionsRelevanceDiffusePeaks(object):
    """Bundle of an interval covering (positions, relevance) and its diffuse peaks."""

    def __init__(self, positions, relevance, diffuse_peaks):
        self.positions, self.relevance, self.diffuse_peaks = (
            positions, relevance, diffuse_peaks)


### API with largely unchecked redundant input parameters ###

def compute_nonempty_integrals(
    kbt_over_dx, newtons_per_unit, h, positions, relevance,
    peak_integral_params_list):
    """
    Evaluate the per-interval integrals for every peak, reusing shared
    segments across overlapping intervals via (positions, relevance).
    Returns one value per nonempty interval; empty intervals are skipped.
    """
    ps = partial_sums(kbt_over_dx, newtons_per_unit, h, positions, relevance)
    peak_counts = PeakIntegralParams.counts(peak_integral_params_list)
    cps = accumulate_one_level(ps, peak_counts)
    peak_value_count_list = PeakValueCount.from_peak_integral_params_list(
        peak_integral_params_list)
    return _nonempty_integral_values(kbt_over_dx, peak_value_count_list, cps)

def add_zero_for_empty_integrals(nonempty_integral_values, peaks):
    """
    Re-insert a zero for every peak whose start equals its first end (those
    intervals were skipped by the nonempty computation), so the result has
    exactly one entry per y_end of every peak.
    """
    n = sum(peak.len_y_end_units() for peak in peaks)
    result = numpy.zeros(n, dtype=bestfloat)
    i = 0
    zeroes = 0
    for peak in peaks:
        zeroes += peak.start_equals_first_end()
        j = i + peak.len_y_end_units() - peak.start_equals_first_end()
        # Shift the nonempty values right by the number of zeros inserted so far;
        # the skipped slot keeps its initial zero.
        result[i + zeroes:j + zeroes] = nonempty_integral_values[i:j]
        i = j
    common.check_equal(nonempty_integral_values.shape, (i, ))
    common.check_equal(i + zeroes, n)
    return result

def compute_biased_k0(kbt_over_dx, newtons_per_unit, h, prbp):
    """
    Maximum-likelihood (biased) estimate of k0 for basic peaks, together with
    the per-peak log likelihoods evaluated at that estimate.
    """
    peak_integral_params_list = PeakIntegralParams.from_basic_peaks(prbp.basic_peaks)
    nonempty_integral_values = compute_nonempty_integrals(
        kbt_over_dx, newtons_per_unit, h, prbp.positions, prbp.relevance,
        peak_integral_params_list)
    integral_values = add_zero_for_empty_integrals(
        nonempty_integral_values, prbp.basic_peaks)
    ms = BasicPeak.ms(prbp.basic_peaks)
    common.check_equal(integral_values.shape, (len(prbp.basic_peaks), ))
    # Closed-form MLE: the event count over the m-weighted total integral.
    k0 = len(prbp.basic_peaks) / (ms * integral_values).sum()
    lls = _compute_log_likelihoods(
        kbt_over_dx, newtons_per_unit, h, prbp.basic_peaks, k0, integral_values)
    return k0, lls

def _estimate_k0_diffuse(
    kbt_over_dx, newtons_per_unit, h, prdp, k0_lower_bound, k0_upper_bound):
    """
    Estimate k0 for diffuse peaks by bounded scalar minimisation of the
    negative log likelihood over log(k0); returns (k0, log likelihoods).
    """
    peak_integral_params_list = PeakIntegralParams.from_diffuse_peaks(
        prdp.diffuse_peaks)
    nonempty_integral_values = compute_nonempty_integrals(
        kbt_over_dx, newtons_per_unit, h, prdp.positions, prdp.relevance,
        peak_integral_params_list)
    integral_values = add_zero_for_empty_integrals(
        nonempty_integral_values, prdp.diffuse_peaks)
    def func(log_k0):
        # Negative total log likelihood; optimising in log space keeps k0 > 0.
        ll = _compute_log_likelihoods_diffuse(
            kbt_over_dx, newtons_per_unit, h, prdp,
            numpy.exp(log_k0), integral_values)
        ll_sum = ll.sum(dtype=bestfloat)
        return -ll_sum
    res = scipy.optimize.minimize_scalar(
        func, bounds=(numpy.log(k0_lower_bound), numpy.log(k0_upper_bound)),
        tol=0, method="bounded")
    common.check_True(res.success)
    k0 = numpy.exp(res.x)
    lls = _compute_log_likelihoods_diffuse(
        kbt_over_dx, newtons_per_unit, h, prdp, k0, integral_values)
    # Sanity check: recomputation at the optimum matches the optimiser's value.
    numpy.testing.assert_allclose(-res.fun, lls.sum(dtype=bestfloat))
    return k0, lls

def _estimate_dx_k0_diffuse(
    t, newtons_per_unit, h, prdp,
    dx_lower_bound, dx_upper_bound, k0_lower_bound, k0_upper_bound):
    """
    Jointly estimate dx and k0 for one diffuse data set: the outer bounded
    minimisation searches dx, with k0 profiled out by _estimate_k0_diffuse.
    Returns (dx, k0, log likelihoods).
    """
    def func(dx):
        kbt_over_dx = scipy.constants.k * t / dx
        k0, lls = _estimate_k0_diffuse(
            kbt_over_dx, newtons_per_unit, h, prdp,
            k0_lower_bound, k0_upper_bound)
        return -lls.sum(dtype=bestfloat)
    res = scipy.optimize.minimize_scalar(
        func, bounds=(dx_lower_bound, dx_upper_bound), tol=0, method="bounded")
    common.check_True(res.success)
    kbt_over_dx = scipy.constants.k * t / res.x
    k0, lls = _estimate_k0_diffuse(
        kbt_over_dx, newtons_per_unit, h, prdp,
        k0_lower_bound, k0_upper_bound)
    # Sanity check: recomputation at the optimum matches the optimiser's value.
    numpy.testing.assert_allclose(-res.fun, lls.sum(dtype=bestfloat))
    return res.x, k0, lls

def _estimate_shared_dx_k0_diffuse(
    t, newtons_per_unit, h, prdp_list,
    dx_lower_bound, dx_upper_bound, k0_lower_bound, k0_upper_bound):
    """
    Estimate one dx shared across all diffuse data sets in prdp_list, with a
    separate k0 per data set; maximises the pooled log likelihood.
    Returns (dx, [(k0, log likelihoods), ...]).
    """
    def func(dx):
        kbt_over_dx = scipy.constants.k * t / dx
        k0_lls_list = [
            _estimate_k0_diffuse(
                kbt_over_dx, newtons_per_unit, h, prdp,
                k0_lower_bound, k0_upper_bound)
            for prdp in prdp_list]
        return -numpy.sum([lls.sum() for _, lls in k0_lls_list], dtype=bestfloat)
    res = scipy.optimize.minimize_scalar(
        func, bounds=(dx_lower_bound, dx_upper_bound), tol=0, method="bounded")
    common.check_True(res.success)
    kbt_over_dx = scipy.constants.k * t / res.x
    k0_lls_list = [
        _estimate_k0_diffuse(
            kbt_over_dx, newtons_per_unit, h, prdp,
            k0_lower_bound, k0_upper_bound)
        for prdp in prdp_list]
    # Sanity check: recomputation at the optimum matches the optimiser's value.
    numpy.testing.assert_allclose(
        -res.fun, numpy.sum([lls.sum() for _, lls in k0_lls_list], dtype=bestfloat))
    return res.x, k0_lls_list
    
def _compute_log_likelihoods(
    kbt_over_dx, newtons_per_unit, h, basic_peaks, k0, integral_values):
    """
    Per-peak log pdf of each observed unfolding force:
    log(m) + log(k0) + y/(kbt/dx) + log(derived) - m * k0 * I(y).
    """
    ms = numpy.asarray(BasicPeak.ms(basic_peaks), dtype=int)
    common.check_equal(integral_values.shape, (len(basic_peaks), ))
    ys = numpy.asarray(BasicPeak.y_end_units_list(basic_peaks), dtype=int)
    kls = numpy.asarray(BasicPeak.kls(basic_peaks), dtype=bestfloat)
    v_over_ls = numpy.asarray(BasicPeak.v_over_ls(basic_peaks), dtype=bestfloat)
    # Jacobian-like factor from the force-to-time change of variables.
    derived = (1 / kls + h[ys]) / v_over_ls
    log_likelihoods = numpy.log(ms) + numpy.log(k0) + ys * newtons_per_unit / kbt_over_dx + numpy.log(derived) - ms * k0 * integral_values
    return log_likelihoods

def _compute_full_log_pdfs(
    kbt_over_dx, newtons_per_unit, h, protocols, k0, integral_values):
    """
    Log pdf over the whole force grid for each protocol.
    integral_values must have shape (len(protocols), len(h)).
    """
    ms = numpy.asarray(Protocol.ms(protocols), dtype=int)
    upper_bound_units = len(h)
    n = len(protocols)
    # Tile the per-protocol parameters across the force grid: shape (n, ub).
    ms = numpy.tile(ms, (upper_bound_units, 1)).transpose()
    common.check_equal(integral_values.shape, (n, upper_bound_units))
    ys = numpy.tile(numpy.arange(upper_bound_units, dtype=int), (n, 1))
    kls = numpy.asarray(Protocol.kls(protocols), dtype=bestfloat)
    kls = numpy.tile(kls, (upper_bound_units, 1)).transpose()
    v_over_ls = numpy.asarray(Protocol.v_over_ls(protocols), dtype=bestfloat)
    v_over_ls = numpy.tile(v_over_ls, (upper_bound_units, 1)).transpose()
    derived = (1 / kls + h[ys]) / v_over_ls
    log_likelihoods = numpy.log(ms) + numpy.log(k0) + ys * newtons_per_unit / kbt_over_dx + numpy.log(derived) - ms * k0 * integral_values
    return log_likelihoods

def _compute_protocol_log_pdfs(
    kbt_over_dx, newtons_per_unit, h, protocols, k0, integral_values):
    """
    Log pdf over the whole force grid for each protocol, like
    _compute_full_log_pdfs but broadcasting the per-protocol parameters
    instead of tiling them.  integral_values has shape (n, len(h)).

    Fixes in this revision: the original referenced an undefined name
    `basic_peaks` (NameError), returned `log_pdfs` while assigning
    `log_pdsfs` (NameError), and used 1-D kls/v_over_ls that cannot
    broadcast against the (n, upper_bound_units) grid arrays.
    """
    n = len(protocols)
    upper_bound_units = len(h)
    common.check_equal((n, upper_bound_units), integral_values.shape)
    # Column vectors (n, 1) broadcast against the (n, ub) grid arrays below.
    ms = numpy.asarray(Protocol.ms(protocols), dtype=int)[:, numpy.newaxis]
    ys = numpy.tile(numpy.arange(upper_bound_units, dtype=int), (n, 1))
    kls = numpy.asarray(
        Protocol.kls(protocols), dtype=bestfloat)[:, numpy.newaxis]
    v_over_ls = numpy.asarray(
        Protocol.v_over_ls(protocols), dtype=bestfloat)[:, numpy.newaxis]
    derived = (1 / kls + h[ys]) / v_over_ls
    # Where the integral is inf, the rest don't matter.
    log_pdfs = numpy.log(ms) + numpy.log(k0) + ys * newtons_per_unit / kbt_over_dx + numpy.log(derived) - ms * k0 * integral_values
    return log_pdfs
    
def _compute_log_likelihoods_diffuse(
    kbt_over_dx, newtons_per_unit, h, prdp, k0, integral_values):
    """
    Per-diffuse-peak log likelihood: the weighted mixture (logsumexp over the
    candidate y_end values) of the basic-peak log pdfs.

    Fix: the original passed a `map` object to numpy.asarray, which fails on
    Python 3 (map returns an iterator, not a sequence).
    """
    ms = numpy.asarray(DiffusePeak.ms(prdp.diffuse_peaks), dtype=int)
    y_end_units_list = DiffusePeak.y_end_units_list(prdp.diffuse_peaks)
    ys = numpy.concatenate([
            numpy.asarray(y_end_units, dtype=int)
            for y_end_units in y_end_units_list])
    common.check_equal(ys.shape[0], integral_values.shape[0])
    kls = numpy.asarray(DiffusePeak.kls(prdp.diffuse_peaks), dtype=bestfloat)
    v_over_ls = numpy.asarray(
        DiffusePeak.v_over_ls(prdp.diffuse_peaks), dtype=bestfloat)
    # List comprehension, not map(): numpy.asarray cannot consume a map object.
    repeats = numpy.asarray(
        [len(y_end_units) for y_end_units in y_end_units_list], dtype=int)
    for i in range(len(repeats)):
        common.check_equal(repeats[i], len(prdp.diffuse_peaks[i].weights))
    # Expand the per-peak parameters to one entry per candidate y_end.
    repeated_ms = ms.repeat(repeats)
    repeated_kls = kls.repeat(repeats)
    repeated_v_over_ls = v_over_ls.repeat(repeats)
    # integral_values is already unravelled.
    derived = (1 / repeated_kls + h[ys]) / repeated_v_over_ls
    log_likelihoods = numpy.log(repeated_ms) + numpy.log(
        k0) + ys * newtons_per_unit / kbt_over_dx + numpy.log(
        derived) - repeated_ms * k0 * integral_values
    cumulative_counts = repeats.cumsum()
    common.check_equal(log_likelihoods.shape, (cumulative_counts[-1], ))
    n = len(prdp.diffuse_peaks)
    # Weighted mixture per peak, computed stably in log space.
    aggregated_log_likelihoods = numpy.asarray(
        [common.logsumexp(
                log_likelihoods[
                    0 if not i else cumulative_counts[i - 1]:cumulative_counts[i]] + numpy.log(
                    numpy.asarray(prdp.diffuse_peaks[i].weights, dtype=bestfloat)))
         for i in range(n)], dtype=bestfloat)
    return aggregated_log_likelihoods

def compute_log_likelihoods(
    kbt_over_dx, newtons_per_unit, h, prbp, k0):
    """
    Per-basic-peak log likelihoods at a given k0 (integrals computed here).
    """
    peak_integral_params_list = PeakIntegralParams.from_basic_peaks(prbp.basic_peaks)
    nonempty_integral_values = compute_nonempty_integrals(
        kbt_over_dx, newtons_per_unit, h, prbp.positions, prbp.relevance,
        peak_integral_params_list)
    integral_values = add_zero_for_empty_integrals(
        nonempty_integral_values, prbp.basic_peaks)
    return _compute_log_likelihoods(
        kbt_over_dx, newtons_per_unit, h, prbp.basic_peaks, k0, integral_values)

def compute_log_likelihoods_diffuse(kbt_over_dx, newtons_per_unit, h, prdp, k0):
    """
    Per-diffuse-peak log likelihoods at a given k0 (integrals computed here).
    """
    peak_integral_params_list = PeakIntegralParams.from_diffuse_peaks(
        prdp.diffuse_peaks)
    nonempty_integral_values = compute_nonempty_integrals(
        kbt_over_dx, newtons_per_unit, h, prdp.positions, prdp.relevance,
        peak_integral_params_list)
    integral_values = add_zero_for_empty_integrals(
        nonempty_integral_values, prdp.diffuse_peaks)
    return _compute_log_likelihoods_diffuse(
        kbt_over_dx, newtons_per_unit, h, prdp, k0, integral_values)

def compute_dx_k0(
    t, newtons_per_unit, h, prpb,
    dx_lower_bound, dx_upper_bound, brute):
    """
    Jointly estimate dx and the (biased) k0 for basic peaks by maximising the
    log likelihood over dx, with k0 profiled out in compute_biased_k0.

    Params:
    t: temperature in kelvin (used as kbt = scipy.constants.k * t)
    brute: when true, grid-search dx in ~0.01 nm steps and then refine on a
        finer local grid, instead of scipy's bounded scalar minimisation.
    Returns: (dx, k0, log likelihoods)
    """
    dx_lower_bound, dx_upper_bound = map(bestfloat, (dx_lower_bound, dx_upper_bound))
    kbt = scipy.constants.k * t
    constant_factor = 1  # Can make it 1e9, but it doesn't seem to change anything.
    def func(dx):
        # Negative log likelihood at this dx (optionally rescaled by
        # constant_factor for conditioning).
        dx /= constant_factor
        kbt_over_dx = kbt / dx
        k0, lls = compute_biased_k0(kbt_over_dx, newtons_per_unit, h, prpb)
        result = -lls.sum()
        return result
    def finish(func_args, x0, args, full_output, disp):
        # Refine the coarse brute minimum on a +/- 0.01 nm local grid.
        second_opt = scipy.optimize.brute(
            func_args,
            [(x0 - 0.01e-9 * constant_factor, x0 + 0.01e-9 * constant_factor)],
            Ns=101,
            full_output=True, finish=None)
        # 0 indicates success.
        return second_opt[:2] + (0, )
    if brute:
        # First step size is 0.01 nm
        xopt, fval, grid, _ = scipy.optimize.brute(
            func,
            [(dx_lower_bound * constant_factor, dx_upper_bound * constant_factor)],
            Ns=1 + int((dx_upper_bound - dx_lower_bound) * 1e11),
            full_output=True, finish=finish)
        xopt /= constant_factor
    else:
        res = scipy.optimize.minimize_scalar(
            func, bounds=(
                dx_lower_bound * constant_factor, dx_upper_bound * constant_factor),
            tol=0, method="bounded")
        common.check_True(res.success)
        xopt, fval = res.x / constant_factor, res.fun
    k0, lls = compute_biased_k0(kbt / xopt, newtons_per_unit, h, prpb)
    # brute
    numpy.testing.assert_allclose(-fval, lls.sum(dtype=bestfloat))
    return xopt, k0, lls

def compute_shared_dx_k0(
    t, newtons_per_unit, h, prbp_list, dx_lower_bound, dx_upper_bound, brute):
    """
    Estimate one dx shared by all experiments in prbp_list, together with a
    per-experiment biased k0, by maximising the pooled log likelihood.

    Params:
    brute: when true, grid-search dx (~0.01 nm steps, then a finer local
        grid) instead of scipy's bounded scalar minimisation.
    Returns: (dx, [(k0, log likelihoods), ...])
    """
    dx_lower_bound, dx_upper_bound = map(bestfloat, (dx_lower_bound, dx_upper_bound))
    kbt = scipy.constants.k * t
    constant_factor = 1  # Can make it 1e9, but it doesn't seem to change anything.
    def func(dx):
        # Negative pooled log likelihood at this dx (k0 profiled out per
        # experiment by compute_biased_k0).
        dx /= constant_factor
        kbt_over_dx = kbt / dx
        k0_lls_list = [
            compute_biased_k0(kbt_over_dx, newtons_per_unit, h, prbp)
            for prbp in prbp_list]
        result = -numpy.sum([lls.sum() for _, lls in k0_lls_list], dtype=bestfloat)
        return result
    def finish(func_args, x0, args, full_output, disp):
        # Refine the coarse brute minimum on a +/- 0.01 nm local grid.
        second_opt = scipy.optimize.brute(
            func_args,
            [(x0 - 0.01e-9 * constant_factor, x0 + 0.01e-9 * constant_factor)],
            Ns=101,
            full_output=True, finish=None)
        # 0 indicates success.
        return second_opt[:2] + (0, )
    if brute:
        # First step size is 0.01 nm
        xopt, fval, grid, _ = scipy.optimize.brute(
            func,
            [(dx_lower_bound * constant_factor, dx_upper_bound * constant_factor)],
            Ns=1 + int((dx_upper_bound - dx_lower_bound) * 1e11),
            full_output=True, finish=finish)
        xopt /= constant_factor
    else:
        res = scipy.optimize.minimize_scalar(
            func, bounds=(
                dx_lower_bound * constant_factor, dx_upper_bound * constant_factor),
            tol=0, method="bounded")
        common.check_True(res.success)
        xopt, fval = res.x / constant_factor, res.fun
    # Recompute at the optimum.  This assignment used to live inside the else
    # branch only, so the brute branch failed with a NameError on kbt_over_dx.
    kbt_over_dx = kbt / xopt
    k0_lls_list = [compute_biased_k0(
            kbt_over_dx, newtons_per_unit, h, prbp) for prbp in prbp_list]
    numpy.testing.assert_allclose(
        -fval, numpy.sum([lls.sum() for _, lls in k0_lls_list], dtype=bestfloat))
    return xopt, k0_lls_list

def compute_probabilities(log_pdfs):
    """
    Row-wise softmax: turn each row of log pdf values into probabilities.
    """
    # Subtract each row's maximum before exponentiating for numerical stability.
    shifted = log_pdfs - log_pdfs.max(axis=1)[:, numpy.newaxis]
    numerator = numpy.exp(shifted)
    return numerator / numerator.sum(axis=1)[:, numpy.newaxis]

### User-friendly API ###

def integrals(
    t, p, dx, upper_bound_units, newtons_per_unit, peak_integral_params_list):
    """
    User-facing entry point: tabulate h on the force grid and evaluate the
    per-interval integrals for the given peak parameters.

    Params:
    t: temperature in kelvin (enters as kbt = scipy.constants.k * t)
    p: divisor of kbt passed to the WLC tabulation (presumably the
        persistence length -- confirm against the wlc module)
    dx: distance parameter (enters as kbt / dx)
    """
    for pip in peak_integral_params_list:
        common.check_lt(pip.y_units[-1], upper_bound_units)
    t, p, dx, newtons_per_unit = map(bestfloat, (t, p, dx, newtons_per_unit))
    kbt_over_p = scipy.constants.k * t / p
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        kbt_over_p, upper_bound_units, newtons_per_unit)
    se = peak_integral_params_to_start_end_tuples(
        peak_integral_params_list)
    positions, relevance = array_interval_covering(se)
    kbt_over_dx = scipy.constants.k * t / dx
    nonempty_integrals = compute_nonempty_integrals(
        kbt_over_dx, newtons_per_unit, h, positions, relevance,
        peak_integral_params_list)
    return nonempty_integrals

def estimate_biased_k0(t, p, dx, upper_bound_units, newtons_per_unit, basic_peaks):
    """Estimate the (biased) k0 from basic peaks at a fixed dx."""
    kb = scipy.constants.k
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        kb * t / p, upper_bound_units, newtons_per_unit)
    pip_list = PeakIntegralParams.from_basic_peaks(basic_peaks)
    positions, relevance = array_interval_covering(
        peak_integral_params_to_start_end_tuples(pip_list))
    prbp = PositionsRelevanceBasicPeaks(positions, relevance, basic_peaks)
    return compute_biased_k0(kb * t / dx, newtons_per_unit, h, prbp)

def unbias_k0(biased_k0, n):
    """Apply the (n - 1) / n small-sample correction to a biased k0 estimate."""
    return biased_k0 * (n - 1) / n

def log_likelihoods(t, p, dx, k0, upper_bound_units, newtons_per_unit, basic_peaks):
    """Log-likelihoods of the basic peaks under the given dx and k0."""
    kb = scipy.constants.k
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        kb * t / p, upper_bound_units, newtons_per_unit)
    se = peak_integral_params_to_start_end_tuples(
        PeakIntegralParams.from_basic_peaks(basic_peaks))
    positions, relevance = array_interval_covering(se)
    prbp = PositionsRelevanceBasicPeaks(positions, relevance, basic_peaks)
    return compute_log_likelihoods(kb * t / dx, newtons_per_unit, h, prbp, k0)

def log_likelihoods_diffuse(
    t, p, dx, k0, upper_bound_units, newtons_per_unit, diffuse_peaks):
    """Log-likelihoods of the diffuse peaks under the given dx and k0."""
    kb = scipy.constants.k
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        kb * t / p, upper_bound_units, newtons_per_unit)
    se = peak_integral_params_to_start_end_tuples(
        PeakIntegralParams.from_diffuse_peaks(diffuse_peaks))
    positions, relevance = array_interval_covering(se)
    prdp = PositionsRelevanceDiffusePeaks(positions, relevance, diffuse_peaks)
    return compute_log_likelihoods_diffuse(
        kb * t / dx, newtons_per_unit, h, prdp, k0)

def estimate_dx_k0(
    t, p, upper_bound_units, newtons_per_unit, basic_peaks,
    dx_lower_bound, dx_upper_bound, brute=False):
    """
    Jointly estimate dx and k0 from basic peaks.

    Timing (3174 peaks, newtons_per_unit=1e-12,
    dx_lower_bound=0.1e-9, dx_upper_bound=1e-9):
    - brute=False: 130 s
    - brute=True: 1635 s
    """
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        scipy.constants.k * t / p, upper_bound_units, newtons_per_unit)
    pip_list = PeakIntegralParams.from_basic_peaks(basic_peaks)
    positions, relevance = array_interval_covering(
        peak_integral_params_to_start_end_tuples(pip_list))
    prbp = PositionsRelevanceBasicPeaks(positions, relevance, basic_peaks)
    return compute_dx_k0(
        t, newtons_per_unit, h, prbp,
        dx_lower_bound, dx_upper_bound, brute)

def estimate_shared_dx_k0(
    t, p, upper_bound_units, newtons_per_unit, basic_peaks_list,
    dx_lower_bound, dx_upper_bound, brute=False):
    """
    Estimate a dx shared across several basic-peak datasets.

    Timing (3174 peaks, newtons_per_unit=1e-12,
    dx_lower_bound=0.1e-9, dx_upper_bound=1e-9):
    - brute=False: 130 s
    - brute=True: 1635 s
    """
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        scipy.constants.k * t / p, upper_bound_units, newtons_per_unit)
    prbp_list = []
    for basic_peaks in basic_peaks_list:
        positions, relevance = array_interval_covering(
            peak_integral_params_to_start_end_tuples(
                PeakIntegralParams.from_basic_peaks(basic_peaks)))
        prbp_list.append(
            PositionsRelevanceBasicPeaks(positions, relevance, basic_peaks))
    return compute_shared_dx_k0(
        t, newtons_per_unit, h, prbp_list, dx_lower_bound, dx_upper_bound, brute)

def estimate_k0_diffuse(
    t, p, dx, upper_bound_units, newtons_per_unit, diffuse_peaks,
    k0_lower_bound, k0_upper_bound):
    """Estimate k0 from diffuse peaks at a fixed dx, within the given bounds."""
    kb = scipy.constants.k
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        kb * t / p, upper_bound_units, newtons_per_unit)
    se = peak_integral_params_to_start_end_tuples(
        PeakIntegralParams.from_diffuse_peaks(diffuse_peaks))
    positions, relevance = array_interval_covering(se)
    prdp = PositionsRelevanceDiffusePeaks(positions, relevance, diffuse_peaks)
    return _estimate_k0_diffuse(
        kb * t / dx, newtons_per_unit, h, prdp, k0_lower_bound, k0_upper_bound)

def estimate_dx_k0_diffuse(
    t, p, upper_bound_units, newtons_per_unit, diffuse_peaks,
    dx_lower_bound, dx_upper_bound, k0_lower_bound, k0_upper_bound):
    """Jointly estimate dx and k0 from diffuse peaks, within the given bounds."""
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        scipy.constants.k * t / p, upper_bound_units, newtons_per_unit)
    se = peak_integral_params_to_start_end_tuples(
        PeakIntegralParams.from_diffuse_peaks(diffuse_peaks))
    positions, relevance = array_interval_covering(se)
    prdp = PositionsRelevanceDiffusePeaks(positions, relevance, diffuse_peaks)
    return _estimate_dx_k0_diffuse(
        t, newtons_per_unit, h, prdp,
        dx_lower_bound, dx_upper_bound, k0_lower_bound, k0_upper_bound)

def estimate_shared_dx_k0_diffuse(
    t, p, upper_bound_units, newtons_per_unit, diffuse_peaks_list,
    dx_lower_bound, dx_upper_bound, k0_lower_bound, k0_upper_bound):
    """Estimate a dx shared across several diffuse-peak datasets, with bounds."""
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        scipy.constants.k * t / p, upper_bound_units, newtons_per_unit)
    prdp_list = []
    for diffuse_peaks in diffuse_peaks_list:
        positions, relevance = array_interval_covering(
            peak_integral_params_to_start_end_tuples(
                PeakIntegralParams.from_diffuse_peaks(diffuse_peaks)))
        prdp_list.append(
            PositionsRelevanceDiffusePeaks(positions, relevance, diffuse_peaks))
    return _estimate_shared_dx_k0_diffuse(
        t, newtons_per_unit, h, prdp_list,
        dx_lower_bound, dx_upper_bound, k0_lower_bound, k0_upper_bound)

def full_log_pdfs(
    t, p, upper_bound_units, newtons_per_unit, protocols, dx, k0):
    """Log-pdfs over the whole force grid, one row per protocol."""
    kb = scipy.constants.k
    h = wlc_inverse_derived_times_v_over_l_minus_1_over_kl(
        kb * t / p, upper_bound_units, newtons_per_unit)
    kbt_over_dx = kb * t / dx
    fps = full_partial_sums(
        kbt_over_dx, newtons_per_unit, h,
        Protocol.y_start_units_list(protocols))
    # Column vectors; broadcasting against fps[:, :, *] replaces the
    # original tile/transpose construction with identical element-wise math.
    v_over_ls = numpy.asarray(
        Protocol.v_over_ls(protocols), dtype=bestfloat)[:, numpy.newaxis]
    kls = numpy.asarray(
        Protocol.kls(protocols), dtype=bestfloat)[:, numpy.newaxis]
    integral_values = kbt_over_dx / v_over_ls * (fps[:, :, 0] / kls + fps[:, :, 1])
    return _compute_full_log_pdfs(
        kbt_over_dx, newtons_per_unit, h, protocols, k0, integral_values)

def full_log_pdfs_diffuse(
    t, p, upper_bound_units, newtons_per_unit, diffuse_protocols, dx, k0, factory):
    """
    Log-pdfs over the full force grid for diffuse (smeared) peaks.

    Each diffuse-peak log-pdf is a weighted log-sum-exp mixture of the sharp
    per-protocol log-pdfs from full_log_pdfs.

    Params:
    factory - DiffusePeak.normal_from_basic_peak or DiffusePeak.truncated_normal_from_basic_peak
    Returns: array of shape (len(diffuse_protocols), upper_bound_units).
    """
    # Sharp-peak log-pdfs for the underlying protocols.
    protocols = [diffuse_protocol.protocol for diffuse_protocol in diffuse_protocols]
    log_pdfs = full_log_pdfs(
        t, p, upper_bound_units, newtons_per_unit, protocols, dx, k0)
    # One DiffusePeak per grid position, from each protocol's start upward.
    # NOTE(review): diffuse_protocol.y_start_units is invoked as a method here,
    # while protocol.y_start_units is read as a plain attribute below —
    # presumably the two classes expose it differently; verify against their
    # definitions.
    diffuse_peaks_list = [[
            factory(
                BasicPeak(diffuse_protocol.protocol, i),
                diffuse_protocol.sigma_units, diffuse_protocol.hsu,
                upper_bound_units
                )
            for i in range(diffuse_protocol.y_start_units(), upper_bound_units)]
                          for diffuse_protocol in diffuse_protocols]
    lls = numpy.empty((len(protocols), upper_bound_units), dtype=bestfloat)
    for i in range(len(protocols)):
        protocol = protocols[i]
        diffuse_peaks = diffuse_peaks_list[i]
        # Grid positions below the protocol's start get zero probability.
        lls[i, :protocol.y_start_units] = -numpy.inf
        for j in range(upper_bound_units - protocol.y_start_units):
            diffuse_peak = diffuse_peaks[j]
            assert diffuse_peak.y_start_units() == protocol.y_start_units
            # Mixture in log space: log(sum_k w_k * pdf_k) via logsumexp,
            # gathering the sharp log-pdfs at the peak's end positions.
            lw = numpy.log(numpy.asarray(diffuse_peak.weights, dtype=bestfloat))
            l = numpy.asarray(
                [log_pdfs[i, k] for k in diffuse_peak.y_end_units], dtype=bestfloat)
            lls[i, protocol.y_start_units + j] = common.logsumexp(lw + l)
    return lls

def full_probabilities(
    t, p, upper_bound_units, newtons_per_unit, protocols, dx, k0):
    """Per-protocol probability distributions over the full force grid."""
    log_pdfs = full_log_pdfs(
        t, p, upper_bound_units, newtons_per_unit, protocols, dx, k0)
    expected_shape = (len(protocols), upper_bound_units)
    common.check_equal(log_pdfs.shape, expected_shape)
    return compute_probabilities(log_pdfs)

def full_probabilities_diffuse(
    t, p, upper_bound_units, newtons_per_unit, diffuse_protocols, dx, k0, factory):
    """Per-protocol probability distributions for diffuse peaks."""
    log_pdfs = full_log_pdfs_diffuse(
        t, p, upper_bound_units, newtons_per_unit, diffuse_protocols, dx, k0,
        factory)
    expected_shape = (len(diffuse_protocols), upper_bound_units)
    common.check_equal(log_pdfs.shape, expected_shape)
    return compute_probabilities(log_pdfs)

    
if __name__ == "__main__":
    # NOTE(review): main is not defined in this chunk of the file —
    # presumably defined elsewhere in the module; confirm before running
    # this file as a script.
    main(sys.argv[1:])
