import os
import sys
import time
import multiprocessing as mp
import numpy as np
from simple_tif import tif_to_array, array_to_tif
from scipy.ndimage import interpolation
from fftconvolve_customized import fftconvolve
from generate_illumination_stripes import (get_illumination_info,
                                           get_illumination_pattern,
                                           get_separable_illumination_pattern)

camera_zero_offset = 100 #Cameras add a constant offset to avoid 0 values
num_iterations = 100 #Deconvolution iterations (see the __main__ loop below)
num_processes = 15 #Size of the multiprocessing worker pool

#time.clock was the high-resolution wall-clock timer on Windows in
#Python 2; time.time is the better choice on other platforms.
if sys.platform == 'win32':
    get_time = time.clock
else:
    get_time = time.time
    
def density_to_sim_data(
    density,
    emission_psf,
    out=None,
    which_scan_position='all'):
    """
    Takes a 3D image input, and returns an expected 4D SIM dataset
    output (or a portion of one, if 'which_scan_position' is an
    integer).
    """
    illumination_info = get_illumination_info()
    if which_scan_position == 'all':
        scan_positions = range(illumination_info['num_scan_positions'])
    else:
        scan_positions = [which_scan_position]
    pad_xy = illumination_info['pad_amount_xy']
    pad_z = illumination_info['pad_amount_z']
    bin_xy = illumination_info['zoom_factor_xy']
    bin_z = illumination_info['zoom_factor_z']
    separable_illumination = illumination_info['separable']
    if out == None:
        sim_data = np.zeros(
            (len(scan_positions),
             (density.shape[0] - 2*pad_z) / bin_z,
             (density.shape[1] - 2*pad_xy) / bin_xy,
             (density.shape[2] - 2*pad_xy) / bin_xy),
            dtype=np.float64)
    else:
        sim_data = out
        assert sim_data.shape[0] == len(scan_positions)
        sim_data.fill(0)

    """
    To hold intermediate data:
    """
    glow = np.zeros_like(density)
    blurred_glow = np.zeros_like(glow)

    """
    Simulate the imaging process: multiply by the illumination, blur,
    sum in Z, and bin.
    """
    for i, p in enumerate(scan_positions):
        """
        If we're looping over all the scan positions, 'i' and 'p'
        should be equal. If we're just doing one of the scan
        positions, 'i' should be zero, and 'p' is the scan position we
        were asked to calculate.
        """
        if separable_illumination:
            """
            Less general, but should be much faster to compute.
            """
            illumination_xy = get_separable_illumination_pattern(
                p, direction='xy')
            illumination_z = get_separable_illumination_pattern(
                p, direction='z')
            for term in range(illumination_xy.shape[0]):
                ill_xy = illumination_xy[term, :, :].reshape(
                    1, density.shape[-2], density.shape[-1])
                ill_z = illumination_z[:, term].reshape(
                    emission_psf.shape[0], 1, 1)
                np.multiply(ill_xy, density, out=glow)
                blurred_glow = blur_3d(glow, emission_psf*ill_z)
                cropped_glow = blurred_glow[
                    pad_z:blurred_glow.shape[0] - pad_z,
                    pad_xy:blurred_glow.shape[1] - pad_xy,
                    pad_xy:blurred_glow.shape[2] - pad_xy]
                """
                We're actually only 'binning' in x/y; in z, we're sampling:
                """
                binned_glow = binned_downsampling(
                    cropped_glow[::bin_z, :, :],
                    bin_size=(1, bin_xy, bin_xy))
                sim_data[i, :, :, :] += binned_glow
        else:
            """
            Arbitrary illumination, slower to compute but more general.
            """
            for z in range(illumination_info['num_depths']):
                """
                'z' is the depth we're focused on, in data pixel units.

                Every time we change the illumination, re-calculate where
                the sample is glowing:
                """
                illumination = get_illumination_pattern(
                    which_depth=z, which_pattern=p)
                np.multiply(illumination, density, out=glow)
                for d in range(glow.shape[0]):
                    """Each slice of the glow blurs differently onto
                    the camera:"""
                    blur_2d(glow[d, : , :],
                            defocus=((pad_z + z*bin_z) - d),
                            emission_psf=emission_psf,
                            output=blurred_glow[d, :, :])
                blur_smushed_in_z = blurred_glow.sum(axis=0)
                cropped_glow = blur_smushed_in_z[
                    pad_xy:blur_smushed_in_z.shape[0] - pad_xy,
                    pad_xy:blur_smushed_in_z.shape[1] - pad_xy]
                sim_data[i, z, :, :] = binned_downsampling(
                    cropped_glow, bin_size=(bin_xy, bin_xy))
    sim_data_min = sim_data.min()
    if sim_data_min <= 0:
        sim_data_max = sim_data.max()
        if abs(sim_data_min) / sim_data_max > 1e-3:
            print "SIM data min:", sim_data_min
            print "SIM data max:", sim_data_max
            raise UserWarning("SIM data minimum is negative, and not small.")
        else:
            """
            We have a small negative part. Probably a numerical
            artifact. Clip it!
            """
            sim_data[sim_data <= 0] = 1e-9 * sim_data.max()
    sim_data += camera_zero_offset #Cameras add a constant offset to avoid 0
    return sim_data

def sim_data_to_density(
    sim_data,
    emission_psf,
    out=None,
    which_scan_position='all'):
    """
    The transpose of the density_to_sim_data operation we perform above.
    
    Takes a 4D SIM dataset.
    Returns a 3D density in zoomed units.
    """
    illumination_info = get_illumination_info()
    if which_scan_position == 'all':
        scan_positions = range(illumination_info['num_scan_positions'])
    else:
        scan_positions = [which_scan_position]
    pad_xy = illumination_info['pad_amount_xy']
    pad_z = illumination_info['pad_amount_z']
    bin_xy = illumination_info['zoom_factor_xy']
    bin_z = illumination_info['zoom_factor_z']
    separable_illumination = illumination_info['separable']
    """
    The dimensions of the 'sim_data' input should match the number of
    scan positions we were asked to calculate.
    """
    assert len(scan_positions) == sim_data.shape[0]
        
    if out == None:
        density = np.zeros((sim_data.shape[-3]*bin_z + 2*pad_z,
                            sim_data.shape[-2]*bin_xy + 2*pad_xy,
                            sim_data.shape[-1]*bin_xy + 2*pad_xy),
                            dtype=np.float64) 
    else:
        density = out
        if which_scan_position == 'all':
            """
            We're looping over all the scan positions, so start with a
            blank slate.
            """
            density.fill(0)
        else:
            """
            We're just calculating the contribution from one scan
            position, so we're adding to an existing 'density'.
            """
            pass
    """
    Simulate the transpose of the imaging process: unbin, uncrop,
    unsum in z, blur, and multiply by the illumination.
    """
    if separable_illumination:
        unsampled_sim_data = np.zeros((sim_data.shape[1] * bin_z,
                                       sim_data.shape[2] * bin_xy,
                                       sim_data.shape[3] * bin_xy),
                                      dtype=np.float64)
        padded_sim_data = np.zeros(density.shape, dtype=np.float64)
    else:
        padded_sim_data = np.zeros(density.shape[-2:], dtype=np.float64)
    for i, p in enumerate(scan_positions): #Where was the illumination in xy
        if separable_illumination:
            """
            Less general, but should be much faster to compute.

            First, unbin/unsample the data. Note that we binned in xy,
            but we sampled in z. The transpose of binning is not the
            same as the transpose of sampling!
            """
            unbinned_sim_data = interpolation.zoom(
                sim_data[i, :, :, :], zoom=(1, bin_xy, bin_xy), order=0)
            unsampled_sim_data[::bin_z, :, :] = unbinned_sim_data
            """
            Now uncrop the data:
            """
            padded_sim_data.fill(0)
            padded_sim_data[
                pad_z:density.shape[0] - pad_z,
                pad_xy:density.shape[1] - pad_xy,
                pad_xy:density.shape[2] - pad_xy] = unsampled_sim_data
            """
            Blurring is a lot easier with separable illumination!
            """
            illumination_xy = get_separable_illumination_pattern(
                p, direction='xy')
            illumination_z = get_separable_illumination_pattern(
                p, direction='z')
            for term in range(illumination_xy.shape[0]):
                ill_xy = illumination_xy[term, :, :].reshape(
                    1, density.shape[-2], density.shape[-1])
                ill_z = illumination_z[:, term].reshape(
                    emission_psf.shape[0], 1, 1)
                density += (ill_xy *
                            blur_3d(padded_sim_data,
                                    emission_psf*ill_z,
                                    transpose=True))
        else:
            """
            Arbitrary illumination, slower to compute but more general.
            """
            for z in range(sim_data.shape[-3]): #Where was the focus in z
                """
                Unbin:
                Replace each xy pixel with a cluster of pixels. Every
                pixel in a cluster has the same value. This is the
                transpose of the 'binning' operation, which you might call
                'unbinning'.
                """
                interpolated_sim_data = interpolation.zoom(
                    sim_data[i, z, :, :], bin_xy, order=0)
                """
                Uncrop:
                Pad the interpolated SIM data with zeros. This is the
                transpose of the 'cropping' operation, which you might
                call 'uncropping'.
                """
                padded_sim_data.fill(0)
                padded_sim_data[
                    pad_xy:density.shape[1] - pad_xy,
                    pad_xy:density.shape[2] - pad_xy] = interpolated_sim_data
                illumination = get_illumination_pattern(
                    which_depth=z, which_pattern=p)
                for d in range(density.shape[0]): #Where we're assigning blame
                    """
                    'Unsum' in z, blur, and finally multiply by the
                    illumination.
                    """
                    density[d, :, :] += (
                        illumination[d, :, :] *
                        blur_2d(padded_sim_data,
                                defocus=((pad_z + z*bin_z) - d),
                                emission_psf=emission_psf,
                                transpose=True))
    density_min = density.min()
    density_max = density.max()
    if density_min <= 0:
        density_max = density.max()
        if abs(density_min) / density_max > 1e-3:
            print "Density min:", sim_data_min
            print "Density max:", sim_data_max
            raise UserWarning("Density minimum is negative, and not small.")
        else:
            """
            We have a small negative part. Probably a numerical
            artifact. Clip it!
            """
            density[density <= 0] = 1e-9 * sim_data.max()
    return density

def blur_2d(slice_2d, defocus, emission_psf, output=None, transpose=False):
    """
    Blur one 2D slice with the PSF slice at the given defocus.

    defocus: signed axial offset, in zoomed pixels, used to index the
        z-axis of 'emission_psf' (both are in zoomed units).
    output: optional array that also receives the result.
    transpose: apply the adjoint of the forward blur (negated defocus,
        PSF slice flipped in x and y).

    If the requested defocus falls outside the PSF's axial extent, the
    slice contributes nothing, so zeros are returned. (An alternative
    would be to clamp to the nearest PSF slice; see version history.)
    """
    if transpose:
        defocus = -defocus
    #Convert signed defocus into an index into the PSF stack:
    slice_defocus = defocus + emission_psf.shape[0]//2
    if slice_defocus < 0 or slice_defocus >= emission_psf.shape[0]:
        #The PSF doesn't extend this far in z: return all zeros.
        if output is not None:
            output.fill(0)
        return np.zeros_like(slice_2d)
    psf_slice = emission_psf[slice_defocus, :, :]
    if transpose:
        psf_slice = psf_slice[::-1, ::-1]
    convolved_slice = fftconvolve(slice_2d, psf_slice, mode='same')
    if output is not None:
        output[:] = convolved_slice #a bit crufty here
    return convolved_slice

def blur_3d(data_3d, emission_psf, output=None, transpose=False):
    """
    Convolve a 3D stack with the emission PSF.

    If 'transpose' is set, the PSF is flipped along every axis first,
    which implements the adjoint of the forward blur. The result is
    written into 'output' (if provided) and always returned.
    """
    kernel = emission_psf[::-1, ::-1, ::-1] if transpose else emission_psf
    result = fftconvolve(data_3d, kernel, mode='same')
    if output is not None:
        output[:] = result #a bit crufty here too
    return result #I know you want it

def binned_downsampling(x, bin_size):
    """
    Downsample 'x' by averaging non-overlapping bins, one bin size per
    axis. Every axis length must divide evenly by its bin size.

    Based on http://stackoverflow.com/a/4624923
    """
    assert len(x.shape) == len(bin_size)
    for dim, b in zip(x.shape, bin_size):
        #Each axis must split into a whole number of bins:
        assert dim % b == 0
    #Interleave (num_bins, bin_size) pairs, one pair per axis:
    split_shape = []
    for dim, b in zip(x.shape, bin_size):
        split_shape.extend((dim // b, b))
    binned = x.reshape(split_shape)
    #Average away the bin axes, last to first, so the remaining
    #axis numbers stay valid as axes disappear:
    for axis in reversed(range(1, len(split_shape), 2)):
        binned = binned.mean(axis) #I think this allocates memory...
    return binned

def sim_data_to_visualization(sim_data, outfile=None):
    """
    Flatten a 4D SIM dataset (scan, depth, y, x) into a 3D image
    stack, ordering slices by scan position then depth. Optionally
    save the stack to 'outfile' as 32-bit floats.
    """
    num_slices = sim_data.shape[-4] * sim_data.shape[-3]
    #C-order reshape visits depths within each scan position, which is
    #exactly the (p, z) nesting of the stacking loop it replaces:
    image_stack = sim_data.reshape(
        num_slices, sim_data.shape[-2], sim_data.shape[-1]
        ).astype(np.float64)
    if outfile is not None:
        array_to_tif(image_stack.astype(np.float32), outfile)
    return image_stack

def correction_factor_from_one_scan_position((
    noisy_sim_data_portion,
    estimate,
    emission_psf,
    scan_position,
    expected_data_dir)):
    #NOTE: the tuple-unpacking parameter above is Python 2-only syntax.
    #It lets pool.map() deliver all five arguments as a single tuple.
    """
    Compute one scan position's multiplicative correction factor:
    forward-project the current estimate, take the ratio of measured
    to expected data, and back-project that ratio into density space.

    Returns a 3D array shaped like the density estimate.
    """
    """
    Construct the expected data from the estimate
    """
    expected_data = density_to_sim_data(
        estimate,
        emission_psf=emission_psf,
        which_scan_position=scan_position)
    #Save the expected data for inspection/debugging:
    outfile = os.path.join(
        expected_data_dir, 'expected_data_%04i.tif'%scan_position)
    ##array_to_tif(expected_data[0, :, :, :].astype(np.float32), outfile)
    sim_data_to_visualization(expected_data, outfile=outfile)
    """
    Take the ratio between the measured data and the expected data.
    Store this ratio in 'expected_data'
    """
    expected_data += 1e-6 #Don't want to divide by 0!
    np.divide(noisy_sim_data_portion,
              expected_data[0, :, :, :],
              out=expected_data[0, :, :, :])
    """
    Apply the transpose of the expected data operation to the
    correction factor
    """
    correction_factor = sim_data_to_density(
        expected_data,
        emission_psf=emission_psf,
        which_scan_position=scan_position)
    return correction_factor

if __name__ == '__main__':
    """
    'emission_PSF.tif' is sampled in estimate pixels, centered, and
    with an ODD number of pixels in each dimension.
    """
    emission_psf = tif_to_array('emission_PSF.tif').astype(np.float64)
    """
    Normalize the emission PSF so its central (in-focus) slice sums to 1.
    """
    middle_slice_brightness = emission_psf[emission_psf.shape[0]//2, :, :].sum()
    emission_psf = emission_psf * 1.0 / middle_slice_brightness
    illumination_info = get_illumination_info()
    print "Loading 'noisy_sim_data.tif'..."
    noisy_sim_data = tif_to_array('noisy_sim_data.tif')
    #Reshape the flat TIF stack into (scan position, depth, y, x):
    noisy_sim_data = noisy_sim_data.reshape(
        illumination_info['num_scan_positions'],
        illumination_info['num_depths'],
        noisy_sim_data.shape[-2],
        noisy_sim_data.shape[-1])
##    if illumination_info['separable']:
##        raise UserWarning("Separable illumination is not implemented.")
##        if actual_object_shape[0] < emission_psf.shape[0]:
##            raise UserWarning(
##                """If we use separable illumination, the object
##                has to have at least as many slices as the PSF,
##                since we'll be using fftconvolve""")
    """
    Time for deconvolution!!!
    """
    #Start from a uniform (all-ones) estimate of the object density:
    estimate = np.ones(illumination_info['density_shape'], dtype=np.float64)
    expected_data = np.zeros((1,) + noisy_sim_data.shape[1:], dtype=np.float64)
    expected_data_dir = os.path.join(os.getcwd(), 'expected_data')
    if not os.path.exists(expected_data_dir):
        os.mkdir(expected_data_dir)
    correction_factor = np.zeros_like(estimate)
    print "Computing normalization factor..."
    #Back-projecting all-ones data gives the per-voxel normalization
    #for the multiplicative update; 1e-5 guards the later division:
    correction_factor_normalization = 1e-5 + sim_data_to_density(
        np.ones_like(noisy_sim_data), emission_psf=emission_psf)
    array_to_tif(correction_factor_normalization.astype(np.float32),
                 'correction_factor_normalization.tif')
    print "Done computing."
    #'history'/'history_yz' record max-projections of the estimate
    #(xy and yz views) after every iteration, for progress monitoring:
    history = np.zeros(
        ((1+num_iterations,) + estimate.shape[-2:]), dtype=np.float64)
    history[0, :, :] = estimate.max(axis=0)
    history_yz = np.zeros(
        ((1+num_iterations,) + (estimate.shape[-2], estimate.shape[-3])),
        dtype=np.float64)
    history_yz[0, :, :] = np.transpose(estimate.max(axis=2))
    pool = mp.Pool(processes=num_processes)
    scan_positions = range(illumination_info['num_scan_positions'])
    for i in range(num_iterations):
        print
        print "Iteration", i
        start = get_time()
        #One work item (argument tuple) per scan position:
        process_me = [(noisy_sim_data[s, :, :, :],
                       estimate,
                       emission_psf,
                       s,
                       expected_data_dir)
                      for s in scan_positions]
        correction_factor.fill(0)
        #Farm the scan positions out to the worker pool in chunks, and
        #sum each position's contribution into the correction factor:
        for n in range(0, len(process_me), num_processes):
            chunk = process_me[n:n+num_processes]
            print " Processing chunk with length", len(chunk)
            result = pool.map(correction_factor_from_one_scan_position, chunk)
            for r in result:
                correction_factor += r

##        for scan_position in range(num_scan_positions):
##            print " Scan position", scan_position
##            """
##            Construct the expected data from the estimate
##            """
##            print " Constructing estimated data..."
##            density_to_sim_data(estimate, illumination, out=expected_data,
##                                separable_illumination=separable_illumination,
##                                which_scan_position=scan_position)
##            outfile = os.path.join(expected_data_dir,
##                                   'expected_data_%04i.tif'%scan_position)
##            array_to_tif(expected_data[0, :, :, :].astype(np.float32), outfile)
##            ##sim_data_to_visualization(expected_data, outfile=outfile)
##            print "  Done constructing."
##            """
##            Take the ratio between the measured data and the expected data.
##            Store this ratio in 'expected_data'
##            """
##            expected_data += 1e-6 #Don't want to divide by 0!
##            np.divide(noisy_sim_data[scan_position, :, :, :],
##                      expected_data[0, :, :, :],
##                      out=expected_data[0, :, :, :])
##            """
##            Apply the transpose of the expected data operation to the
##            correction factor
##            """
##            print " Applying transpose..."
##            sim_data_to_density(expected_data,
##                                illumination,
##                                out=correction_factor,
##                                separable_illumination=separable_illumination,
##                                which_scan_position=scan_position)
##            print "  Done applying."

        end = get_time()
        print "Elapsed time:", end - start
        array_to_tif(correction_factor.astype(np.float32),
                     'correction_factor.tif')
        """
        Multiply the old estimate by the normalized correction factor
        to get the new estimate.
        """
        np.divide(correction_factor,
                  correction_factor_normalization,
                  out=correction_factor)
        array_to_tif(correction_factor.astype(np.float32),
                     'correction_factor_normalized.tif')
        np.multiply(estimate, correction_factor, out=estimate)
        correction_factor.fill(0)
        """
        Update the history
        """
        print "Saving..."
        history[i+1, :, :] = estimate.max(axis=0)
        array_to_tif(history.astype(np.float32), outfile='history.tif')
        history_yz[i+1, :, :] = np.transpose(estimate.max(axis=2))
        array_to_tif(history_yz.astype(np.float32), outfile='history_yz.tif')
        array_to_tif(estimate.astype(np.float32),
                     outfile='estimate_%03i.tif'%(i))
        print " Done saving."
    print "Done deconvolving"
    raw_input("Hit enter to continue...")
