import sys
import time
import numpy as np
from simple_tif import array_to_tif, tif_to_array
from generate_illumination_stripes import (
    generate_illumination, generate_separable_illumination,
    get_illumination_info, set_illumination_info,
    get_illumination_pattern, set_illumination_pattern,
    get_separable_illumination_pattern, set_separable_illumination_pattern)
from decon_3D import (
    density_to_sim_data, binned_downsampling, sim_data_to_visualization)
"""
Load a resolution target and an emission PSF which we'll use to
generate artificial test data. Use a big resolution target; we're
going to crop the resulting data down so the boundary conditions are
realistic:
"""
actual_object = tif_to_array('resolution_target.tif').astype(np.float64)
crop_to_estimate_xy = 0 #Object pixel units
crop_to_estimate_z = 0 #Object pixel units
crop_to_data_xy = 10 #Object pixel units
crop_to_data_z = 10 #Object pixel units
"""
Adjust the level of binning to mimic real data.
"""
zoom_factor_to_estimate_xy = 1 #Has to be odd, so the PSF stays odd-sized
zoom_factor_to_estimate_z = 1 #Has to be odd, so the PSF stays odd-sized
zoom_factor_to_data_xy = 2 * zoom_factor_to_estimate_xy
zoom_factor_to_data_z = 1 * zoom_factor_to_estimate_z

"""
Choose the brightness (noise level) and the representation of the
illumination. Not every illumination can be modeled as separable, but
if possible, this is usually computationally faster.
"""
intensity_scaling = 100.
separable_illumination = True
object_emission_psf = tif_to_array('resolution_target_emission_PSF.tif')
estimate_emission_psf = binned_downsampling( #Bin in XY, sample in Z
    object_emission_psf[
        zoom_factor_to_estimate_z//2::zoom_factor_to_estimate_z, :, :],
    bin_size=(1, zoom_factor_to_estimate_xy, zoom_factor_to_estimate_xy))
array_to_tif(estimate_emission_psf, 'emission_PSF.tif')
object_cropped_to_estimate = actual_object[
    crop_to_estimate_z:actual_object.shape[0] - crop_to_estimate_z,
    crop_to_estimate_xy:actual_object.shape[1] - crop_to_estimate_xy,
    crop_to_estimate_xy:actual_object.shape[2] - crop_to_estimate_xy]
array_to_tif(object_cropped_to_estimate.astype(np.float32),
             'resolution_target_cropped.tif')
object_cropped_and_binned_to_estimate = binned_downsampling(
    object_cropped_to_estimate,
    (zoom_factor_to_estimate_z,
     zoom_factor_to_estimate_xy,
     zoom_factor_to_estimate_xy))
array_to_tif(object_cropped_and_binned_to_estimate.astype(np.float32),
             'resolution_target_cropped_binned.tif')
##del object_cropped_to_estimate
##del object_cropped_and_binned_to_estimate

##NOTE(review): this disabled padding step is stale -- it references
##'emission_psf' and 'zoom_factor', names that don't exist in this
##script (the current names are 'object_emission_psf' and the
##'zoom_factor_to_*' variables). Update the names before re-enabling.
##if separable_illumination:
##    """
##    If we use separable illumination, the object has to have at least
##    as many slices as the PSF, since we'll be using fftconvolve
##    """
##    if actual_object.shape[0] < emission_psf.shape[0]:
##        """
##        The new size also has to be divisible by the zoom factor.
##        """
##        new_size = emission_psf.shape[0]
##        while new_size % zoom_factor != 0:
##            new_size += 1
##        temp = np.zeros((new_size,
##                         actual_object.shape[1],
##                         actual_object.shape[2]), dtype=np.float64)
##        start_slice = (new_size - actual_object.shape[0]) // 2
##        temp[start_slice:start_slice + actual_object.shape[0], :, :
##             ] = actual_object
##        actual_object = temp
"""
Generate illumination; this returns nothing, but leaves illumination
files on disk.
"""
if separable_illumination:
    generate_separable_illumination(
        density_shape=actual_object.shape,
        emission_psf_shape=object_emission_psf.shape,
        zoom_factor_xy=zoom_factor_to_data_xy,
        zoom_factor_z=zoom_factor_to_data_z,
        pad_amount_xy=crop_to_data_xy,
        pad_amount_z=crop_to_data_z)
else:
    generate_illumination(
        density_shape=actual_object.shape,
        zoom_factor_xy=zoom_factor_to_data_xy,
        zoom_factor_z=zoom_factor_to_data_z,
        pad_amount_xy=crop_to_data_xy,
        pad_amount_z=crop_to_data_z)
illumination_info = get_illumination_info()
"""
Generate noiseless data
"""
if sys.platform == 'win32':
    timer = time.clock
else:
    timer = time.time
print "Generating sim data from resolution target..."
for i in range(illumination_info['num_scan_positions']):
    print "Scan position", i
    start = timer()
    data = density_to_sim_data(
        density=actual_object,
        emission_psf=object_emission_psf,
        which_scan_position=i)
    end = timer()
    print "Elapsed time:", end - start
    if i == 0:
        noisy_sim_data = np.zeros(
            (illumination_info['num_scan_positions'],) + data.shape[-3:],
            dtype=np.float32)
    noisy_sim_data[i, :, :, :] = data
    """
    We started with a huge resolution target, and now we have tiny
    cropped/binned SIM data. If we try to decon this, it'll use a
    huuuuge estimate, just as big as the actual object, because the
    illumination info is for a huge object.

    We'd like to use a moderate-sized estimate, with a bigger field of
    view and finer pixels than the SIM data, but a smaller field of
    view and coarser pixels than the actual object.
    
    So, crop and bin the illumination patterns we just used, replacing
    the old illumination patterns.
    """
    if separable_illumination:
        illumination_xy = get_separable_illumination_pattern(
            which_pattern=i, direction='xy')
        cropped_illumination_xy = illumination_xy[
            :,
            crop_to_estimate_xy:illumination_xy.shape[1] - crop_to_estimate_xy,
            crop_to_estimate_xy:illumination_xy.shape[2] - crop_to_estimate_xy]
        binned_illumination_xy = binned_downsampling(
            cropped_illumination_xy, bin_size=(1,
                                               zoom_factor_to_estimate_xy,
                                               zoom_factor_to_estimate_xy))
        set_separable_illumination_pattern(
            binned_illumination_xy, which_pattern=i, direction='xy')
        illumination_z = get_separable_illumination_pattern(
            which_pattern=i, direction='z')
        binned_illumination_z = binned_downsampling(
            illumination_z, bin_size=(zoom_factor_to_estimate_z, 1, 1))
        set_separable_illumination_pattern(binned_illumination_z,
                                           which_pattern=i,
                                           direction='z')
    else:
        for d in range(illumination_info['num_depths']):
            illumination = get_illumination_pattern(
                which_depth=d, which_pattern=i)
            cropped_illumination = illumination[
                crop_to_estimate_z:illumination.shape[0] - crop_to_estimate_z,
                crop_to_estimate_xy:illumination.shape[1] - crop_to_estimate_xy,
                crop_to_estimate_xy:illumination.shape[2] - crop_to_estimate_xy]
            binned_illumination = binned_downsampling(
                cropped_illumination,
                (zoom_factor_to_estimate_z,
                 zoom_factor_to_estimate_xy,
                 zoom_factor_to_estimate_xy))
            set_illumination_pattern(binned_illumination,
                                     which_depth=d,
                                     which_pattern=i)
"""
Now that we've replaced all the old illumination patterns, correct and
reset illumination_info:
"""
illumination_info['density_shape'] = (
    ((actual_object.shape[0] - 2*crop_to_estimate_z) /
     zoom_factor_to_estimate_z),
    ((actual_object.shape[1] - 2*crop_to_estimate_xy) /
     zoom_factor_to_estimate_xy),
    ((actual_object.shape[2] - 2*crop_to_estimate_xy) /
     zoom_factor_to_estimate_xy))
illumination_info['zoom_factor_xy'] = (zoom_factor_to_data_xy /
                                       zoom_factor_to_estimate_xy)
illumination_info['zoom_factor_z'] = (zoom_factor_to_data_z /
                                      zoom_factor_to_estimate_z)
illumination_info['pad_amount_xy'] = ((crop_to_data_xy - crop_to_estimate_xy) /
                                      zoom_factor_to_estimate_xy)
illumination_info['pad_amount_z'] = ((crop_to_data_z - crop_to_estimate_z) /
                                      zoom_factor_to_estimate_z)
illumination_info['depth_list'] = range(
    illumination_info['pad_amount_z'],
    illumination_info['density_shape'][0] - illumination_info['pad_amount_z'],
    illumination_info['zoom_factor_z'])
set_illumination_info(illumination_info)

print "Done generating."
print "Saving visualization..."
sim_data_to_visualization(noisy_sim_data, outfile='noiseless_sim_data.tif')
print "Done saving"

##"""
##Useful for debugging the transpose operations:
##"""
##print "Computing transpose..."
##from decon_3D import sim_data_to_density
##tran = sim_data_to_density(sim_data=noisy_sim_data,
##                           emission_psf=estimate_emission_psf)
##print "Done computing."
##print "Saving transpose..."
##array_to_tif(tran.astype(np.float32), outfile='transpose.tif')
##print "Done saving."

##"""
##Useful for profiling the forward and transpose expected data
##operations
##"""
##import cProfile, pstats
##from decon_3D import sim_data_to_density
##print "Profiling expected data operation..."
##cProfile.run(
##    """density_to_sim_data(density=object_cropped_and_binned_to_estimate, emission_psf=estimate_emission_psf)
##    """,
##    'H_stats.log')
##print "Done profiling."
##print "Profiling transpose operation..."
##cProfile.run(
##    """sim_data_to_density(sim_data=noisy_sim_data, emission_psf=estimate_emission_psf)
##    """,
##    'H_transpose_stats.log')
##print "Done profiling"
##p = pstats.Stats('H_stats.log')
##p.strip_dirs().sort_stats('cumulative').print_stats()
##p = pstats.Stats('H_transpose_stats.log')
##p.strip_dirs().sort_stats('cumulative').print_stats()

print "Saving unprocessed image..."
array_to_tif(noisy_sim_data.sum(axis=0).astype(np.float32),
             outfile='noiseless_image.tif')
print "Done saving"
print
"""
Add noise
"""
print "Adding noise to sim data..."
np.random.seed(0) #Repeatably random, for now
for p in range(noisy_sim_data.shape[0]):
    noisy_sim_data[p, :, :, :] = np.random.poisson(
        lam=intensity_scaling * noisy_sim_data[p, :, :, :])
print "Done adding noise."
print "Saving visualization..."
sim_data_to_visualization(noisy_sim_data, outfile='noisy_sim_data.tif')
print "Done saving"
print "Saving unprocessed image..."
array_to_tif(noisy_sim_data.sum(axis=0).astype(np.float32),
             outfile='noisy_image.tif')
print "Done saving"
raw_input('Hit enter to exit...')
