# -*- coding: utf-8 -*-
"""\
Main ptycho module. Might be broken into pieces soon, as the file is getting long.

Author: Pierre Thibault
Started July 5th 2010
"""

__all__ = ['useMPI', 'FType', 'CType', 'default_type', 'prepare_params', 
           'print_summary', 'ptycho_DM', 'ptycho_ML', 'ptycho_ePIE', 'save_run', 'load_run',
           'load_intens', 'load_fmag', 'verbose','Plotter', 'load_data',
           'raster_scan_positions', 'round_scan_positions', 'subpix_linear',
           'round_scan_ROI_positions', 'default_parameters', 'subpix_fourier',
           'gaussian_filter',
           'prepare_data', 'ortho', 'hooks']

DEBUG = False
USE_FFTPACK = True

import numpy as np
from numpy import fft as FFT
from pyE17 import utils as U
from pyE17 import prop
from pyE17 import io
from pyE17 import wave

from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt

from numpy import linalg as LA

io.h5options['UNSUPPORTED'] = 'ignore' # otherwise saving e.g. a function will fail. 
from pyE17 import verbose
if USE_FFTPACK:
    import scipy
    from scipy import fftpack as FFT
    if [int(x) for x in scipy.__version__.split('.')] < [0,8]:
        print('Warning: scipy.fftpack version (< 0.8) does not support single-precision ffts.')
import time
import sys
import os
import os.path
import re
from data_tools import prepare_data
from itertools import izip
from random import shuffle
from ptyplot import Plotter 

if DEBUG:
    # Debug build: route messages through a dedicated Verbose instance with
    # MPI reporting enabled (mpi_action = True).
    Debug = U.verbosemod.Verbose()
    Debug.mpi_action = True
    debug = lambda msg: Debug(1,msg)
else:
    # Debugging disabled: debug() is a no-op so call sites need no guards.
    debug = lambda msg: None

# NOTE(review): stale comment — no timing wrapper class follows here; MPI
# timing is handled via U.TimingWrapper inside prepare_params.

def _load_default_parameters():
    """\
    Load default reconstruction parameters.

    Looks for a file named 'ptycho.default', starting from the current
    working directory and walking up to the root of the filesystem.  The
    hard-coded parameters below are used if no such file is found.

    Returns
    -------
    p : U.Param
        The default parameter set.
    """
    ptycho_default_filename = 'ptycho.default'
    d = os.path.abspath(os.path.curdir)
    p = None
    while True:
        param_filename = os.path.join(d, ptycho_default_filename)
        try:
            p = U.Param(param_filename)
            verbose(2, 'Loaded default parameter file "%s".' % param_filename)
            break
        except Exception:
            # File missing or unreadable at this level: move one directory up.
            # (Narrowed from a bare 'except:' which would also swallow
            # KeyboardInterrupt/SystemExit.)
            d, t = os.path.split(d)
            if not t:
                # Reached the filesystem root without finding a file.
                break
    if p is None:
        # Hard-coded fallback defaults.
        p = U.Param()
        p.update(dict(
            clip_object = False,
            clip_min = .5,
            clip_max = 1.,
            dump_data = False,
            dump_interval = 10,
            dump_pattern = './%(run_name)s_dump.hd5',
            plot_fig = None,
            plot_interval = 10,
            average_start = 50,
            average_interval = 5,
            probe_change_start = 3,
            pbound = None,
            quad_interval = 5,
            scale_precond = True,
            data_type = 'double',
            #initial_probe = None,
            initial_object_type = 'ones',
            save = True,
            save_dir_patt = './',
            run_label = '0',
            last_plot = False,
            ctr = None,
            flip_data = True,
            use_periodic_boundary = False,
            average_probes = False,
            average_probe_amp = 1e-2,
            dump_plot = False,
            dump_plot_patt = 'dump_plot_%04d.png',
            empty_probe_dp = None,
            flat_object = False,
            flat_scan = None,
            flat_object_weight = 1.,
            dp_shift = False,
            probe_support = False,
            probe_support_type = 'disc',
            probe_support_area = .5,
            probe_support_start = 0,
            probe_before_object = False,
            reg_del2 = False,
            reg_del2_amplitude = 1e3,
            reg_TV = False,
            reg_TV_amplitude = 1e3,
            reg_Huber = False,
            reg_Huber_amplitude = 1e3,
            reg_Huber_parameter = 1e-6,
            multiple_propagators = False,
            multi_prop_par_list = [1],
            Ndp = 1,
            Nmodes_probe = 1,
            Nmodes_object = 1,
            remove_incoherent_scattering = False,
            incoherent_scattering_mask_count = 50,
            proportional_probes = False,
            nearfield = False,
            ePIE_regul_probe = .5,
            ePIE_regul_object = .5,
            ePIE_switch_probe = 0.,
            ePIE_switch_object = 0.,
            ePIE_alpha = 1.,
            ePIE_beta = 1.,
            intens_renorm = None,
            object_smooth_gradient = 0.,
            scale_probe_object = 1.,
            dump_object = False,
            dump_object_pattern = None,
            dump_object_interval = 1,
            dump_probe = False,
            dump_probe_pattern = None,
            dump_probe_interval = 1,
            ML_type = 'Gauss',
            float_intens = False,
            fourier_relax_factor = .5,
            scan_roi = None,
            probe_change_start_ML = 0,
            plot_mask = None,
            MPI_timing = False,
            LL_in_DM = False,
            LL_in_ePIE = False,
            subpix = False,
            subpix_method = 'fourier',
            subpix_start = 0,
            DM_smooth_amplitude = None,
            DM_smooth_std = 5.,
            plot_template = 'default',
            highq_exposure = None,
            probe_weight_in_DM = None,
            object_weight_in_DM = None,
            mode_spectrum = None,
            exitloopfile = None
            ))

    # NOTE(review): update() with no arguments is a no-op for dict-like
    # objects; kept in case U.Param overrides it with side effects.
    p.update()
    return p

# Parallel computation: set up MPI if available, otherwise fall back to a
# single-process configuration (psize=1, prank=0, comm=None).
global comm
try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    psize = comm.Get_size()
    prank = comm.Get_rank()
except Exception:
    # mpi4py missing or MPI failed to initialize - run on one processor.
    # (Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
    # are not swallowed during module import.)
    verbose(2, 'MPI initialization failed. Proceeding with one processor')
    MPI = None
    psize = 1
    prank = 0
    comm = None

global parallel
parallel = not (psize==1)

def useMPI(do=None):
    """\
    Toggle using MPI or not.
    
    """
    global parallel
    # No argument: act as a getter for the current state.
    if do is None:
        return parallel
    # Setter: MPI can only be enabled when mpi4py was imported successfully.
    parallel = False if MPI is None else do

default_parameters = _load_default_parameters()

# Default single/double precision
global FType, CType
FType = None
CType = None
def default_type(datatype):
    """\
    default_type('single')
    or
    default_type('double')
    
    Select the floating point precision for reconstructions.

    Sets the module globals FType (real) and CType (complex) to the
    matching numpy scalar types.

    Raises
    ------
    RuntimeError
        If datatype is neither 'single' nor 'double'.
    """
    if datatype not in ['single', 'double']:
        raise RuntimeError("Expected 'single' or 'double'.")
    global FType, CType
    # np.typeDict was removed in numpy 1.24; np.dtype resolves the
    # 'single'/'double' aliases directly.
    itemsize = np.dtype(datatype).itemsize
    FType = np.dtype('f' + str(itemsize)).type
    CType = np.dtype('c' + str(2 * itemsize)).type
    verbose(3, 'Default data type is %s' % datatype)

default_type(default_parameters.data_type)

import scipy.ndimage
def gaussian_filter(x, sigma):
    """Complex-capable Gaussian blur: the real and imaginary parts of `x`
    are filtered separately and recombined."""
    blur = scipy.ndimage.gaussian_filter
    return blur(x.real, sigma) + 1j * blur(x.imag, sigma)


def raster_scan_positions(nx,ny,sx,sy):
    """\
    Rectangular raster grid of (nx+1) x (ny+1) scan positions with step
    sizes sx and sy.  The second coordinate varies fastest.
    """
    return [(sx * ix, sy * iy)
            for ix in range(nx + 1)
            for iy in range(ny + 1)]

def round_scan_positions(r_in, r_out, nr, nth):
    """\
    Round scan positions, defined as in spec and matlab.

    Ring k (k = 1 .. nr+1) lies at radius r_in + k*dr and carries nth*k
    points, so the angular density stays roughly constant across rings.
    Each position is a (sin, cos) component pair.
    """
    dr = (r_out - r_in)/ nr
    points = []
    for ring in range(1, nr + 2):
        radius = r_in + ring * dr
        step = 2*np.pi / (nth*ring)
        for k in range(nth * ring):
            points.append((radius * np.sin(k * step), radius * np.cos(k * step)))
    return points

def round_scan_ROI_positions(dr, lx, ly, nth):
    """\
    Round scan positions with ROI, defined as in spec and matlab.

    Candidate points are laid out on concentric rings (ring k: radius k*dr,
    nth*k points); only those falling inside the lx x ly rectangle are kept.
    """
    rmax = np.sqrt( (lx/2)**2 + (ly/2)**2 )
    nrings = np.floor(rmax/dr) + 1
    kept = []
    for ring in range(1, int(nrings + 2)):
        radius = ring * dr
        th = 2*np.pi*np.arange(nth*ring)/(nth*ring)
        sines = radius * np.sin(th)
        cosines = radius * np.cos(th)
        kept.extend((s, c) for s, c in zip(sines, cosines)
                    if np.abs(s) <= ly/2 and np.abs(c) <= lx/2)
    return kept

# Shifting functions for subpixel precision in probe positions
def subpix_fourier(a,ramp,forward=True):
    """\
    Apply the sub-pixel shift encoded by `ramp` in Fourier space.

    The forward shift multiplies the spectrum of `a` by `ramp`; the
    backward shift multiplies by its complex conjugate.
    """
    phase = ramp if forward else ramp.conj()
    return np.fft.ifftn(np.fft.fftn(a) * phase)

def subpix_linear(a, pts, forward=True):
    """\
    Shift `a` by the (possibly fractional) vector `pts` using bilinear
    interpolation between the four surrounding integer shifts.
    """
    s0, s1 = pts
    if not forward:
        s0, s1 = -s0, -s1
    # The four integer shifts bracketing the requested displacement.
    a00 = U.pshift(a, [np.floor(s0), np.floor(s1)], 'nearest')
    a01 = U.pshift(a, [np.floor(s0), np.ceil(s1)], 'nearest')
    a10 = U.pshift(a, [np.ceil(s0), np.floor(s1)], 'nearest')
    a11 = U.pshift(a, [np.ceil(s0), np.ceil(s1)], 'nearest')
    # Fractional parts act as the bilinear weights (% 1 is positive even
    # for negative shifts, matching the floor/ceil pairs above).
    s0 = s0 % 1
    s1 = s1 % 1
    return a00*(1-s0)*(1-s1) + a01*(1-s0)*s1 + a10*s0*(1-s1) + a11*s0*s1

# List of hooks that can be called at various points in the reconstructions.
# Each value is a list of callables, keyed by phase_algorithm; e.g. 'pre_DM'
# presumably runs before the DM iteration loop and 'post_DM' after it (the
# call sites live in the engine functions, not shown here - verify there).
hooks = dict(
pre_DM = [],
loop_DM = [],
obj_inner_loop_DM = [],
post_DM = [],
pre_ML = [],
loop_ML = [],
post_ML = [],
pre_ePIE = [],
loop_ePIE = [],
post_ePIE = []
)

   
def prepare_params(pdict, **kwargs):
    """\
    Preparation routine for ptychographic reconstructions of any flavor.
    
    The routine draws the initial parameters from three possible sources, in
    this order:
        1) the key=value pairs in the argument
        2) pdict, the provided dictionary
        3) default_parameters, a dictionary containing some (but not all) default
           parameters.
    Two intended uses of this routine are:
        a) For script initialization. For convenience, or readability, important
           parameters can be defined in a script, at the end of which a call to
           prepare_params(globals()) will take care of extracting all necessary parameters
           and derive the missing parts.
        b) To update an already existing parameter dictionary.

    Returns the fully-derived parameter dictionary (p.paramdict).
    """

    # Npos: number of positions in a scan
    # Nscan: number of scans
    # Npts : total number of points to iterate over
    # Npts_scan : number of points to iterate over in a single scan
    # Npos_flat : number of positions in a flat scan
    # Npts_flat : number of points to iterate over in the flat scan
    # Nmodes_probe : number of probe modes
    # Nmodes_object : number of object modes
    # Nmodes : total number of extra modes
    # Ndp : number of diffraction patterns per scan point.
    # Ndata : total number of diffraction patterns
    # Ndata_scan : number of diffraction patterns in a scan
    # Ndata_flat : number of diffraction patterns in the flat scan
    # 
    # Nmodes = Nmodes_probe * Nmodes_object
    # Npts_scan = Ndp * Nmodes * Npos
    # Npts_flat = Ndp * Nmodes * Npos_flat
    # Ndata = Ndp * Npos * Nscan
    # Ndata_scan = Ndp * Npos
    # Ndata_flat = Ndp * Npos_flat
    # Npts = Nscan * Npts_scan + Npts_flat
    #      = Ndp * Nmodes * (Nscan*Npos + Npos_flat)

    # 'global' is needed because comm is rebound below (MPI_timing wraps it
    # in U.TimingWrapper); without it the assignment would create a local.
    global comm

    # Merge priority: kwargs > pdict > default_parameters (see docstring).
    p = U.Param(kwargs, pdict, default_parameters)

    verbose.set_level(p.verbose_level)

    verbose(3, 'Entering "prepare_params"...')

    # Set data_type
    default_type(p.data_type)
    verbose(3, '%-25s%15s' % ('Data type:', p.data_type))

    # Prefix, suffix, run_name
    p.asize = np.asarray(p.asize)
    asize = p.asize
    suffix = '_%03dx%03d_%s' % (tuple(asize) + (p.run_label,))
    scans = p.scans
    Nscan = len(scans)
    verbose(3, '%-25s%15d' % ('Number of scans:', Nscan))

    # Run name: first (and possibly last) scan name plus frame-size suffix.
    prefix = scans[0]
    if Nscan > 1:
        prefix += '_' + scans[-1]
    run_name = prefix  + suffix
    verbose(3, '%-25s%25s' % ('Run name:', run_name))

    p.suffix = suffix
    p.Nscan = Nscan
    p.prefix = prefix
    p.run_name = run_name
    if p.save:
        savedir = p.save_dir_patt % {'write_dir':prefix}
        # Only rank 0 creates the directory (ignore it if it already exists).
        if parallel and prank==0:
            try:
                os.mkdir(savedir)
            except OSError:
                pass
        p.save_file = savedir + p.run_name + '_%s.h5'
        verbose(3, '%-25s%25s' % ('File will be saved to:', p.save_file))
    else:
        verbose(2, 'No file will be saved at the end of this run.')

    if p.dp_shift is not None and p.Nscan == 1:
        verbose(2, 'Only one scan is being used in this reconstruction. dp_shift will be reset to False')
        p.dp_shift = False

    # Wavelength (meters). The constant is hc = 1.2398e-9 m*keV, so p.energy
    # is presumably expected in keV - TODO confirm with callers.
    lam = 1.2398e-9/p.energy
    # Bare access: presumably raises early if 'z' is missing from the
    # parameters (it is needed below in the far-field case) - confirm.
    p.z
    # Near/far field
    nearfield = p.nearfield
    if nearfield:
        dx_spec = p.ds    # resolution in the specimen plane = pixel size.
	if np.isscalar(dx_spec): dx_spec = np.array([dx_spec, dx_spec])
        verbose(3, '%-25s%f x %f (nanometers)' % ('Resolution:', 1e9*dx_spec[0], 1e9*dx_spec[1]))
    else:
        dx_spec = lam*p.z/(asize*p.ds)  # resolution in the specimen plane
        verbose(3, '%-25s%f x %f (nanometers)' % ('Resolution:', 1e9*dx_spec[0], 1e9*dx_spec[1]))

    p.lam = lam
    p.dx_spec = dx_spec
    verbose(3, '%-25s%f x %f (meters)' % ('Resolution:', p.dx_spec[0], p.dx_spec[1]))

    # Scan pattern: maps each scan to a probe index and an object index.
    scan_pattern = p.scan_pattern
    if scan_pattern is None:
        pr_indices = p.scan_pattern_probe_indices
        ob_indices = p.scan_pattern_object_indices
    else:
        # A zero/None interval means "never switch" (2**15 is effectively inf).
        pr_interval = scan_pattern[0] or 2**15
        ob_interval = scan_pattern[1] or 2**15
        pr_indices = np.arange(Nscan) // pr_interval
        ob_indices = np.arange(Nscan) // ob_interval

    # Prepare scan positions
    scan_type = p.scan_type
    verbose(3, '%-25s%25s' % ('Scan type:', str(p.scan_type)))
    if scan_type == 'raster':
        #raise RuntimeError('Raster needs to be implemented')
        scanpos = np.array(raster_scan_positions(p.nx, p.ny, p.step_size_x, p.step_size_y))
    elif scan_type == 'round':
        scanpos = np.array(round_scan_positions(p.radius_in, p.radius_out, p.nr, p.nth))
    elif scan_type == 'round_roi':
        scanpos = np.array(round_scan_ROI_positions(p.dr, p.lx, p.ly, p.nth))
    elif scan_type == 'custom':
        scanpos = np.asarray(p.positions)
    elif scan_type is None:
        # Positions will be read from the data files; motor names/multipliers
        # are optional (old file format fallback below).
        try:
            motorx = p.motorx
            motory = p.motory
            motorx_mult = p.motorx_mult
            motory_mult = p.motory_mult
        except:
            motorname_format = False
    else:
        raise RuntimeError('Unknown scan type : %s' % str(scan_type))

    datafile_list,positions,pos_subpix_portion = [],[],[]
    scan_info = {}

    # Collect positions (pixel units) and data file names for every scan.
    for i,scan in enumerate(p.scans):
        pathdir = p.pathdir_patt % {'scan':scan}
        datafile_name = p.datafile_patt % {'path':pathdir,'scan':scan, 'a0':asize[0], 'a1':asize[1]}
        verbose(3, 'Will read data file "%s"' % datafile_name)
        datafile_list.append(datafile_name)
        pri = pr_indices[i]
        obi = ob_indices[i]
        if scan_type is None:
            # Get positions from file
            try:
                # New file format
                pos = io.h5read(datafile_name, 'scan_info.positions')['positions']
            except:
                # transitory file format June - July 2012 
                counters = io.h5read(datafile_name, 'counters')['counters']
                pos = np.array(zip(np.array(counters[motory])*motory_mult, np.array(counters[motorx])*motorx_mult))
        else:
            pos = scanpos.copy()
        Npos = len(pos)
        # Minimal distance between the first point and any other (meters);
        # later used as the step scale for the fake flat-scan grid.
        pos_mindist = dx_spec
        if Npos > 1:
            pos_mindist = min([U.norm(ppos - pos[0]) for ppos in pos[1:]])
        # Convert to pixel units relative to the scan's minimum corner.
        pos_float = (pos - pos.min(axis=0))/dx_spec
        pos = np.round(pos_float)
        pos_dev = pos_float - pos           # Sub-pixel correction
        for j in range(Npos):
            # Each entry: (row, col, probe index, object index, row dev, col dev).
            positions.append(tuple(pos[j]) + (pri, obi) + tuple(pos_dev[j]))
            pos_subpix_portion.append(tuple(pos_float[j]))
        scan_info[str(scan)] = [datafile_name, len(positions) - Npos, len(positions)]
    p.datafile_list = datafile_list
    p.scan_info = scan_info
    p.pos_subpix_portion = pos_subpix_portion
    
    Npos_total = len(positions)

    # Here apply roi constraint and keep track of the position indices
    # pos_in_roi = ... bool
    # data_pos_index = ...
    # Npos_no_roi = len(pos)
    # pos = pos[pos_in_roi]
    Nmodes_probe = p.Nmodes_probe
    
    
    
    if p.multiple_propagators:
        verbose(3,'Multiple propagators requested')
        # One probe mode set per propagator parameter.
        Nmodes_probe *=len(p.multi_prop_par_list)
    
    p.Nmodes_probe=Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = Nmodes_probe * Nmodes_object
    p.Nmodes = Nmodes
    Ndp = p.Ndp
    #if Nmodes != 1:
    #    raise RuntimeError('Coherent modes not yet implemented!')
    if Ndp != 1:
        raise RuntimeError('Multiple diffration patterns per point (multiple exposures) not yet implemented!')

    #Npts_scan = Nmodes * Ndp * Npos
    #Npts = Nscan * Npts_scan
    #Ndata_scan = Ndp * Npos
    #Ndata = Nscan * Ndata_scan
    #p.Npts_scan = Npts_scan
    #p.Npts = Npts
    #p.Npos = Npos
    # Bookkeeping totals - see the variable glossary at the top of this
    # function for definitions.
    Npts = Nmodes * Ndp * Npos_total
    Npts_scan = Npts / float(Nscan)
    Ndata = Ndp * Npos_total
    Ndata_scan = int(Ndata / float(Nscan))
    p.Npts_scan = Npts_scan
    p.Npts = Npts
    p.Npos = Npos
    p.Npos_total = Npos_total
    p.Ndata = Ndata
    p.Ndata_scan = Ndata_scan
    
    verbose(3, '%-25s%d' % ('Total number of points: ', p.Npos_total))

    # Diffraction pattern center (far-field only); defaults to frame center.
    if not nearfield:
        if p.ctr is None:
            p.ctr = p.asize//2
        else:
            p.ctr = np.asarray(p.ctr)
        verbose(3, '%-25s%25s' % ('Diffraction pattern center: ', str(tuple(p.ctr))))

    subpix = p.subpix
    if subpix:
        verbose(2, 'Using subpixel precision (after %dth iteration) for probe positions (interpolation method "%s").' % (p.subpix_start, p.subpix_method))

    if len(p.datafile_list) == 1:
        verbose(3, '%-25s%s' % ('Data file:', p.datafile_list[0]))
    else:
        verbose(3, '%s\n%s\n...\n%s' % ('Data files:', p.datafile_list[0], p.datafile_list[-1]))

    p.num_probes = np.max(pr_indices)+1
    p.num_objects = np.max(ob_indices)+1

    verbose(3, '%-25s%d' % ('Number of independent probes:', p.num_probes))
    verbose(3, '%-25s%d' % ('Number of independent objects:', p.num_objects))

    # Flat-object mode: append an extra scan of an empty region, which is
    # reconstructed as one additional object layer clamped to 1 below.
    if p.flat_object:
        verbose(2, 'Flat object will be enforced')
        if p.num_probes > 1:
            verbose(1, 'Warning: more than one probe will be reconstructed, but flat_object couples with #0 only')

        # Add the relevant scan file
        scan = p.flat_scan
        pathdir = p.pathdir_patt % {'scan':scan}
        flat_scan_file = p.datafile_patt % {'path':pathdir,'scan':scan, 'a0':asize[0], 'a1':asize[1]}
        verbose(3, 'Will load file %s' % flat_scan_file)

        # Figure out how many diffraction patterns there are in the flat scan
        if (not parallel) or prank == 0:
            # Load file
            root,ext = os.path.splitext(flat_scan_file)
            is_matlab = (ext == '.mat')
            d = _load_data(flat_scan_file)
            if is_matlab:
                data = _matlab_transpose(d['data'])
            else:
                data = d['data']
            del d
            Npos_flat = data.shape[0]
            verbose(3, 'Flat scan file %s loaded successfully.' % flat_scan_file)
            verbose(3, '%d diffraction patterns in the flat scan file.' % Npos_flat)
            del data
        else:
            Npos_flat = 0


        if parallel:
            # Share the count from rank 0 with all other ranks.
            Npos_flat = comm.bcast(np.array(Npos_flat), root=0).item()

        Ndata_flat = Ndp * Npos_flat   

        p.datafile_list.append(flat_scan_file)

        # One more object layer
        p.num_objects += 1
        obi = p.num_objects - 1

        # Add fake positions
        # Default for now: a raster scan
        linear_extent = int(np.ceil(np.sqrt(Npos_flat)))
        flat_step = .6181 * pos_mindist
        flat_pos = np.array(raster_scan_positions(linear_extent, linear_extent, flat_step, flat_step))
        flat_pos = flat_pos[:Npos_flat]
        flat_pos_float = (flat_pos - flat_pos.min(axis=0))/dx_spec
        flat_pos = np.round(flat_pos_float)
        flat_pos_dev = flat_pos_float - flat_pos
        verbose(3, 'Flat scan : using a %d x %d grid with step size of %6.2e meters' % (linear_extent, linear_extent, flat_step))

        for j in range(Npos_flat):
            # Flat-scan positions always use probe index 0.
            positions.append(tuple(flat_pos[j]) + (0,obi) + tuple(flat_pos_dev[j]))

        # Update total number of points
        Npts += Ndp * Nmodes * Npos_flat
        p.Npts = Npts
        p.Npos_flat = Npos_flat
        p.Ndata_flat = Ndata_flat
        p.Ndata += p.Ndata_flat

        # Switch updating of probe and object in the inner loop
        p.probe_before_object = True
        verbose(3, 'Flat object weight is set to %f' % p.flat_object_weight)

    # Empty-probe constraint: a measured diffraction pattern of the probe
    # alone, enforced on probe index 0 (loaded by the last MPI rank only).
    p.use_empty_probe = False
    if p.empty_probe_dp not in (None, False):
        verbose(2, 'Using empty probe diffraction pattern.')
        if p.num_probes > 1:
            verbose(1, 'Warning: empty probe diffraction pattern will be enforced only on probe index 0.')

        # We give this job to the last process
        if prank == psize-1:
            if str(p.empty_probe_dp) == p.empty_probe_dp:
                # We are given a data file name
                verbose(2, 'Loading empty probe data file.')
                d = _load_data(p.empty_probe_dp)
                empty_probe_data = d['data']
                if empty_probe_data.ndim == 3:
                    # Average a stack of exposures into a single pattern.
                    empty_probe_data = empty_probe_data.mean(axis=0)
                try:
                    empty_probe_mask = d['mask']
                except KeyError:
                    verbose(2, 'No mask found in empty probe data file.')
                    empty_probe_mask = np.ones_like(empty_probe_data, dtype=bool)
            else:
                # p.empty_probe_dp is already an array.
                empty_probe_data = p.empty_probe_dp
                empty_probe_mask = np.ones_like(empty_probe_data, dtype=bool)

            p.use_empty_probe = True
            p.empty_probe_data = empty_probe_data
            p.empty_probe_mask = empty_probe_mask
            # Bare access: presumably asserts the parameter exists (raises
            # if missing), like p.z above - TODO confirm.
            p.empty_probe_strength 

    positions = np.array(positions)
    p.positions = positions

    # Intensity renormalization 
    intens_renorm = p.intens_renorm
    if intens_renorm is not None:
        # Transform scalar or list input into a datafile-dependent dictionary
        verbose(3, 'Using intensity renormalization.')
        if not isinstance(intens_renorm, dict):
            if np.isscalar(intens_renorm):
                intens_renorm = [intens_renorm]*len(p.datafile_list)
            p.intens_renorm = dict(zip(p.datafile_list,intens_renorm))

    # Prepare scan positions and MPI data sharing
    _prepare_datainfo(p)
 
    # Object canvas must contain every frame: frame size + largest offset.
    osize = tuple(asize + positions[:,0:2].max(axis=0))
    p.object_size = (p.num_objects, Nmodes_object) + osize
    p.probe_size = (p.num_probes, Nmodes_probe) + tuple(asize)
    
    if p.probe_support is not False:
        if p.probe_support_type == 'disc':
            verbose(2, 'Using a disc support for the probe.')
            # Disc radius chosen so that the disc area is
            # probe_support_area * (total frame area).
            supp_radius2 = np.ceil(np.prod(asize)*p.probe_support_area/np.pi)
            p.probe_support = (np.fft.fftshift(U.fvec2(asize)) < supp_radius2).astype('float')
        elif p.probe_support_type is None:
            verbose(2, 'Using the provided probe support.')
        else:
            raise RuntimeError('Unsupported probe support type : %s' % str(p.probe_support_type))

    # Prepare probe
    initial_probe_type = p.initial_probe_type

    # ----For backward compatibility:
    if initial_probe_type != 'file' and p.get('initial_probe_file') is not None: 
        verbose(1, "Warning: 'initial_probe_file' is set but 'initial_probe_type' is '%s' rather than 'file'." % initial_probe_type) 
    # ----

    verbose(3, '%-25s%s' % ('Initial probe type:', initial_probe_type)) 
    if initial_probe_type == 'disc':
        # A defocused circular aperture
        r2 = np.fft.fftshift(U.fvec2(asize, dx_spec))
        probe = (r2 < (p.probe_diameter/2.)**2).astype(CType)
        probe = prop.free_nf(probe, lam, p.probe_propdist, dx_spec[0]);
    elif initial_probe_type == 'focus':
        #Focussed probe
        t_psize = dx_spec[0] # focussed_probe has (y,x) convention for pixels. that maybe different for dx_spec
        t_pupil = p.probe_focus_aperture_type # 'rect'*,'circ'  *default
        t_pupildims = p.probe_focus_aperture_size # size of beam aperture at position of focussing optics
	t_fdist = p.probe_focaldist # distance of optics to focus
        t_defocus = p.probe_propdist # free space propagation distance (nearfield) of probe after focus
        t_focusdims = p.probe_diameter # characterize focal spot by focal spot size and not by optics aperture dimensions
	if p.probe_antialiasing is not None:
            # NOTE(review): t_antialiasing is only assigned in this branch;
            # if p.probe_antialiasing is None the focussed_probe call below
            # raises NameError - looks like a latent bug, confirm intent.
            t_antialiasing = p.probe_antialiasing #(>1.0) remove artifacts in probe generation, use antialiasing especially for defocussed probe
	probe=wave.focussed_probe(np.ones(asize),lam,t_psize,t_fdist,t_defocus,t_pupil,t_pupildims,t_focusdims,t_antialiasing,Plotting=False,WaveOut=False)
        if p.probe_use_conjugate:
            probe=probe.conj() 
    elif initial_probe_type == 'gaussian':
        # Gaussian probe
        r2 = np.fft.fftshift(U.fvec2(asize, dx_spec))
        probe = np.exp(-.5*r2/(p.probe_diameter/2.)**2).astype(CType)
    else:
        if initial_probe_type == 'file':
            # Load file
            initial_probe = _load_data(p.initial_probe_file, 'probe')['probe'].astype(CType)
            root,ext = os.path.splitext(p.initial_probe_file)
            if (ext == '.mat'):
                initial_probe = _matlab_transpose(initial_probe)
        else:
            # Last try: use an "initial_probe"
            verbose(2, "Using in-memory initial probe.")
            initial_probe = p.initial_probe.astype(CType)
        if initial_probe.ndim == 2:
            initial_probe.resize((1,) + initial_probe.shape)
        if initial_probe.ndim == 3: # and Nmodes > 1:
            # Augment the dimension to 4 and initialize the higher modes to other random (small) values 
            # Rank 0 draws the random modes, then broadcasts so all ranks agree.
            if prank == 0:
                new_initial_probe = []
                for pr in initial_probe:
                    new_initial_probe.append([pr] + [pr*.1*np.random.normal(size=asize) for mode in range(Nmodes_probe-1)])
                    #new_initial_probe.append([pr] + [pr for mode in range(Nmodes_probe-1)])
                initial_probe = np.array(new_initial_probe)
            else:
                initial_probe = None
            if parallel:
                initial_probe = comm.bcast(initial_probe, root=0)
        # Repeat in case the new reconstruction has more probes than initial_probe
        probe = np.repeat(initial_probe, p.num_probes // initial_probe.shape[0], axis=0)

    if probe.ndim == 2:
        # Tile a 2D probe into a 4D array, filling higher modes to other random values
        if p.mode_spectrum == 'equal':
            # All modes start with equal weight (plus noise).
            weight=[1. for mode in range(Nmodes_probe)]
            probe = np.array([[probe*(weight[mode]+.1*np.random.normal(size=asize)) for mode in range(Nmodes_probe)] for ii in range(p.num_probes)])
        elif np.isscalar(p.mode_spectrum):
            # A single selected mode gets weight 1, the others 0 (plus noise).
            weight=[0. for mode in range(Nmodes_probe)]
            weight[np.int(p.mode_spectrum) %len(weight)]=1.0
            probe = np.array([[probe*(weight[mode]+.1*np.random.normal(size=asize)) for mode in range(Nmodes_probe)] for ii in range(p.num_probes)])
        elif isinstance(p.mode_spectrum,(list,np.ndarray)):
            # Explicit per-mode weights provided by the user.
            weight= p.mode_spectrum
            probe = np.array([[probe*(weight[mode]+.1*np.random.normal(size=asize)) for mode in range(Nmodes_probe)] for ii in range(p.num_probes)])
        else:
            # Default: first mode is the probe itself, others small random.
            probe = np.array([[probe]+[probe*.1*np.random.normal(size=asize) for mode in range(Nmodes_probe-1)] for ii in range(p.num_probes)])
        #probe = np.array([[probe]+[probe for mode in range(Nmodes_probe-1)] for ii in range(p.num_probes)])

    try:
        assert probe.shape == p.probe_size        
    except AssertionError:
        print 'probe.shape = ' + str(probe.shape)
        print 'p.probe_size = ' + str(p.probe_size)
        raise

    # Normalization
    probe = probe.astype(CType)
    a2 = np.prod(asize)
    probe *= np.sqrt(a2 * p.num_probes / U.norm2(probe))

    if p.probe_support is not False:
        p.probe = p.probe_support * probe
    else:
        p.probe = probe

    # Check for initial object
    initial_object_type = p.initial_object_type
    verbose(3, '%-25s%s' % ('Initial object type:', initial_object_type)) 
    if initial_object_type == 'ones':
        object = np.ones(p.object_size, dtype=CType)
        if Nmodes_object > 1:
            # Break the degeneracy of the higher object modes with noise.
            object[:,1:,:,:] += .1*np.random.normal(size=object[:,1:,:,:].shape) + .1j*np.random.normal(size=object[:,1:,:,:].shape)
    elif initial_object_type == 'stxm':
        raise RuntimeError('STXM analysis for initial object is not yet implemented.')
    else:
        if initial_object_type == 'file':
            initial_object_dict = _load_data(p.initial_object_file)
            dict_has_key_object = initial_object_dict.has_key('object')
            dict_has_key_obj = initial_object_dict.has_key('obj')
            if dict_has_key_obj and dict_has_key_object:
                verbose(1, 'Warning, provided initial object file has both "object" and "obj" variables. Using default ("object").')
                initial_object = initial_object_dict['object'].astype(CType)
            elif dict_has_key_object:
                initial_object = initial_object_dict['object'].astype(CType)
            else:
                initial_object = initial_object_dict['obj'].astype(CType)
        else:
            verbose(2, "Using in-memory initial object.")
            initial_object = p.initial_object.astype(CType)
        if initial_object.ndim == 2:
            initial_object.resize((1,) + initial_object.shape)
        if initial_object.ndim == 3: # and Nmodes > 1:
            # Augment the dimension to 4 and initialize the higher modes to other random (small) values 
            new_initial_object = []
            for ob in initial_object:
                new_initial_object.append([ob] + [ob*.1*np.random.normal(size=osize) for mode in range(Nmodes_object-1)])
            initial_object = np.array(new_initial_object)
            del new_initial_object
        object = np.repeat(initial_object, p.num_objects // initial_object.shape[0],axis=0)
        
    assert object.shape == p.object_size

    if p.flat_object:
        object[-1] = 1. # This sets all object modes to 1.

    if p.probe_before_object:
        verbose(2, 'Will switch object and probe updates in inner loop (DM only).')

    p.object = object
    # DM regularization constant, scaled with the number of points per scan.
    p.cfact = .000000001*Npts_scan

    if p.remove_incoherent_scattering:
        verbose(2, 'Will try to subtract addaptively a smooth incoherent background in the diffraction patterns (ML only).')

    # This was done here, but now it is done within ptycho_ML
    # p.object_smooth_filter = prepare_smoothing_conditioner(p.object_smooth_gradient)

    verbose(2, 'Scale object/probe: %3.1g' % p.scale_probe_object)

    if p.dump_object and p.dump_object_pattern is None:
        p.dump_object_pattern = './' + p.scans[0] + '_' + p.run_label + '_object_%04d.h5'
        verbose(2, 'Will dump object in files "%s"... (interval = %d)' % ( (p.dump_object_pattern % 0), p.dump_object_interval))
    if p.dump_probe and p.dump_probe_pattern is None:
        p.dump_probe_pattern = './' + p.scans[0] + '_' + p.run_label + '_probe_%04d.h5'
        verbose(2, 'Will dump probe in files "%s"... (interval = %d)' % ( (p.dump_probe_pattern % 0), p.dump_probe_interval))

    if p.MPI_timing:
        # Wrap comm so every MPI call is timed (this rebinds the module-level
        # comm - hence the 'global comm' at the top of this function).
        comm = U.TimingWrapper(comm)
        verbose(2, 'Using MPI timing')

    if p.LL_in_DM:
        verbose(2, 'Will compute Gaussian Log-likelihood in DM')

    if p.reg_del2 or p.reg_TV or p.reg_Huber:
        if p.scale_precond:
            verbose(1, 'Warning: Regularization will be used - disabling the P/O scaling preconditioner')
            p.scale_precond = False
        #if p.object_smooth_filter is not None:
        #    verbose(1, 'Warning: using a smoothing preconditioner in combination with the regularizer may be counterproductive.')

    if p.DM_smooth_amplitude is not None:
        verbose(2, 'Using object smoothing in DM (amplitude = %g, std = %g)' % (p.DM_smooth_amplitude, p.DM_smooth_std))


    # Other variables that need to pass through for the reconstruction.
    # These bare attribute accesses presumably raise if a required key is
    # missing, acting as an early existence check - TODO confirm that this
    # is how U.Param behaves.
    try:
        p.debug_subpix_disp
    except:
        None
    p.exitloopfile
    p.mode_spectrum
    p.probe_weight_in_DM
    p.object_weight_in_DM
    p.highq_exposure
    p.subpix_disp
    p.subpix_disp_start
    p.subpix_method
    p.numit
    p.doplot
    p.dump_plot
    p.dump_plot_patt
    p.pbound
    p.average_start
    p.average_interval
    p.scale_precond
    p.plot_interval
    p.flip_data
    p.probe_change_start
    p.probe_change_start_ML
    p.average_probes
    p.average_probe_amp
    p.probe_support_start
    p.reg_del2
    p.reg_del2_amplitude
    p.reg_TV
    p.reg_TV_amplitude
    p.reg_Huber
    p.reg_Huber_amplitude
    p.reg_Huber_parameter
    p.remove_incoherent_scattering
    p.incoherent_scattering_mask_count
    p.proportional_probes
    p.ePIE_regul_probe
    p.ePIE_regul_object
    p.ePIE_switch_probe
    p.ePIE_switch_object
    p.ePIE_alpha
    p.ePIE_beta
    p.quad_interval
    p.ML_type
    p.float_intens
    p.fourier_relax_factor
    p.plot_mask
    p.LL_in_ePIE
    p.plot_template
    # Convenient to store for documentation purposes.
    p.parallel = parallel
    p.psize = None
    if parallel:
        p.psize = psize

    return p.paramdict

def prepare_smoothing_preconditioner(object_smooth_gradient):
    """\
    Build the smoothing filter applied to the object gradient in ptycho_ML.

    A positive value selects a Gaussian blur of that width, a negative one
    a fixed 3x3 Hann window, and zero disables smoothing (returns None).
    (This code was moved out of prepare_params so it can change from one
    call of ptycho_ML to another -- a small step towards a redesign.)
    """
    if object_smooth_gradient == 0.:
        return None

    class GaussFilt:
        # Gaussian blur applied frame-by-frame over the last two axes.
        def __init__(self, sigma):
            self.sigma = sigma
        def __call__(self, x):
            out = np.empty_like(x)
            tail = x.shape[-2:]
            flat_in = x.reshape((-1,) + tail)
            flat_out = out.reshape((-1,) + tail)
            for k, frame in enumerate(flat_in):
                flat_out[k] = gaussian_filter(frame, self.sigma)
            return out

    from scipy.signal import correlate2d

    class HannFilt:
        # 3x3 Hann-window correlation applied frame-by-frame.
        def __call__(self, x):
            kernel = np.array([[.0625, .125, .0625], [.125, .25, .125], [.0625, .125, .0625]])
            out = np.empty_like(x)
            tail = x.shape[-2:]
            flat_in = x.reshape((-1,) + tail)
            flat_out = out.reshape((-1,) + tail)
            for k, frame in enumerate(flat_in):
                flat_out[k] = correlate2d(frame, kernel, mode='same')
            return out

    if object_smooth_gradient < 0.:
        verbose(2, 'Using a smooth gradient filter (Hann window - only for ML)')
        return HannFilt()

    verbose(2, 'Using a smooth gradient filter (Gaussian blur - only for ML)')
    return GaussFilt(object_smooth_gradient)



def print_summary(p):
    """\
    Return a text summary of the reconstruction parameters in dictionary p.

    Every key referenced in the template that is missing from p is rendered
    as '<not set>' instead of raising a KeyError; 'dtime' is always filled
    with the current time.
    """

    # Format string
    s = """\
    ### Run summary ###
    Time now: {dtime}
    Run name: {run_name}
    Scan type: {scan_type}
    Number of scans: {Nscan}
    Number of diffraction patterns per scan: {Npts_scan}
    Parallel run : {parallel} ({psize} processes)
    Object size: {object_size}
    Probe size: {probe_size}
    Pixel size (meters): {dx_spec}
    ### ------------ ###
    """

    # Extract all keys, substituting a default value if not present.
    # Raw string avoids the py3 invalid-escape warning for '\{'.
    keys = re.findall(r'\{([^\}]*)\}', s)
    not_there = '<not set>'
    ndict = dict((k, p.get(k, not_there)) for k in keys)
    ndict['dtime'] = time.asctime()
    return s.format(**ndict)



def ptycho_DM(pdict=None, **kwargs):
    """\
    Ptychography reconstruction based on the difference map (DM) algorithm.

    Parameters come from kwargs and/or the dictionary pdict, layered on top
    of the module-level default_parameters. The reconstructed object, probe
    and error history are stored back into the parameter structure, which
    is returned (p.paramdict). When p.save is set the run is also written
    to disk with save_run.
    """
    # Create the parameter holder
    p = U.Param(kwargs, pdict, default_parameters, store=(0,1))

    verbose.set_level(p.verbose_level)
    verbose(3,'Entering ptycho_DM...')
   
    positions = p.positions
    asize = p.asize
    Npts = p.Npts
    Ndata = p.Ndata

    save_file = p.save_file % 'DM'
    verbose(2, 'Reconstruction will be saved as %s' % save_file)

    # Load data
    fmag, fmask = load_fmag(p)

    use_empty_probe = p.use_empty_probe
    if use_empty_probe:
        empty_probe_fmag = np.sqrt(p.empty_probe_data)
        empty_probe_mask = p.empty_probe_mask
        empty_probe_strength = p.empty_probe_strength
    
    # Manage diffraction pattern shifts
    if p.dp_shift is True:
        verbose(2, 'dp_shift is True: automated shifts of the diffraction patterns will be used.')
        # Compute the average of every scan's diffraction patterns
        dp_average = np.zeros((p.Nscan,) + tuple(asize), FType)
        fmask_all = np.zeros((p.Nscan,) + tuple(asize), FType)
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            if fmag[lstart] is not None:
                #print fmask_all[iscan].shape, fmask[lstart].shape
                fmask_all[iscan] = fmask[lstart]
            for jj in range(lstart, lend):
                if fmag[jj] is None:
                    continue
                dp_average[iscan] += fmag[jj]**2
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, dp_average)
            comm.Allreduce(MPI.IN_PLACE, fmask_all)
        dp_average /= p.Npts_scan
        fmask_all_sum = fmask_all.sum(axis=0)
        fmask_all_sum += (fmask_all_sum == 0)
        dp_global_average = (dp_average * fmask_all).sum(axis=0) / fmask_all_sum
        # Compute shift relative to global average
        dp_shift = []
        #io.h5write('average_dump.h5', dp_global_average=dp_global_average, dp_average=dp_average, fmask_all=fmask_all)
        for ii in range(len(dp_average)):
            #r = U.shift_best(dp_global_average, dp_average[ii], fmask[ii])[1]
            # log(x+1) compresses the dynamic range before registration
            r = U.shift_best(np.log(dp_global_average+1), np.log(dp_average[ii]+1), fmask_all[ii])[1]
            dp_shift.append(r)
        p.dp_shift = dp_shift
    elif p.dp_shift is not False:
        verbose(2, 'Using provided sub-pixel shifts for the diffraction patterns.')

    # Fourier-space phase ramps implementing the per-scan sub-pixel shifts
    dp_shift_ramp = [None]*Npts
    dp_shift_ramp_conj = [None]*Npts
    if p.dp_shift:
        verbose(3, ''.join([p.scans[ii] + ' : ' + str(p.dp_shift[ii]) + '\n' for ii in range(len(p.scans))]))
        fx0, fx1 = U.fgrid(asize,1./asize)
        fx0 = np.fft.fftshift(fx0).astype(FType)
        fx1 = np.fft.fftshift(fx1).astype(FType)
        dp_shift_ramp_per_scan = [np.exp(2j * np.pi * (fx0*x0 + fx1*x1)).astype(CType) for x0,x1 in p.dp_shift]
        dp_shift_ramp_per_scan_conj = [np.exp(-2j * np.pi * (fx0*x0 + fx1*x1)).astype(CType) for x0,x1 in p.dp_shift]
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            for ll in range(lstart, lend):
                if fmag[ll] is not None:
                    dp_shift_ramp[ll] = dp_shift_ramp_per_scan[iscan]
                    dp_shift_ramp_conj[ll] = dp_shift_ramp_per_scan_conj[iscan]

    # Maximum power : used for normalization
    power_list = [U.norm2(ff) if ff is not None else 0. for ff in fmag]
    if p.use_empty_probe:
        power_list += [U.norm2(empty_probe_fmag)]
    max_power = max(power_list)
    if parallel:
        max_power = np.array([max_power])
        comm.Allreduce(MPI.IN_PLACE, max_power, op=MPI.MAX)
        max_power = max_power[0]

    # Total number of measurements - used for normalization in LL_in_DM
    tot_measpts = sum([fm.sum() for fm in fmask if fm is not None])
    if parallel:
        tot_measpts = np.array([tot_measpts])
        comm.Allreduce(MPI.IN_PLACE,tot_measpts)
        tot_measpts = tot_measpts.item()

    # Is this a continuing run?
    cont_run = False
    if p.hasattr('history'):
        verbose(3, 'Continuing previous run.')
        cont_run = True
        probe = p.probe
        obj = p.obj
    else:
        verbose(3, 'No previous run. Starting from initialized objects and probe.')
        p.history = []
        probe = p.probe
        # Scale the initial probe so its total power matches the data
        probe *= np.sqrt(p.num_probes * max_power / U.norm2(probe))
        obj = p.object
    
    
    if p.highq_exposure is not None:
        base,highq,fact=p.highq_exposure
        verbose(3, 'Adapting probe guess of highq exposure by a factor %.2f' % fact)
        probe[highq]=probe[base]*fact

    p.obj = obj
    p.probe = probe

    # Quick sanity check
    assert (asize == np.array(probe.shape[2:])).all()

    if p.pbound is None:
        # This formulation is consistent with Giewekemeyer (2010). 
        pbound = .25 * p.fourier_relax_factor**2
        verbose(3, 'Computed pbound is %g (would be %g in the old formulation)' % (pbound, pbound * np.prod(asize) / max_power))  
    else:
        # Renormalize pbound (old forumulation)
        pbound = p.pbound * max_power / np.prod(asize)

    # Local references of various parameters
    numit = p.numit
    object_size = p.object_size
    clip_object = p.clip_object
    if clip_object:
        clip_max = p.clip_max
        clip_min = p.clip_min

    nearfield = p.nearfield
    probe_before_object = p.probe_before_object

    # Local references of regular task parameters
    dump_data = p.dump_data
    if dump_data:
        dump_interval = p.dump_interval
        dump_filename = p.dump_pattern % {'run_name': p.run_name}
    dump_object = p.dump_object
    if dump_object:
        dump_object_interval = p.dump_object_interval
        dump_object_pattern = p.dump_object_pattern
    dump_probe = p.dump_probe
    if dump_probe:
        dump_probe_interval = p.dump_probe_interval
        dump_probe_pattern = p.dump_probe_pattern
    average_start = p.average_start
    average_interval = p.average_interval
    probe_change_start = p.probe_change_start
    
    # Initialization
    err = []
    p.err = err
    rfact = []
    avob = np.zeros_like(obj)
    numav = 0
    im_number = 0
    cfact = p.cfact

    compute_LL = p.LL_in_DM
    if compute_LL:
        LL_DM = []
        p.LL_DM = LL_DM

    DM_smooth_obj = (p.DM_smooth_amplitude is not None)
    if DM_smooth_obj:
        DM_smooth_amplitude = (p.DM_smooth_amplitude * max_power * p.num_probes * Ndata) / np.prod(asize)
        DM_smooth_std = p.DM_smooth_std

    # Probe support
    use_probe_support = False
    if p.probe_support is not False:
        probe_support = p.probe_support
        use_probe_support = True

    subpix = p.subpix
    subpix_method = p.subpix_method
    subpix_start = p.subpix_start
    subpix_started = False
    do_subpix = None
    if subpix_method == 'fourier':
        do_subpix = subpix_fourier
    elif subpix_method == 'linear':
        do_subpix = subpix_linear
    else:
        raise RuntimeError('Unknown subpix method : %s' % str(subpix_method))

    # Prepare statistics
    proj1_time = 0
    wait1_time = 0
    wait2_time = 0
    proj2_time = 0
    plot_time = 0
    llindm_time = 0

    osh = obj.shape
    sh = probe.shape
    num_probes = p.num_probes
    num_objects = p.num_objects
    Nmodes_probe = p.Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = p.Nmodes
    if p.probe_weight_in_DM is not None:
        prweight = p.probe_weight_in_DM
    if p.object_weight_in_DM is not None:
        objweight = p.object_weight_in_DM

    nobj = np.zeros_like(obj)
    obj_denom = np.zeros_like(obj)
    nprobe = np.zeros_like(probe)
    probe_denom = np.zeros_like(probe)
    
    # Setup views
    position_ranges = [(x[3],            # Object index
                        x[0],            # start y
                        x[0]+sh[2],      # end y
                        x[1],            # start x
                        x[1]+sh[3],      # end x
                        x[2],            # probe index,
                        x[4],            # subpixel shift y 
                        x[5])            # subpixel shift x
                        for x in positions]

    # Take care of all the ugly indexing here.
    # This makes the loop parts below more readable.
    iter_flat = []               # A flat list of all independent terms
    iter_data = []          # A list of lists, made to map to fmag.
    num_elements_in_iter_by_fmag = 9
    for i in range(Ndata):
        if fmag[i] is None:
            iter_flat.extend(Nmodes*[None])
            iter_data.append(num_elements_in_iter_by_fmag*[None])
            continue
        x = position_ranges[i]
        psilist = []
        probelist = []
        objlist = []
        parlist = []
        subpix_parameter = None
        if subpix:
            if subpix_method.lower() == 'fourier':
                sp_shift = -np.array([x[6],x[7]])
                subpix_parameter = np.exp(2j * np.pi * np.sum(U.fgrid(asize,sp_shift/asize),axis=0))
            elif subpix_method.lower() == 'linear':
                subpix_parameter = -np.array([x[6],x[7]])
        
        for prm in range(Nmodes_probe):
            for obm in range(Nmodes_object):
                psi = probe[x[5],prm]*obj[x[0],obm,x[1]:x[2],x[3]:x[4]]  # This is the initial guess.
                iter_flat.append([\
                    psi,                                                   # 0 - iterate
                    probe[x[5],prm],                                       # 1 - probe
                    nprobe[x[5],prm],                                      # 2 - updated probe
                    probe_denom[x[5],prm],                                 # 3 - denominator for the sum
                    obj[x[0],obm, x[1]:x[2],x[3]:x[4]],                    # 4 - object
                    nobj[x[0],obm, x[1]:x[2],x[3]:x[4]],                   # 5 - updated object
                    obj_denom[x[0],obm, x[1]:x[2],x[3]:x[4]],              # 6 - object denominator for the sum
                    p.flat_object and (x[0] == num_objects-1),             # 7 - flat object flag
                    p.flat_object_weight if p.flat_object else 0.,         # 8 - flat object weight
                    p.probe_weight_in_DM[int(x[5])] if p.probe_weight_in_DM is not None else 1.,   # 9 - probe weight
                    p.object_weight_in_DM[int(x[0])] if p.object_weight_in_DM is not None else 1., # 10 - object weight
                    subpix_parameter])                                     # 11 - Parameter for subpixel shift
                psilist.append(psi)
                probelist.append(probe[x[5],prm])
                objlist.append(obj[x[0],obm, x[1]:x[2],x[3]:x[4]])
                if p.multiple_propagators:
                    parlist.append(p.multi_prop_par_list[prm % len(p.multi_prop_par_list)])
                else:
                    parlist.append(prm)
        iter_data.append([\
            psilist,                                                       # 0 - iterate list
            probelist,                                                     # 1 - probe views
            objlist,                                                       # 2 - object views
            fmag[i],                                                       # 3 - fmag
            fmask[i],                                                      # 4 - fmask
            dp_shift_ramp[i],                                              # 5 - dp shift
            dp_shift_ramp_conj[i],                                         # 6 - dp shift (conj)
            subpix_parameter,                                              # 7 - Parameter for probe subpixel shift 
            parlist                                                        # 8 - generic parameter list e.g. used for multiple propagators
            ])                                                      

    # List of flat object
    if p.flat_object:
        is_flat = [(x[3] == num_objects-1) for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
        is_flat_mode = [(x[3] == num_objects-1) for x in positions]
        flat_w = [1. if isfl else p.flat_object_weight for isfl in is_flat]

    if use_empty_probe:
        # Initial guess for empty probe exit wave
        empty_probe_psi = probe[0].copy()

    # Plotting
    if (p.doplot or p.dump_plot or p.last_plot) and (not parallel or prank==0):
        plot_interval = p.plot_interval
        if p.doplot:
            from matplotlib import pyplot
            pyplot.interactive(True)
        plot_fig = Plotter(p,interactive=p.doplot)
        if p.nearfield:
            p.plot_mask = np.zeros(osh[-2:], dtype=bool)
            dsh = (osh[-2] - sh[-2], osh[-1] - sh[-1])
            print(dsh)
            p.plot_mask[dsh[0]:-dsh[0], dsh[1]:-dsh[1]] = True
        else:
            p.plot_mask = np.fft.fftshift(U.fvec2(osh[-2:]) < .25*(max(osh[-2:]) - max(sh[-2:]))**2)
        if p.dump_plot:
            dump_plot_counter = 0

    a2 = np.prod(asize)
    if USE_FFTPACK:
        fnorm_fw = 1/np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = FFT.fft2
        ifft = FFT.ifft2
    else:
        fnorm_fw = 1/np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = np.fft.fft2
        ifft = np.fft.ifft2
        
    fshift = np.fft.fftshift
    if nearfield:
        # Angular-spectrum propagator for near-field geometry
        q2 = U.fvec2(asize, psize=(1./asize[0], 1./asize[1]))
        z = p.z / p.dx_spec[0]
        l = p.lam / p.dx_spec[0]
        propfact = np.exp(2j * np.pi * (z / l) * (np.sqrt(1 - q2*l**2) - 1) )     
        ipropfact = propfact.conj()
        def fprop(x,gen):
            return ifft(fft(x) * propfact)
        def ifprop(x,gen):
            return ifft(fft(x) * ipropfact)      
    #elif p.multiple_propagators:
    #    def fprop(x,gen):
    #        if gen < 0:
    #            f = fft(wave.cut2d(x,-gen))
    #            return fnorm_fw * fshift(wave.pad2d(fshift(f),-gen,fill=0.0))
    #        elif gen == 0:
    #            return fnorm_fw * fft(x)
    #        elif gen > 0:
    #            f = fft(wave.pad2d(x,gen,fill=0.0))
    #            return fnorm_fw * fshift(wave.cut2d(fshift(f),gen))
    #    def ifprop(x,gen):
    #        if gen < 0:
    #            f = fshift(wave.cut2d(fshift(x),-gen))
    #            return fnorm_bw * wave.pad2d(ifft(f),-gen,fill=0.0)
    #        elif gen == 0:
    #            return fnorm_bw * ifft(x)
    #        elif gen > 0:
    #            f = fshift(wave.pad2d(fshift(x),gen,fill=0.0))
    #            return fnorm_bw * wave.cut2d(ifft(f),gen)
    else:
        def fprop(x,gen):
            return fnorm_fw * fft(x)
        def ifprop(x,gen):
            return fnorm_bw * ifft(x)

    for f in hooks['pre_DM']:
        verbose(3, 'Calling pre-DM hooks')
        f(locals(),globals())

    def LL_in_DM():
        """\
        Computes the gaussian negative log-likelihood from within DM
        """
        LL = np.array([0.])
        for psilist, prlist, oblist, fmagi, fmaski, dp_rampi, dp_ramp_conji, sp_param, parlist in iter_data:
            if fmagi is None:
                continue
            if subpix_started:
                prlist = [do_subpix(pr, sp_param) for pr in prlist]
            fpsij = [fprop(pr * ob,gen) for pr,ob,gen in izip(prlist,oblist,parlist)]
            Imodel = sum(U.abs2(fi) for fi in fpsij)
            Ii = fmagi**2
            wi = fmaski/(Ii + 1)
            DI = Imodel - Ii  
            LL += (wi * DI**2).sum()
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, LL)
        return LL.item()/tot_measpts
        
    # main loop
    for it in range(numit):
        if parallel:
            comm.Barrier()
        # One scan: show its name; several: show "first - last"
        verbose(1,'%s, iteration # %d of %d' % (p.scans[0] if len(p.scans)==1 else p.scans[0] + ' - ' + p.scans[-1], it, numit))
        verbose(1,time.asctime())
        
        # 1. Overlap projection - a large loop where probe and object are refined.
        verbose(1, " - projection 1: overlap constraint - ")
        tm = time.time()
        prch0 = 0

        #subpix deactivated in DM
        #subpix_started = subpix and (it >= subpix_start)        
        
        for inner in range(10):

            if not ((probe_change_start < it) and (inner == 0) and probe_before_object):
                # Additional condition to allow updating the probe first

                #obj.fill(0.)
                if parallel and prank != 0:
                    obj_denom.fill(0.)
                    obj.fill(0.)
                else:
                    if DM_smooth_obj:
                        obj[:] = DM_smooth_amplitude * gaussian_filter(obj,DM_smooth_std)
                        obj_denom.fill(DM_smooth_amplitude)
                    else:
                        obj.fill(0.)
                        obj_denom.fill(.001)

                for x in iter_flat:
                    if x is None:
                        continue
                    psi,pr,npr,prdenom,ob,nob,obdenom,fo,fw,prw,objw,sp = x
                    if fo:
                        continue
                    if subpix_started:
                        pr = do_subpix(pr,sp)
                    ob += pr.conj() * psi *prw
                    obdenom += U.abs2(pr) *prw
                
                if parallel:
                    tm_old,tm = tm,time.time()
                    proj1_time +=  tm - tm_old;
                    comm.Allreduce(MPI.IN_PLACE, obj)
                    comm.Allreduce(MPI.IN_PLACE, obj_denom)
                    tm_old,tm = tm,time.time()
                    wait1_time += tm - tm_old;

                obj /= obj_denom

                for f in hooks['obj_inner_loop_DM']:
                    verbose(3, 'Calling obj_inner_loop_DM hooks')
                    f(locals(),globals())

                if p.flat_object:
                    obj[-1] = 1.
    
                # Object clipping between clip_min and clip_max
                if clip_object:
                    aobj = np.abs(obj);
                    phobj = np.exp(1j* np.angle(obj))
                    too_high = (aobj > clip_max)
                    too_low = (aobj < clip_min)
                    obj[too_high] = clip_max*phobj[too_high]
                    obj[too_low] = clip_min*phobj[too_low]
            
            # Probe update: exit the inner loop if it is not time yet.
            if probe_change_start >= it:
                break
    
            if parallel and prank != 0:
                nprobe.fill(0.)
                probe_denom.fill(0.)
            else:
                nprobe[:] = cfact * probe
                probe_denom.fill(cfact)

            for x in iter_flat:
                if x is None:
                    continue
                psi,pr,npr,prdenom,ob,nob,obdenom,fo,fw,prw,objw,sp = x
                tpr = psi * ob.conj() * objw
                tprdenom = U.abs2(ob) * objw
                if subpix_started:
                    tpr = do_subpix(tpr,sp,forward=False)
                    tprdenom = do_subpix(tprdenom,sp,forward=False)
                if fo:
                    npr += fw * tpr
                    prdenom += fw * tprdenom
                else:
                    npr += tpr
                    prdenom += tprdenom

            if use_empty_probe:
                nprobe += empty_probe_strength * empty_probe_psi
                probe_denom += empty_probe_strength
                
            if parallel:
                tm_old,tm = tm,time.time()
                proj1_time += tm - tm_old;
                comm.Allreduce(MPI.IN_PLACE,nprobe)
                comm.Allreduce(MPI.IN_PLACE,probe_denom)
                tm_old,tm = tm,time.time()
                wait1_time += tm - tm_old;
                
            nprobe /= probe_denom
            if use_probe_support:
                nprobe *= probe_support
            prch = U.norm(probe - nprobe);
            if not parallel or prank==0:
                print('Change in probe: %f' % prch)
            probe[:] = nprobe
    
            # Stop iterating once the probe change has dropped well below
            # the first recorded change.
            if prch0 == 0:
                prch0 = prch
            elif prch < .1*prch0:
                break
        
        if p.proportional_probes and probe_change_start < it:
            verbose(3, 'Making probes proportional now.')
            #if Nmodes > 1:
            #    raise RuntimeError('Probe averaging has not been modified to take into account multiple modes')
            probe_norm = [U.norm(pr) for pr in probe]
            probe_normalized = [pr / U.norm(pr) for pr in probe]
            probe_normalized_averaged = sum(ppp for ppp in probe_normalized)/len(probe_normalized)
            best_angle = [np.angle((pr * probe_normalized_averaged.conj()).sum()) for pr in probe_normalized]
            probe_factor = [prn * np.exp(1j*pra) for prn,pra in zip(probe_norm, best_angle)]
            new_probe_normalized = [pr/prn for pr,prn in zip(probe, probe_factor)]
            new_probe_averaged = sum(nppp for nppp in new_probe_normalized)/len(new_probe_normalized)
            for iprobe_index in range(num_probes):
                probe[iprobe_index] = probe_factor[iprobe_index]*new_probe_averaged

        #if p.average_phases:
        #    new_phase=np.array([np.angle(ob) for ob in obj]).mean(0)
        #    for iobj_index in range(len(obj)):
        #        obj[iobj_index]=np.abs(obj[iobj_index])*np.exp(1j*new_phase) 
            
        tm_old,tm = tm,time.time()
        proj1_time += tm - tm_old;
    
        er2 = 0.
        # 2. Fourier projection + complete diffmap loop
        verbose(1, ' - projection 2: Fourier modulus constraint - ')
        rf = 0
        rf_nrm = 0
        update_counter = 0
        for psilist, prlist, oblist, fmagi, fmaski, dp_rampi, dp_ramp_conji, sp_param, parlist in iter_data:
            if fmagi is None:
                continue
            if subpix_started:
                prlist = [do_subpix(pr, sp_param) for pr in prlist]

            p1 = [pr*ob for pr,ob in izip(prlist,oblist)]

            if dp_rampi is None:
                f = [fprop( 2*p1i - psi,gen) for (p1i,psi,gen) in izip(p1,psilist,parlist)]
            else:
                f = [fprop(dp_rampi * (2*p1i - psi),gen) for (p1i,psi,gen) in izip(p1,psilist,parlist)]

            af = np.sqrt(sum(U.abs2(fi) for fi in f))
            fdev = af - fmagi
            fdev2 = fmaski*fdev**2
            power = fdev2.mean()  / fmaski.mean()
            #if not parallel or prank==0:
            #    print('%1.2f' % power)
            if power > pbound:
                update_counter +=1
                renorm = np.sqrt(pbound / power)
                fm = (1-fmaski) + fmaski*(fmagi + fdev*renorm)/(af + 1e-10)
                if dp_rampi is None:
                    p2 = [ifprop(fm*fi,gen) for (fi,gen) in izip(f,parlist) ]
                else:
                    p2 = [dp_ramp_conji * ifprop(fi,gen) for (fi,gen) in izip(f,parlist)]
                df = [p2i - p1i for (p2i, p1i) in izip(p2, p1)]
            else:
                df = [p1i-psi for (p1i,psi) in izip(p1,psilist)]
            for (psi,dfi) in izip(psilist, df):
                psi += dfi
                er2 += U.norm2(dfi)
    
        if use_empty_probe:
            #p1 = probe[0]
            f = [fprop( 2*pr - epsi,0) for (pr,epsi) in izip(probe[0],empty_probe_psi)]
            af = np.sqrt(sum(U.abs2(fi) for fi in f))
            fdev = af - empty_probe_fmag
            fdev2 = empty_probe_mask*fdev**2
            power = fdev2.mean()
            if power > pbound:
                renorm = np.sqrt(pbound / power)
                fm = (1-empty_probe_mask) + empty_probe_mask*(empty_probe_fmag + fdev*renorm)/(af + 1e-10)
                p2 = [ifprop(fm*fi,0) for (fi) in f]
                df = [p2i - pr for (p2i, pr) in izip(p2, probe[0])]
            else:
                df = [pr-psi for (pr,psi) in izip(probe[0],empty_probe_psi)]
            for (psi,dfi) in izip(empty_probe_psi, df):
                psi += dfi
                er2 += U.norm2(dfi)

        tm_old,tm = tm,time.time()
        proj2_time += tm - tm_old
        
        if parallel:
            uc = np.array([update_counter])
            comm.Allreduce(MPI.IN_PLACE, uc)
            update_counter = uc[0]
            er2 = np.array([er2])
            comm.Allreduce(MPI.IN_PLACE, er2)
            er2 = er2[0]

        tm_old,tm = tm,time.time()
        wait2_time += tm - tm_old
            
        verbose(3, '%d updates from fourier modulus constraint. Fraction of %.2f' % (update_counter,update_counter / np.float(Ndata)))
        if np.isnan(er2):
            raise RuntimeError('Error is NaN! If you are using FFTW, make sure ffts are called with the right type (double/single)')
        """\
        # Hack to check datatypes and nans, left there for documentation.
        for k,v in locals().iteritems():
            try:
                dt = v.dtype.name
                is_single = (dt in ['complex64', 'float32'])
                is_a_nan = np.any(np.isnan(v))
                print '%30s : %20s - %10s' % (k, 'single' if is_single else dt.name, 'NAN!' if is_a_nan else 'no nan')
            except:
                try:
                    v = v[0]
                    dt = v.dtype.name
                    is_single = (dt in ['complex64', 'float32'])
                    is_a_nan = np.any(np.isnan(v))
                    print '%30s : %20s - %10s' % (k, 'single' if is_single else dt.name, 'NAN!' if is_a_nan else 'no nan')
                except:
                    print '%20s : %s' % (k, ' -- no dtype -- ')
        """

        err.append(np.sqrt(er2/(max_power*Npts)))

        if compute_LL:
            LL_DM.append(LL_in_DM())
            verbose(1,'Error: %12.3f\tNeg. LL: %12.3g' % (err[-1], LL_DM[-1]))
        else:
            verbose(1,'Error: %12.3f' % err[-1])
    
        tm_old,tm = tm,time.time()
        llindm_time += tm - tm_old
        
        """\
        if (p.doplot or p.dump_plot) and (not parallel or prank==0) and (it % p.plot_interval == 0):
            nobj = U.rmphaseramp(obj[0], pr_nrm[0])
            mean_nobj = (nobj*pr_nrm[0]).sum() / pr_nrm[0].sum()
            angle_obj = np.angle(nobj / mean_nobj)
            abs_obj = np.abs(nobj)
            angle_obj_bounds = (angle_obj[plot_mask].min(), angle_obj[plot_mask].max())
            abs_obj_bounds = (abs_obj[plot_mask].min(), abs_obj[plot_mask].max())
            plot_axes[0].imshow(abs_obj,vmin=abs_obj_bounds[0], vmax=abs_obj_bounds[1])
            plot_axes[1].imshow(angle_obj,vmin=angle_obj_bounds[0], vmax=angle_obj_bounds[1])
            plot_axes[2].imshow(U.imsave(probe[0]))
            plot_axes[3].plot(err)
            if p.doplot:
                plot_fig.canvas.draw()
            if p.dump_plot:
                try:
                    dump_plot_file = p.dump_plot_patt % dump_plot_counter
                except TypeError:
                    dump_plot_file = p.dump_plot_patt
                plot_fig.savefig(dump_plot_file)
                dump_plot_counter += 1
        """

        if (p.doplot or p.dump_plot) and (not parallel or prank==0):
            if (it % p.plot_interval == 0):
                plot_fig.plot(p)
                if p.dump_plot:
                    try:
                        dump_plot_file = p.dump_plot_patt % dump_plot_counter
                    except TypeError:
                        dump_plot_file = p.dump_plot_patt
                    ispdf = (os.path.splitext(dump_plot_file)[-1].lower() == '.pdf')
                    if ispdf:
                        plot_fig.savefig(dump_plot_file, dpi=600)
                    else:
                        plot_fig.savefig(dump_plot_file)
                    dump_plot_counter += 1
            if p.doplot:
                plot_fig.draw()
 
        if dump_object and (it % dump_object_interval == 0) and (not parallel or prank==0):
            print('Dumping!')
            io.h5write(dump_object_pattern % it, obj=obj)            
        if dump_probe and (it % dump_probe_interval == 0) and (not parallel or prank==0):
            io.h5write(dump_probe_pattern % it, probe=probe)            

        tm_old,tm = tm,time.time()
        plot_time += tm - tm_old

        if (it >= average_start) and (it % average_interval == 0):
            avob += obj
            numav += 1
 
        for f in hooks['loop_DM']:
            verbose(3, 'Calling loop-DM hooks')
            f(locals(),globals())

    for f in hooks['post_DM']:
        verbose(3, 'Calling post-DM hooks')
        f(locals(),globals())

    header = ''
    if parallel:
        header = 'Process # %d ' % prank

    msg = """\
    {header} Finished
    {header} Time elapsed computing in projection 1: {0:6.2f} seconds
    {header} Time elapsed computing in projection 2: {1:6.2f} seconds
    {header} Time elapsed waiting for MPI in projection 1: {2:6.2f} seconds
    {header} Time elapsed waiting for MPI in projection 2: {3:6.2f} seconds
    {header} Time elapsed computing Log-Likelyhood : {4:6.2f} seconds
    {header} Time spent plotting: {5:6.2f} seconds""".format(proj1_time, proj2_time, wait1_time,wait2_time,llindm_time, plot_time, header=header)

    verbose(1,msg,mpi=True)
    
    # Average
    if average_start < numit:
        object = avob / numav
    else:
        object = obj
    

    # Store important variables
    p.obj = obj
    p.object = object
    p.probe = probe
    p.err = err
    p.history.append(('DM', numit, err))
    if hasattr(verbose, 'get_log'):
        p.verbose_log = verbose.get_log()
    # Save stuff
    if p.save and (not parallel or prank==0):
        #filename = p.run_name + '_DM.h5'
        #save_run(filename, p.paramdict)
        save_run(save_file, p.paramdict)
        verbose(2, 'Saved reconstruction in file %s' % save_file)

    if p.MPI_timing:
        fname = os.path.abspath(os.path.curdir) + '/Timing_proc_%03d.h5' % prank
        io.h5write(fname, timing=comm.timing)

    #p.probe_view = probe_view
    #p.obj_view = obj_view
    #p.fmag = fmag

    # Last plot (blocking!)
#    if p.last_plot and (not parallel or prank==0):
#        pyplot.interactive(False)
#        plot_axes[0].imshow(np.abs(object[0]))
#        plot_axes[1].imshow(np.angle(object[0]))
#        plot_axes[2].imshow(U.imsave(probe[0,0]))
#        plot_axes[3].plot(err)
#        pyplot.show()

    return p.paramdict

def ptycho_ePIE(pdict=None, **kwargs):
    """\
    Ptychography reconstruction based on the extended PIE (ePIE) algorithm.

    Supports multiple probe and object modes, MPI parallelism, optional
    sub-pixel shifts of the diffraction patterns, near-field propagation,
    amplitude clipping of the object, and periodic dumping/plotting.

    Parameters are merged in this order of precedence: kwargs, pdict,
    default_parameters. Returns the updated parameter dictionary
    (p.paramdict) with obj, object, probe, err and history filled in.
    """
    # Create the parameter holder
    p = U.Param(kwargs, pdict, default_parameters, store=(0,1))

    verbose.set_level(p.verbose_level)
    verbose(3,'Entering ptycho_ePIE...')

    positions = p.positions
    asize = p.asize
    Npts = p.Npts

    save_file = p.save_file % 'ePIE'
    verbose(2, 'Reconstruction will be saved as %s' % save_file)

    # Load data (entries are None for frames owned by other MPI nodes)
    fmag, fmask = load_fmag(p)

    # Manage diffraction pattern shifts
    if p.dp_shift is True:
        verbose(2, 'dp_shift is True: automated shifts of the diffraction patterns will be used.')
        # Compute the average of every scan's diffraction patterns
        dp_average = np.zeros((p.Nscan,) + tuple(asize), FType)
        fmask_all = np.zeros((p.Nscan,) + tuple(asize), FType)
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            if fmag[lstart] is not None:
                fmask_all[iscan] = fmask[lstart]
            for jj in range(lstart, lend):
                if fmag[jj] is None:
                    continue
                dp_average[iscan] += fmag[jj]**2
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, dp_average)
            comm.Allreduce(MPI.IN_PLACE, fmask_all)
        dp_average /= p.Npts_scan
        fmask_all_sum = fmask_all.sum(axis=0)
        fmask_all_sum += (fmask_all_sum == 0)  # avoid division by zero below
        dp_global_average = (dp_average * fmask_all).sum(axis=0) / fmask_all_sum
        # Compute shift relative to global average (log scale tames the dynamic range)
        dp_shift = []
        for ii in range(len(dp_average)):
            r = U.shift_best(np.log(dp_global_average+1), np.log(dp_average[ii]+1), fmask_all[ii])[1]
            dp_shift.append(r)
        p.dp_shift = dp_shift
    elif p.dp_shift is not False:
        verbose(2, 'Using provided sub-pixel shifts for the diffraction patterns.')

    # Precompute per-frame Fourier-space phase ramps implementing the shifts
    dp_shift_ramp = [None]*Npts
    dp_shift_ramp_conj = [None]*Npts
    if p.dp_shift:
        verbose(3, ''.join([p.scans[ii] + ' : ' + str(p.dp_shift[ii]) + '\n' for ii in range(len(p.scans))]))
        fx0, fx1 = U.fgrid(asize,1./asize)
        fx0 = np.fft.fftshift(fx0).astype(FType)
        fx1 = np.fft.fftshift(fx1).astype(FType)
        dp_shift_ramp_per_scan = [np.exp(2j * np.pi * (fx0*x0 + fx1*x1)).astype(CType) for x0,x1 in p.dp_shift]
        dp_shift_ramp_per_scan_conj = [np.exp(-2j * np.pi * (fx0*x0 + fx1*x1)).astype(CType) for x0,x1 in p.dp_shift]
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            for ll in range(lstart, lend):
                if fmag[ll] is not None:
                    dp_shift_ramp[ll] = dp_shift_ramp_per_scan[iscan]
                    dp_shift_ramp_conj[ll] = dp_shift_ramp_per_scan_conj[iscan]

    # Maximum power : used for normalization
    max_power = max([U.norm2(ff) if ff is not None else 0. for ff in fmag])
    if parallel:
        max_power = np.array([max_power])
        comm.Allreduce(MPI.IN_PLACE, max_power, op=MPI.MAX)
        max_power = max_power[0]

    # Total number of measurements - used for normalization in LL_in_ePIE
    tot_measpts = sum([fm.sum() for fm in fmask if fm is not None])
    if parallel:
        tot_measpts = np.array([tot_measpts])
        comm.Allreduce(MPI.IN_PLACE,tot_measpts)
        tot_measpts = tot_measpts.item()

    # Is this a continuing run?
    cont_run = False
    if p.hasattr('history'):
        verbose(3, 'Continuing previous run.')
        cont_run = True
        probe = p.probe
        obj = p.obj
    else:
        verbose(3, 'No previous run. Starting from initialized objects and probe.')
        p.history = []
        probe = p.probe
        # Normalize the initial probe to the maximum measured power
        probe *= np.sqrt(p.num_probes * max_power / U.norm2(probe))
        obj = p.object

    p.obj = obj
    p.probe = probe

    # Quick sanity check
    assert (asize == np.array(probe.shape[2:])).all()

    if p.pbound is None:
        # This formulation is consistent with Giewekemeyer (2010).
        pbound = .25 * p.fourier_relax_factor**2
        verbose(3, 'Computed pbound is %g (would be %g in the old formulation)' % (pbound, pbound * np.prod(asize) / max_power))
    else:
        # Renormalize pbound (old formulation)
        pbound = p.pbound * max_power / np.prod(asize)

    # ePIE step sizes for the object (alpha) and probe (beta) updates
    ePIE_alpha = p.ePIE_alpha
    ePIE_beta = p.ePIE_beta

    # Local references of various parameters
    numit = p.numit
    object_size = p.object_size
    clip_object = p.clip_object
    if clip_object:
        clip_max = p.clip_max
        clip_min = p.clip_min

    nearfield = p.nearfield

    # Local references of regular task parameters
    dump_data = p.dump_data
    if dump_data:
        dump_interval = p.dump_interval
        dump_filename = p.dump_pattern % {'run_name': p.run_name}
    dump_object = p.dump_object
    if dump_object:
        dump_object_interval = p.dump_object_interval
        dump_object_pattern = p.dump_object_pattern
    dump_probe = p.dump_probe
    if dump_probe:
        dump_probe_interval = p.dump_probe_interval
        dump_probe_pattern = p.dump_probe_pattern

    # Initialization
    err = []
    p.err = err
    rfact = []

    compute_LL = p.LL_in_ePIE
    if compute_LL:
        LL_ePIE = []
        p.LL_ePIE = LL_ePIE

    # Probe support
    use_probe_support = False
    if p.probe_support is not False:
        probe_support = p.probe_support
        use_probe_support = True

    subpix = p.subpix
    subpix_method = p.subpix_method
    subpix_start = p.subpix_start
    subpix_started = False
    do_subpix = None
    if subpix_method == 'fourier':
        do_subpix = subpix_fourier
    elif subpix_method == 'linear':
        do_subpix = subpix_linear
    else:
        raise RuntimeError('Unknown subpix method : %s' % str(subpix_method))

    osh = obj.shape
    sh = probe.shape
    num_probes = p.num_probes
    num_objects = p.num_objects
    Nmodes_probe = p.Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = p.Nmodes

    # This stores the probe and object updates - useful for MPI
    obj_change = np.zeros_like(obj)
    probe_change = np.zeros_like(probe)

    # Setup views
    position_ranges = [(x[3],            # Object index
                        x[0],            # start y
                        x[0]+sh[2],      # end y
                        x[1],            # start x
                        x[1]+sh[3],      # end x
                        x[2],            # probe index,
                        x[4],            # subpixel shift y
                        x[5])            # subpixel shift x
                        for x in positions]

    # list of list of object modes
    obj_view = [[obj[x[0],obm, x[1]:x[2],x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]
    obj_change_view = [[obj_change[x[0],obm, x[1]:x[2],x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]

    probe_view = [[probe[x[5],prm] for prm in range(Nmodes_probe)] for x in position_ranges]
    probe_change_view = [[probe_change[x[5],prm] for prm in range(Nmodes_probe)] for x in position_ranges]

    # Per-position subpixel-shift parameters for do_subpix
    if subpix:
        if subpix_method.lower() == 'fourier':
            sp_param = [np.exp(2j * np.pi * np.sum(U.fgrid(asize,-np.array([x[6], x[7]])/asize),axis=0)) for x in position_ranges]
        elif subpix_method.lower() == 'linear':
            sp_param = [-np.array([x[6],x[7]]) for x in position_ranges]
    else:
        sp_param = len(fmag)*[None]

    # List of flat object
    if p.flat_object:
        is_flat = [(x[3] == num_objects-1) for x in positions]
        flat_w = [1. if isfl else p.flat_object_weight for isfl in is_flat]
    else:
        is_flat = [None for x in positions]

    iter_data = zip(probe_view, probe_change_view, obj_view, obj_change_view, fmag, fmask, dp_shift_ramp, dp_shift_ramp_conj, sp_param, is_flat)

    # Eliminate the elements that don't contain data (if running in parallel)
    iter_data_node = [x for x in iter_data if x[4] is not None]

    # Make all nodes uniform to avoid blocking
    if parallel:
        node_length = comm.allgather(len(iter_data_node))
        if len(iter_data_node) < max(node_length):
            iter_data_node += (max(node_length)-len(iter_data_node)) * [10*(None,)]

    # Plotting
    if (p.doplot or p.dump_plot or p.last_plot) and (not parallel or prank==0):
        plot_interval = p.plot_interval
        if p.doplot:
            from matplotlib import pyplot
            pyplot.interactive(True)
        plot_fig = Plotter(p,interactive=p.doplot)
        if p.nearfield:
            p.plot_mask = np.zeros(osh[-2:], dtype=bool)
            dsh = (osh[-2] - sh[-2], osh[-1] - sh[-1])
            # FIX: stray debug print replaced by a verbose message
            verbose(3, 'Near-field plot margin dsh = %s' % str(dsh))
            p.plot_mask[dsh[0]:-dsh[0], dsh[1]:-dsh[1]] = True
        else:
            p.plot_mask = np.fft.fftshift(U.fvec2(osh[-2:]) < .25*(max(osh[-2:]) - max(sh[-2:]))**2)
        if p.dump_plot:
            dump_plot_counter = 0

    # FFT normalization factors making the forward/backward pair unitary
    a2 = np.prod(asize)
    if USE_FFTPACK:
        fnorm_fw = 1/np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = FFT.fft2
        ifft = FFT.ifft2
    else:
        fnorm_fw = 1/np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = np.fft.fft2
        ifft = np.fft.ifft2

    if nearfield:
        # Angular-spectrum propagator over distance z (lengths in pixel units)
        q2 = U.fvec2(asize, psize=(1./asize[0], 1./asize[1]))
        z = p.z / p.dx_spec[0]
        l = p.lam / p.dx_spec[0]
        propfact = np.exp(2j * np.pi * (z / l) * (np.sqrt(1 - q2*l**2) - 1) )
        ipropfact = propfact.conj()  # FIX: was indented with a tab character
        def fprop(x):
            return ifft(fft(x) * propfact)
        def ifprop(x):
            return ifft(fft(x) * ipropfact)
    else:
        def fprop(x):
            return fnorm_fw * fft(x)
        def ifprop(x):
            return fnorm_bw * ifft(x)

    for f in hooks['pre_ePIE']:
        verbose(3, 'Calling pre-ePIE hooks')
        f(locals(),globals())

    probe_change_start = p.probe_change_start
    ePIE_regul_probe = p.ePIE_regul_probe
    ePIE_regul_object = p.ePIE_regul_object
    ePIE_switch_probe = p.ePIE_switch_probe
    ePIE_switch_object = p.ePIE_switch_object

    def LL_in_ePIE():
        """\
        Computes the gaussian negative log-likelihood from within ePIE.
        """
        LL = np.array([0.])
        # FIX: fmask and the per-position subpixel parameters are zipped in;
        # the mask used to be read from a stale leaked loop variable (fmaski).
        for fmagi, fmaski, prlist, oblist, spi in zip(fmag, fmask, probe_view, obj_view, sp_param):
            if fmagi is None:
                continue
            if subpix_started:
                prlist = [do_subpix(pr, spi) for pr in prlist]
            fpsij = [fprop(pr * ob) for pr in prlist for ob in oblist]
            Imodel = sum(U.abs2(fi) for fi in fpsij)
            Ii = fmagi**2
            wi = fmaski/(Ii + 1)
            DI = Imodel - Ii
            LL += (wi * DI**2).sum()
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, LL)
        return LL.item()/tot_measpts

    # main loop
    for it in range(numit):

        if parallel:
            comm.Barrier()
        # FIX: single-scan case used to test len(p.scans)==0, which would have
        # indexed an empty list; a single scan is len(p.scans)==1.
        verbose(1,'%s, iteration # %d of %d' % (p.scans[0] if len(p.scans)==1 else p.scans[0] + ' - ' + p.scans[-1], it, numit))
        verbose(1,time.asctime())

        er2 = 0.
        obj_change.fill(0.)
        probe_change.fill(0.)

        #subpix deactivated in ePIE
        #subpix_started = subpix and (it >= subpix_start)

        # Object loop
        verbose(3, 'ePIE object update')
        shuffle(iter_data_node)
        # NOTE: the per-position subpixel parameter is unpacked as spi so it
        # does not shadow the sp_param list used by LL_in_ePIE.
        for prlist, dprlist, oblist, doblist, fmagi, fmaski, dp_rampi, dp_ramp_conji, spi, flatlist in iter_data_node:

            obj_change.fill(0.)

            if prlist is not None:

                # This will never happen (for now)
                if subpix_started:
                    prlist = [do_subpix(pr, spi) for pr in prlist]

                # Create flat list of exit waves - fast axis is prlist
                psilist = [pr*ob for ob in oblist for pr in prlist]

                # Propagate
                if dp_rampi is None:
                    f = [fprop(psi) for psi in psilist]
                else:
                    f = [fprop(dp_rampi * psi) for psi in psilist]

                # Fourier-magnitude projection, relaxed by pbound
                af = np.sqrt(sum(U.abs2(fi) for fi in f))
                fdev = af - fmagi
                fdev2 = fmaski*fdev**2
                power = fdev2.mean()
                if power > pbound:
                    renorm = np.sqrt(pbound / power)
                    fm = (1-fmaski) + fmaski*(fmagi + fdev*renorm)/(af + 1e-10)
                    if dp_rampi is None:
                        psi_proj = [ifprop(fm*fi) for fi in f]
                    else:
                        # FIX: the modulus constraint (fm) was dropped in the
                        # shifted-diffraction-pattern branch
                        psi_proj = [dp_ramp_conji * ifprop(fm*fi) for fi in f]
                    er2 += sum(U.norm2(p2-p1) for p2,p1 in zip(psi_proj, psilist))
                else:
                    # Within the noise bound: keep the exit waves (copies)
                    psi_proj = [p1+0. for p1 in psilist]

                # Regroup psi_proj per object mode (fast axis is prlist)
                psi_ob = [psi_proj[Nmodes_probe*k:Nmodes_probe*(k+1)] for k in range(Nmodes_object)]

                # Total power for probe and object
                a2pr = sum(U.abs2(pr) for pr in prlist)
                a2pr_denom = (1-ePIE_switch_object)*a2pr.max() + ePIE_switch_object*(ePIE_regul_object*a2pr.max() + a2pr)

                # Compute the ePIE object update
                for ob, dob, psi_p in zip(oblist, doblist, psi_ob):
                    dob += (ePIE_alpha / a2pr_denom) * (sum(pr.conj()*psi for pr, psi in zip(prlist, psi_p)) - ob * a2pr)

            if parallel:
                comm.Allreduce(MPI.IN_PLACE, obj_change)
            obj += obj_change

            if p.flat_object:
                obj[-1] = 1.

            # Object clipping between clip_min and clip_max
            if clip_object:
                aobj = np.abs(obj)
                phobj = np.exp(1j* np.angle(obj))
                too_high = (aobj > clip_max)
                too_low = (aobj < clip_min)
                obj[too_high] = clip_max*phobj[too_high]
                obj[too_low] = clip_min*phobj[too_low]

        # Probe loop
        if it >= probe_change_start:
            verbose(3, 'ePIE probe update')
            shuffle(iter_data_node)
        for prlist, dprlist, oblist, doblist, fmagi, fmaski, dp_rampi, dp_ramp_conji, spi, flatlist in iter_data_node:
            if it < probe_change_start: break

            probe_change.fill(0.)

            if prlist is not None:

                # This will never happen (for now)
                if subpix_started:
                    prlist = [do_subpix(pr, spi) for pr in prlist]

                # Flat list of exit waves - fast axis is oblist
                psilist = [pr*ob for pr in prlist for ob in oblist]

                # Propagate
                if dp_rampi is None:
                    f = [fprop(psi) for psi in psilist]
                else:
                    f = [fprop(dp_rampi * psi) for psi in psilist]

                # Fourier-magnitude projection, relaxed by pbound
                af = np.sqrt(sum(U.abs2(fi) for fi in f))
                fdev = af - fmagi
                fdev2 = fmaski*fdev**2
                power = fdev2.mean()
                if power > pbound:
                    renorm = np.sqrt(pbound / power)
                    fm = (1-fmaski) + fmaski*(fmagi + fdev*renorm)/(af + 1e-10)
                    if dp_rampi is None:
                        psi_proj = [ifprop(fm*fi) for fi in f]
                    else:
                        # FIX: apply the modulus constraint in this branch too
                        psi_proj = [dp_ramp_conji * ifprop(fm*fi) for fi in f]
                    er2 += sum(U.norm2(p2-p1) for p2,p1 in zip(psi_proj, psilist))
                else:
                    # FIX: this path used to apply the projection with a stale
                    # (or undefined) fm; mirror the object loop instead.
                    psi_proj = [p1+0. for p1 in psilist]

                # Regroup psi_proj per probe mode (fast axis is oblist)
                psi_pr = [psi_proj[Nmodes_object*k:Nmodes_object*(k+1)] for k in range(Nmodes_probe)]

                # Total power for probe and object
                a2ob = sum(U.abs2(ob) for ob in oblist)
                a2ob_denom = (1-ePIE_switch_probe)*a2ob.max() + ePIE_switch_probe*(ePIE_regul_probe*a2ob.max() + a2ob)

                # Compute the ePIE probe update
                for pr, dpr, psi_o in zip(prlist, dprlist, psi_pr):
                    dpr += (ePIE_beta / a2ob_denom) * (sum(ob.conj()*psi for ob, psi in zip(oblist, psi_o)) - pr * a2ob)

            if parallel:
                comm.Allreduce(MPI.IN_PLACE, probe_change)
            probe += probe_change

        if parallel:
            er2 = np.array([er2])
            comm.Allreduce(MPI.IN_PLACE, er2)
            er2 = er2[0]

        err.append(np.sqrt(er2/(max_power*Npts)))

        if compute_LL:
            LL_ePIE.append(LL_in_ePIE())
            verbose(1,'Error: %12.3f\tNeg. LL: %12.3g' % (err[-1], LL_ePIE[-1]))
        else:
            verbose(1,'Error: %12.3f' % err[-1])

        if (p.doplot or p.dump_plot) and (not parallel or prank==0):
            if (it % p.plot_interval == 0):
                plot_fig.plot(p)
                if p.dump_plot:
                    try:
                        dump_plot_file = p.dump_plot_patt % dump_plot_counter
                    except TypeError:
                        # dump_plot_patt has no % placeholder: fixed file name
                        dump_plot_file = p.dump_plot_patt
                    ispdf = (os.path.splitext(dump_plot_file)[-1].lower() == '.pdf')
                    if ispdf:
                        plot_fig.savefig(dump_plot_file, dpi=600)
                    else:
                        plot_fig.savefig(dump_plot_file)
                    dump_plot_counter += 1
            if p.doplot:
                plot_fig.draw()

        if dump_object and (it % dump_object_interval == 0) and (not parallel or prank==0):
            # FIX: stray debug print replaced by a verbose message
            verbose(3, 'Dumping object array')
            io.h5write(dump_object_pattern % it, obj=obj)
        if dump_probe and (it % dump_probe_interval == 0) and (not parallel or prank==0):
            io.h5write(dump_probe_pattern % it, probe=probe)

        for f in hooks['loop_ePIE']:
            verbose(3, 'Calling loop-ePIE hooks')
            f(locals(),globals())

    for f in hooks['post_ePIE']:
        verbose(3, 'Calling post-ePIE hooks')
        f(locals(),globals())

    # Store important variables
    p.obj = obj
    p.object = obj
    p.probe = probe
    p.err = err
    p.history.append(('ePIE', numit, err))
    if hasattr(verbose, 'get_log'):
        p.verbose_log = verbose.get_log()
    # Save stuff
    if p.save and (not parallel or prank==0):
        save_run(save_file, p.paramdict)
        verbose(2, 'Saved reconstruction in file %s' % save_file)

    if p.MPI_timing:
        fname = os.path.abspath(os.path.curdir) + '/Timing_proc_%03d.h5' % prank
        io.h5write(fname, timing=comm.timing)

    return p.paramdict

def ptycho_ePIE_old(pdict=None, **kwargs):
    """\
    Deprecated ptychography reconstruction (extended PIE, pre-modes).

    This legacy implementation was never updated for multiple probe/object
    modes and is superseded by ptycho_ePIE. It raised unconditionally on
    entry; the unreachable legacy body (which also referenced undefined
    names such as ePIE_alpha) has been removed.

    Raises
    ------
    RuntimeError
        Always, with the original message.
    """
    raise RuntimeError('Not done implementing modes!')

def ptycho_ML(pdict=None, **kwargs):
    """\
     Ptychography reconstruction based on optimization of the Gaussian log-likelihood
    """
    
    # Create the parameter holder
    p = U.Param(kwargs, pdict, default_parameters, store=(0,1))
    verbose.set_level(p.verbose_level)

    if parallel:
        comm.Barrier()

    ML_type = p.ML_type
    positions = p.positions
    asize = p.asize
    probe = p.probe.astype(CType)
    obj = p.object.astype(CType)
    assert (asize == np.array(probe.shape[2:])).all()
    Npts = p.Npts
    Ndata = p.Ndata
    Nmodes_probe = p.Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = p.Nmodes
    debug_subpix_disp = p.debug_subpix_disp
    pos_subpix_portion = p.pos_subpix_portion

    save_file = p.save_file % 'ML'
    verbose(2, 'Reconstruction will be saved as %s' % save_file)
   
    num_probes = p.num_probes
    num_objects = p.num_objects

    subpix = p.subpix
    subpix_disp = p.subpix_disp
    subpix_disp_start = p.subpix_disp_start
    subpix_method = p.subpix_method
    subpix_start = p.subpix_start
    subpix_started = False
    subpix_disp_started = False
    
    if subpix_disp:
		try:
		    fn = p.datainfo.keys()[0]
		    real_subpix_displacements = io.h5read(fn)['displacements']
		except:
			real_subpix_displacements = np.zeros((Ndata,2))
    
    do_subpix = None
    if subpix_method == 'fourier':
        do_subpix = subpix_fourier
    elif subpix_method == 'linear':
        do_subpix = subpix_linear
    else:
        raise RuntimeError('Unknown subpix method : %s' % str(subpix_method))
   
    use_empty_probe = p.use_empty_probe
    if use_empty_probe and ML_type != 'Gauss':
        raise RuntimeError('empty_probe is only implemented for Gauss ML.')

    # Load data
    if ML_type == 'Gauss':
        I,w = load_intens(p)
        data_in_node = [II is not None for II in I]
        max_power = max([II.sum() if II is not None else 0. for II in I])
        tot_power = sum([II.sum() if II is not None else 0. for II in I])
        tot_measpts = sum([(ww>0).sum() for ww in w if ww is not None])
    elif ML_type == 'Poisson':
        I,w = load_intens(p)
        data_in_node = [II is not None for II in I]
        max_power = max([II.sum() if II is not None else 0. for II in I])
        tot_power = sum([II.sum() if II is not None else 0. for II in I])
        tot_measpts = sum([(ww>0).sum() for ww in w if ww is not None])
        from scipy import special
        LLbase = [(special.gammaln(II+1)).sum() if II is not None else None for II in I]
        fmask = [(ww>0) if ww is not None else None for ww in w]
        I_sum = [II.sum() if II is not None else None for II in I]
    elif ML_type == 'Euclid':
        fmag, fmask = load_fmag(p)
        data_in_node = [ff is not None for ff in fmag]
        max_power = max([(ff**2).sum() if ff is not None else 0. for ff in fmag])
        tot_power = sum([(ff**2).sum() if ff is not None else 0. for ff in fmag])
        tot_measpts = sum([fm.sum() for fm in fmask if fm is not None])
        fmag_sum = [fm.sum() if fm is not None else None for fm in fmag]
    if use_empty_probe:
        empty_probe_I = p.empty_probe_data
        empty_probe_w = p.empty_probe_strength / (empty_probe_I + 1)
        empty_probe_fmag = np.sqrt(p.empty_probe_data)
        empty_probe_mask = p.empty_probe_mask
        empty_probe_strength = p.empty_probe_strength # We should not need this since it is taken care of in empty_probe_w
        max_power = max([max_power, empty_probe_I.sum()])
        tot_power += empty_probe_I.sum()

    if parallel:
        comm.Barrier()
        max_power = np.array([max_power])
        max_power = max_power.astype(np.float64) #Work around to avoid MPI truncation errors when using large diffraction patterns and single precision
        tot_power = np.array([tot_power])
        tot_measpts = np.array([tot_measpts])
        comm.Allreduce(MPI.IN_PLACE, max_power, op=MPI.MAX)
        max_power = max_power.astype(FType) # See above, to avoid potential up or downcasting if max_power is multiplied somewhere else later
        comm.Allreduce(MPI.IN_PLACE, tot_power)
        comm.Allreduce(MPI.IN_PLACE, tot_measpts)
        max_power = max_power.item()
        tot_power = tot_power.item()
        tot_measpts = tot_measpts.item()

    if p.dp_shift is True:
        verbose(2, 'dp_shift is True: automated shifts of the diffraction patterns will be used.')
        # Compute the average of every scan's diffraction patterns
        dp_average = np.zeros((p.Nscan,) + tuple(asize), FType)
        fmask_all = np.zeros((p.Nscan,) + tuple(asize), FType)
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            if ML_type in ['Gauss', 'Poisson']:
                if data_in_node[lstart]:
                    fmask_all[iscan] = (w[lstart]>0)
                for jj in range(lstart, lend):
                    if data_in_node[jj]:
                        dp_average[iscan] += I[jj]
            elif ML_type == 'Euclid':
                if data_in_node[lstart]:
                    fmask_all[iscan] = fmask[lstart]
                for jj in range(lstart, lend):
                    if data_in_node[jj]:
                        dp_average[iscan] += fmag[jj]**2
                
        if parallel:
            comm.Barrier()
            comm.Allreduce(MPI.IN_PLACE, dp_average)
            comm.Allreduce(MPI.IN_PLACE, fmask_all)
        dp_average /= p.Npts_scan
        fmask_all_sum = fmask_all.sum(axis=0)
        fmask_all_sum += (fmask_all_sum == 0)
        dp_global_average = (dp_average * fmask_all).sum(axis=0) / fmask_all_sum
        # Compute shift relative to global average
        dp_shift = []
        for ii in range(len(dp_average)):
            #r = U.shift_best(dp_global_average, dp_average[ii], fmask[ii])[1]
            r = U.shift_best(np.log(dp_global_average+1), np.log(dp_average[ii]+1), fmask_all[ii])[1]
            dp_shift.append(r)
        p.dp_shift = dp_shift
    elif p.dp_shift is not None:
        verbose(2, 'Using provided sub-pixel shifts for the diffraction patterns.')

    dp_shift_ramp = [None]*Npts
    if p.dp_shift:
        verbose(3, ''.join([p.scans[ii] + ' : ' + str(p.dp_shift[ii]) + '\n' for ii in range(len(p.scans))]))
        fx0, fx1 = U.fgrid(asize,1./asize)
        fx0 = np.fft.fftshift(fx0)
        fx1 = np.fft.fftshift(fx1)
        dp_shift_ramp_per_scan = [np.exp(2j * np.pi * (fx0*x0 + fx1*x1)).astype(CType) for x0,x1 in p.dp_shift]
        dp_shift_ramp = [None]*Npts
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            for ll in range(lstart, lend):
                if data_in_node[ll]:
                    dp_shift_ramp[ll] = dp_shift_ramp_per_scan[iscan]

    # Now this is a diiirty hack, because the ML max_procedure is incapable of handling a probe with beamstop
    if p.highq_exposure is not None:
        if len(p.highq_exposure)>1:
            max_power*=p.highq_exposure[-1]**2 / num_probes #this counteracts the division by num_probes below

    # Is this a continuing run?
    cont_run = False
    if p.hasattr('history'):
        cont_run = True
        probe = p.probe
        probe_amp = np.sqrt(num_probes * max_power)
        obj = p.obj
    else:
        p.history = []
        probe = p.probe
        probe_amp = np.sqrt(num_probes * max_power)
        probe *= probe_amp / U.norm(probe)
        obj = p.object

    object_smooth_filter = prepare_smoothing_preconditioner(p.object_smooth_gradient)
    object_smooth_gradient = (object_smooth_filter is not None)

    # Metric correction parameter
    # scale_probe_object < 1 gives greater weight to the probe compared to the object.
    scale_probe_object = p.scale_probe_object
    
    float_intens = p.float_intens    
    if float_intens:
        float_intens_coeff = [None for i in range(len(I))]    

    # altmin = False turns off automatic rescaling of probe and object gradients
    # In principle, true should always work better, so this option should be removed
    # once that's confirmed.
    #altmin = True
    #altmin = False
    altmin = p.scale_precond

    # Number of iterations
    numit = p.numit

    # Interval to compute exact gradient instead of approximate
    quad_interval = p.quad_interval

    # Make local references of regular task parameters
    dump_data = p.dump_data
    if dump_data:
        dump_interval = p.dump_interval
        dump_filename = p.dump_pattern % {'run_name': p.run_name}
    dump_object = p.dump_object
    if dump_object:
        dump_object_interval = p.dump_object_interval
        dump_object_pattern = p.dump_object_pattern
    dump_probe = p.dump_probe
    if dump_probe:
        dump_probe_interval = p.dump_probe_interval
        dump_probe_pattern = p.dump_probe_pattern

    reg_del2 = p.reg_del2
    reg_del2_amplitude = p.reg_del2_amplitude
    reg_TV = p.reg_TV
    reg_TV_amplitude = p.reg_TV_amplitude
    reg_Huber = p.reg_Huber
    reg_Huber_amplitude = p.reg_Huber_amplitude
    reg_Huber_parameter = p.reg_Huber_parameter

    regularizer = None
    if reg_del2:
        obj_Npix = obj.size
        expected_obj_var = obj_Npix / tot_power  # Poisson
        reg_rescale  = tot_measpts / (8. * obj_Npix * expected_obj_var) 
        verbose(2, 'Rescaling regularization amplitude using the Poisson distribution assumption.')
        verbose(2, 'Factor: %8.5g' % reg_rescale)
        reg_del2_amplitude *= reg_rescale
        regularizer = Regul_del2(amplitude=reg_del2_amplitude)
        R_list = []
        p.R_list = R_list

    # Probe support
    use_probe_support = False
    if p.probe_support is not False:
        probe_support = p.probe_support
        use_probe_support = True
        probe *= probe_support

    osh = obj.shape
    sh = probe.shape
    p.obj = obj
    p.probe = probe

    #in_node = [II is not None for II in I for mode in range(Nmodes)]

    # These are the arrays shaped like object
    grad_obj = np.zeros(osh,dtype=CType)
    new_grad_obj = np.zeros(osh,dtype=CType)
    h_obj = np.zeros(osh,dtype=CType)
    nrm_obj = np.zeros(osh,dtype=FType)
    obj_noramp = np.zeros(osh, dtype=CType)
 
    # These are the arrays shaped like probe
    grad_probe = np.zeros(sh,dtype=CType)
    new_grad_probe = np.zeros(sh,dtype=CType)
    h_probe = np.zeros(sh,dtype=CType)
    
    # List of object array coordinates used for slicing
    position_ranges = [(x[3], x[0],x[0]+sh[2], x[1], x[1]+sh[3]) for x in positions]
    
    # Set up object views (looping through object modes, probe modes and positions)
    # obj_view = [obj[x[0],obm, x[1]:x[2],x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    # grad_obj_view = [grad_obj[x[0],obm, x[1]:x[2],x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    # new_grad_obj_view = [new_grad_obj[x[0],obm, x[1]:x[2],x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    # h_obj_view = [h_obj[x[0],obm, x[1]:x[2],x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    nrm_obj_view = [nrm_obj[x[0],obm, x[1]:x[2],x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
   
    # Set up probe views (looping through object modes, probe modes and positions)
    # probe_view = [probe[x[2],prm] for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    # grad_probe_view = [grad_probe[x[2],prm] for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    # new_grad_probe_view = [new_grad_probe[x[2],prm] for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    # h_probe_view = [h_probe[x[2],prm] for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]

    # These are the same views as above, but re-ordered to make easier updates
    # probe_mode_view = [[probe[x[2],prm] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in positions]
    # obj_mode_view = [[obj[x[0],obm,x[1]:x[2],x[3]:x[4]] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in position_ranges]
    # new_grad_probe_mode_view = [[new_grad_probe[x[2],prm] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in positions]
    # new_grad_obj_mode_view = [[new_grad_obj[x[0],obm,x[1]:x[2],x[3]:x[4]] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in position_ranges]
    # h_probe_mode_view = [[h_probe[x[2],prm] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in positions]
    # h_obj_mode_view = [[h_obj[x[0],obm,x[1]:x[2],x[3]:x[4]] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in position_ranges]
    probe_mode_view = [[probe[x[2],prm] for prm in range(Nmodes_probe)] for x in positions]
    obj_mode_view = [[obj[x[0],obm,x[1]:x[2],x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]
    new_grad_probe_mode_view = [[new_grad_probe[x[2],prm] for prm in range(Nmodes_probe)] for x in positions]
    new_grad_obj_mode_view = [[new_grad_obj[x[0],obm,x[1]:x[2],x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]
    h_probe_mode_view = [[h_probe[x[2],prm] for prm in range(Nmodes_probe)] for x in positions]
    h_obj_mode_view = [[h_obj[x[0],obm,x[1]:x[2],x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]

    ##################################################################################################################################
    # -6- subpixel shift
    ##################################################################################################################################
    if subpix or subpix_disp:
        subpix_shift_probe = []
        subpix_shift_linear = []
        subpix_shift_initial = []
        if subpix_method.lower() == 'fourier':
            for x in positions:
                sp_shift = -np.array([x[4], x[5]])
                subpix_shift_probe.append(np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0)))
                subpix_shift_linear.append(-np.array([x[4], x[5]]))
                subpix_shift_initial.append(-np.array([x[4], x[5]]))
        elif subpix_method.lower() == 'linear':
            for x in positions:
                subpix_shift_linear.append(-np.array([x[4], x[5]]))
                subpix_shift_probe.append(-np.array([x[4], x[5]]))
                subpix_shift_initial.append(-np.array([x[4], x[5]]))

    """\
    # Take care of all the ugly indexing here.
    # This makes the loop parts below more readable.
    iter_data = []          # A list of lists, made to map to intensities
    for i in range(Ndata):
        if not data_in_node[i]:
            iter_data.append(9*[None])
            continue
        x = position_ranges[i]
        probelist = []
        objlist = []
        if subpix:
            if subpix_method.lower() == 'fourier':
                sp_shift = np.array([x[6],x[7]])
                subpix_parameter = np.exp(2j * np.pi * np.sum(U.fgrid(asize,sp_shift/asize),axis=0))
            elif subpix_method.lower() == 'linear':
                subpix_parameter = np.array([x[6],x[7]])
                
        for prm in range(Nmodes_probe):
            for obm in range(Nmodes_object):
                probelist.append(probe[x[5],prm])
                objlist.append(obj[x[0],obm, x[1]:x[2],x[3]:x[4]])
        iter_data.append([\
            probelist,                                                     # 1 - probe views
            objlist,                                                       # 2 - object views
            fmag[i],                                                       # 3 - fmag
            fmask[i],                                                      # 4 - fmask
            dp_shift_ramp[i],                                              # 5 - dp shift
            dp_shift_ramp_conj[i]],                                        # 6 - dp shift (conj)
            subpixel_parameter)                                            # 7 - Parameter for probe subpixel shift 
    """

    #in_node = [fm is not None for fm in fmag for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]

    # Incoherent background
    remove_incoherent_scattering = p.remove_incoherent_scattering
    if remove_incoherent_scattering:
        supp_radius2 = np.ceil(p.incoherent_scattering_mask_count/np.pi)
        Qinc_support = (U.fvec2(asize) < supp_radius2).astype('float')
        Qinc_amp = np.array([II.sum() if II is not None else 0. for II in I])
        Qinc_max = Qinc_amp.max()
        Qinc_amp /= Qinc_max
        Qinc = Qinc_support.astype(CType)
        Qinc *= probe_amp / np.sqrt(np.prod(asize))
        grad_Qinc = np.zeros(asize, dtype=CType)
        new_grad_Qinc = np.zeros(asize, dtype=CType)
        h_Qinc = np.zeros(asize, dtype=CType)
        Iinc = np.zeros(asize,dtype=FType)
        verbose(3, 'Initialized incoherent scattering removal')

    # List of flat object
    if p.flat_object:
        is_flat = [(x[3] == num_objects-1) for x in positions] 

    probe_change_start = p.probe_change_start_ML
    
    average_probes = p.average_probes
    if average_probes:
        if num_probes == 1:
            average_probes = False # Nothing to do
        else:
            # Initialize arrays
            av_probe = probe.mean(axis=0)               # Probe average
            d_av_probe = np.zeros_like(probe)           # Deviation from probe average
            d_av_h_probe = np.zeros_like(probe)         # Correction to probe displacement
            average_probe_amp = p.average_probe_amp
            # Estimate the penalization amplitude normalization
            average_probe_normalization = .5 * np.prod(asize) * num_probes / (num_probes + 1)
            verbose(2, 'Using probe averaging (%d probes, amplitude term = %f)' % (num_probes, average_probe_amp))


    # Plotting
#    if (p.doplot or p.dump_plot) and (not parallel or prank==0):
#        plot_interval = p.plot_interval
#        from matplotlib import pyplot
#        if p.doplot:
#            pyplot.interactive(True)
#        plot_fig = pyplot.figure(1)
#        plot_fig.clf()
#        plot_fig.hold(False)
#        plot_axes = [plot_fig.add_subplot(2,2,i) for i in range(1,5)]
#        for pl in plot_axes: pl.hold(False)
#        plot_mask = p.get('plot_mask')
#        if plot_mask is None:
#            plot_mask = np.fft.fftshift(U.fvec2(osh[1:]) < .25*(max(osh[1:]) - max(sh[1:]))**2)
#        if p.dump_plot:
#            dump_plot_counter = 0

    if (p.doplot or p.dump_plot or p.last_plot) and (not parallel or prank==0):
        plot_interval = p.plot_interval
        if p.doplot:
            from matplotlib import pyplot
            pyplot.interactive(True)
        plot_fig = Plotter(p,interactive=p.doplot)
        if p.nearfield:
            p.plot_mask = np.zeros(osh[-2:], dtype=bool)
            dsh = (osh[-2] - sh[-2], osh[-1] - sh[-1])
            print dsh
            p.plot_mask[dsh[0]:-dsh[0], dsh[1]:-dsh[1]] = True
        else:
            p.plot_mask = np.fft.fftshift(U.fvec2(osh[-2:]) < .25*(max(osh[-2:]) - max(sh[-2:]))**2)
        if p.dump_plot:
            dump_plot_counter = 0

    a2 = np.prod(asize)
    if USE_FFTPACK:
        fnorm_fw = FType(1/np.sqrt(a2))
        fnorm_bw = FType(np.sqrt(a2))
        fft = FFT.fft2
        ifft = FFT.ifft2
    else:
        fnorm_fw = FType(1/np.sqrt(a2))
        fnorm_bw = FType(np.sqrt(a2))
        fft = FFT.fft2
        ifft = FFT.ifft2

    nearfield = p.nearfield

    if nearfield:
        q2 = U.fvec2(asize, psize=(1./asize[0], 1./asize[1]))
        z = p.z / p.dx_spec[0]
        l = p.lam / p.dx_spec[0]
        propfact = np.exp(2j * np.pi * (z / l) * (np.sqrt(1 - q2*l**2) - 1) )     
        ipropfact = propfact.conj()
        def fprop(x):
            return ifft(fft(x) * propfact)
        def ifprop(x):
            return ifft(fft(x) * ipropfact)
    else:
        def fprop(x):
            return fnorm_fw * fft(x)
        def ifprop(x):
            return fnorm_bw * ifft(x)

    start = True
    LL_list = []
    p.LL_list = LL_list

    epsilon = 1e-6

    for f in hooks['pre_ML']:
        verbose(3, 'Calling pre-ML hooks')
        f(locals(), globals())
        
    eps = 0.1
    subpix_maxdisp = 0.3
    eps_x_plus = np.array([eps, 0])
    eps_y_plus = np.array([0, eps])
     
     
    delta_r = []
    
    ##################################################################################################################################    
    # -8- MAIN LOOP
    ##################################################################################################################################    
    for itcg in range(numit):
        
        # Group all processes at this point
        if parallel:
            comm.Barrier()
            
        plot_displacements = False

        verbose(1,'%s, iteration # %d of %d' % (p.scans[0] if len(p.scans)==1 else (p.scans[0] + ' - ' + p.scans[-1]), itcg, numit))
        verbose(1,time.asctime())

        quad_approx = (ML_type in ['Poisson', 'Euclid']) or (not quad_interval) or (not (itcg % quad_interval == 0))
        floating_intensity = float_intens #and (itcg >= floating_intensity_start)

        subpix_started = subpix and (itcg >= subpix_start)
        subpix_disp_started = subpix_disp and (itcg >= subpix_disp_start)

        if subpix_started:
            shifted_probe_mode_view = len(probe_mode_view)*[None]
            
        # rescale probe and object
        probe_norm = U.norm(probe)
        probe *= probe_amp / probe_norm
        obj *= probe_norm / probe_amp

        if remove_incoherent_scattering:
            # also rescale the incoherent scattering field and compute the incoherent intensities
            Qinc *= probe_amp / probe_norm
            fQinc = fprop(Qinc)
            Iinc = U.abs2(fQinc)

        #
        # Gradient computation
        #

        # Initialization
        new_grad_obj.fill(0.)
        new_grad_probe.fill(0.)
        nrm_obj.fill(0.)
        LL = np.array([0.])
        
        LL_x_plus = np.array([0.])
        LL_x_minus = np.array([0.])
        LL_y_plus = np.array([0.])
        LL_y_minus = np.array([0.])    
        
        LLarr = np.arange(Ndata)         
        

        if remove_incoherent_scattering:
            new_grad_Qinc.fill(0.)
            fgrad_Qinc = np.zeros(asize, dtype=CType)


        # Loop through data
        if ML_type == 'Gauss':
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
    
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                new_grad_probe_mode = new_grad_probe_mode_view[i]
                new_grad_obj_mode = new_grad_obj_mode_view[i]
                
                H = np.array([[0., 0.], [0., 0.]])
                grad_L = np.array([[0.], [0.]])
                gamma = 0.5
                
                np.set_printoptions(precision=3)
                
                if subpix_disp_started:
                    steps = 1.0
                    x = np.arange(-steps , steps + 1.0 , 1.0)
                    y = np.arange(-steps , steps + 1.0 , 1.0)                        
                    X, Y = np.meshgrid(x, y)
                    Xnew, Ynew = np.meshgrid(x, y)
                    Z = np.zeros_like(X)
            
                    for k in (x + steps).tolist():
                        for l in (y + steps).tolist():
                            
                            sp_shift = np.array([0, 0])
                            if subpix_method.lower() == 'fourier':                        
                                sp_shift = subpix_shift_linear[i] + eps_x_plus * X[0, k] + eps_y_plus * Y[l, 0]
                                shift_xy = np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0))
                            elif subpix_method.lower() == 'linear': 
                                sp_shift = subpix_shift_linear[i] + eps_x_plus * X[0, k] + eps_y_plus * Y[l, 0]                      
                                shift_xy = sp_shift                                   
                            
                            Xnew[l, k] = sp_shift[0]
                            Ynew[l, k] = sp_shift[1]
                            probe_mode_kl = [do_subpix(pr, shift_xy) for pr in probe_mode]
                            if dp_shift_ramp[i] is None:
                                fpsij_kl = [fnorm_fw * fft(pr * ob) for pr in probe_mode_kl for ob in obj_mode]
                            else:
                                fpsij_kl = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode_kl for ob in obj_mode]
                            Imodel_kl = sum(U.abs2(fi) for fi in fpsij_kl)
                            if remove_incoherent_scattering:
                                Imodel_kl += Qinc_amp[i] * Iinc
                            if floating_intensity:
                                float_intens_coeff_kl[i] = (w[i] * Imodel_kl * I[i]).sum() / (w[i] * Imodel_kl ** 2).sum()
                                Imodel_kl *= float_intens_coeff_kl[i] 
                            DI_kl = Imodel_kl - I[i]    
                            LLj_kl = (w[i] * DI_kl ** 2).sum().item() 
                            Z[l, k] = LLj_kl      
                            
                    H[0, 1] = H[1, 0] = (Z[steps + 1, steps + 1] - Z[steps + 1, steps - 1] - Z[steps - 1, steps + 1] + Z[steps - 1, steps - 1]) / (4 * eps * eps)
                    H[0, 0] = (Z[steps, steps + 1] - 2 * Z[steps, steps] + Z[steps, steps - 1]) / (eps * eps)
                    H[1, 1] = (Z[steps + 1, steps ] - 2 * Z[steps, steps] + Z[steps - 1, steps]) / (eps * eps)
                    
                    grad_L[0, 0] = (Z[steps, steps + 1] - Z[steps, steps - 1]) / (2 * eps)
                    grad_L[1, 0] = (Z[steps + 1, steps] - Z[steps - 1 , steps ]) / (2 * eps)         
       
                    Hinv = LA.inv(H)
                    e, v = LA.eig(Hinv)                       
                    if e.min() > 0:
                        p0 = -.5 * np.dot(Hinv, grad_L)
                        #print p0,
                        length_p0 = np.sqrt(np.vdot(p0, p0))
                        if length_p0 > subpix_maxdisp:
                            p0 *= subpix_maxdisp / length_p0
                        #print p0
                    else:
                        #print 'Not positive definite!'
                        p0 = -subpix_maxdisp * grad_L / np.sqrt(np.vdot(grad_L, grad_L))
                        
                    subpix_shift_linear[i] += p0.T.flatten()   
                    
                    if subpix_method.lower() == 'fourier':                        
                        subpix_shift_probe[i] = np.exp(2j * np.pi * np.sum(U.fgrid(asize, subpix_shift_linear[i] / asize), axis=0))
                    elif subpix_method.lower() == 'linear': 
                        subpix_shift_probe[i] = subpix_shift_linear[i]      
                                 
                    vec = subpix_shift_linear[i] - (subpix_shift_initial[i] - real_subpix_displacements[i])
                    len_vec = np.sqrt(np.vdot(vec, vec))
                    
                    plt.ioff()
                    fig = plt.figure()
                    if debug_subpix_disp and len_vec > 0.5 and (itcg % (plot_interval / 2) == 0):  
                        plot_displacements = True
                                                      
                        steps0 = 5.0
                        x0 = np.arange(-steps0 , steps0 + 1.0 , 1.0)
                        y0 = np.arange(-steps0 , steps0 + 1.0 , 1.0)                        
                        X0, Y0 = np.meshgrid(x0, y0)
                        Xnew0, Ynew0 = np.meshgrid(x0, y0)
                        Z0 = np.zeros_like(X0)
                        for k in (x0 + steps0).tolist():
                            for l in (y0 + steps0).tolist():
                                sp_shift = np.array([0, 0])
                                
                                if subpix_method.lower() == 'fourier':                        
                                    sp_shift = subpix_shift_linear[i] + eps_x_plus * X0[0, k] + eps_y_plus * Y0[l, 0]
                                    shift_xy = np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0))
                                elif subpix_method.lower() == 'linear': 
                                    sp_shift = subpix_shift_linear[i] + eps_x_plus * X0[0, k] + eps_y_plus * Y0[l, 0]                      
                                    shift_xy = sp_shift  
                                
                                Xnew0[l, k] = sp_shift[0]
                                Ynew0[l, k] = sp_shift[1]
                                probe_mode_kl0 = [do_subpix(pr, shift_xy) for pr in probe_mode]
                                if dp_shift_ramp[i] is None:
                                    fpsij_kl0 = [fnorm_fw * fft(pr * ob) for pr in probe_mode_kl0 for ob in obj_mode]
                                else:
                                    fpsij_kl0 = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode_kl0 for ob in obj_mode]
                                Imodel_kl0 = sum(U.abs2(fi) for fi in fpsij_kl0)
                                if remove_incoherent_scattering:
                                    Imodel_kl0 += Qinc_amp[i] * Iinc
                                if floating_intensity:
                                    float_intens_coeff_kl0[i] = (w[i] * Imodel_kl0 * I[i]).sum() / (w[i] * Imodel_kl0 ** 2).sum()
                                    Imodel_kl0 *= float_intens_coeff_kl0[i] 
                                DI_kl0 = Imodel_kl0 - I[i] 
                                LLj_kl0 = (w[i] * DI_kl0 ** 2).sum().item() 
                                Z0[l, k] = LLj_kl0           
                        
                        
                        ax = fig.gca(projection='3d')
                        surf = ax.plot_surface(Xnew0, Ynew0, Z0, rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=False)
                        ax.set_zlim(Z0.min() - 1, Z0.max() + 1) 
                        ax.view_init(elev=41, azim= -24)
                        ax.zaxis.set_major_locator(LinearLocator(10))
                        ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
                        
                        fig.colorbar(surf, shrink=0.5, aspect=5)                
                        plt.draw()
                        plt.savefig('surface_estm_%d_%d.png' % (i, itcg))                        
                    plt.clf()                                   
                    plt.close()
                    plt.ion()    
                probe_mode = probe_mode_view[i]
                
                if subpix_started or subpix_disp_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]  
		    shifted_probe_mode_view[i] = probe_mode
                                       
                # Forward-propagate every (probe mode x object mode) pair to the
                # detector plane.  dp_shift_ramp[i], when set, is a phase ramp
                # applied in the sample plane (i.e. a detector-plane shift).
                if dp_shift_ramp[i] is None:
                    fpsij = [fprop(pr * ob) for pr in probe_mode for ob in obj_mode]
                else:
                    fpsij = [fprop(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]

                # Model intensity = incoherent sum over all mode pairs.
                Imodel = sum(U.abs2(fi) for fi in fpsij)
                if remove_incoherent_scattering:
                    Imodel += Qinc_amp[i]*Iinc
                if floating_intensity:
                    # Per-view intensity scaling: weighted least-squares fit of
                    # Imodel to the measured frame I[i].
                    float_intens_coeff[i] = (w[i]*Imodel*I[i]).sum() / (w[i]*Imodel**2).sum()
                    Imodel *= float_intens_coeff[i]
                DI = Imodel - I[i]

                # Back-propagate the weighted intensity mismatch for each mode pair.
                if dp_shift_ramp[i] is None:
                    xsi_j = [ifprop(w[i]*DI*fi) for fi in fpsij]
                else:
                    xsi_j = [dp_shift_ramp[i].conj() * ifprop(w[i]*DI*fi) for fi in fpsij]

                #for xi, pr, ob, ng_pr, ng_ob in zip(xsi_j, probe_mode, obj_mode, new_grad_probe_mode, new_grad_obj_mode):
                # Pair each back-propagated wave with its (probe, object) mode and
                # the matching gradient accumulators; ordering matches fpsij above.
                po_list = [(pr,ng_pr,ob,ng_ob) for pr,ng_pr in izip(probe_mode, new_grad_probe_mode) for ob,ng_ob in izip(obj_mode, new_grad_obj_mode)] 
                for xi, po in izip(xsi_j, po_list):
                    pr, ng_pr, ob, ng_ob = po
                    if not (p.flat_object and is_flat[i]):
                        ng_ob += 2. * xi * pr.conj()
                    if subpix_started or subpix_disp_started:
                        # Probe was shifted for this view: shift the gradient back.
                        ng_pr += 2. * do_subpix(xi * ob.conj(), subpix_shift_probe[i], forward=False) 
                    else:
                        ng_pr += 2. * xi * ob.conj()
                    # Accumulate |probe|^2 for later object normalization.
                    nrm_obj_view[i] += U.abs2(pr)
                    
                if remove_incoherent_scattering:
                    fgrad_Qinc += fQinc * Qinc_amp[i] * w[i] * DI
                # Gaussian-model negative log-likelihood contribution of view i.
                LLj = (w[i] * DI ** 2).sum()
                LLarr[i] = LLj
                LL += LLj 


            if use_empty_probe:
                # Extra measurement of the probe alone (no sample): add the
                # gradient of sum(empty_probe_w * (|F(probe)|^2 - empty_probe_I)^2)
                # with respect to the probe modes.
                probe_mode = probe[0]
                new_grad_probe_mode = new_grad_probe[0]

                fpsij = [fprop(pr) for pr in probe_mode]

                Imodel = sum(U.abs2(fi) for fi in fpsij)
                DI = Imodel - empty_probe_I

                xsi_j = [ifprop(empty_probe_w*DI*fi) for fi in fpsij]

                for xi, ng_pr in izip(xsi_j, new_grad_probe_mode):
                    # BUGFIX: the former subpix branch referenced 'ob' and
                    # 'subpix_parameters[i]' -- stale leftovers of the scan loop
                    # above.  The empty-probe frame involves no object and no
                    # scan position, so the probe gradient is simply 2*xi
                    # (matching the original else-branch).
                    ng_pr += 2. * xi

                LL += (empty_probe_w * DI**2).sum()
                

                                
            # Debug output: compare the estimated subpixel shifts (red) against the
            # known/true displacements (blue, sign-flipped), split over two figures
            # (views 0-99 and 101..Ndata-1).  Files: displacements{0,1}_<iter>.png.
            if plot_displacements:
                x1 = [  d[0] for  p_sub, d in izip(positions, subpix_shift_linear)]
                y1 = [  d[1] for  p_sub, d in izip(positions, subpix_shift_linear)]
                x2 = [  -d[0] for  p_sub, d in izip(positions, real_subpix_displacements)]
                y2 = [  -d[1] for  p_sub, d in izip(positions, real_subpix_displacements)]
                 
                xaxis = range(0, Ndata)
                fig1 = plt.figure(figsize=(20, 6))
                plt.plot(xaxis[0:100], x1[0:100], 'ro')
                plt.plot(xaxis[0:100], x2[0:100], 'bo')
                plt.plot(xaxis[0:100], y1[0:100], 'r*')
                plt.plot(xaxis[0:100], y2[0:100], 'b*')
                plt.xlim(-1, 100)
                plt.ylim(-2.0, 2.0)                  
                plt.savefig('displacements0_' + str(itcg) + '.png', dpi=(100))   
                plt.clf()                                   
                plt.close()                           
                fig2 = plt.figure(figsize=(20, 6))
                plt.plot(xaxis[101:Ndata - 1], x1[101:Ndata - 1], 'ro')
                plt.plot(xaxis[101:Ndata - 1], x2[101:Ndata - 1], 'bo')
                plt.plot(xaxis[101:Ndata - 1], y1[101:Ndata - 1], 'r*')
                plt.plot(xaxis[101:Ndata - 1], y2[101:Ndata - 1], 'b*')
                plt.xlim(100, Ndata)
                plt.ylim(-2.0, 2.0)
             
                plt.savefig('displacements1_' + str(itcg) + '.png', dpi=(100))  
                plt.clf()                                   
                plt.close()
            # Per-view likelihood plot every plot_interval iterations.
            if debug_subpix_disp and itcg % plot_interval == 0:
                plotLL(LLarr, itcg, Ndata)
        ##################################################################################################################################
        # -10- Gradient computation - Poisson
        ##################################################################################################################################                
        elif ML_type == 'Poisson':
            # Per-view gradient accumulation for the Poisson noise model:
            # LL = sum fmask * (Imodel - I*log(Imodel)) (+ per-view constant LLbase).
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                new_grad_probe_mode = new_grad_probe_mode_view[i]
                new_grad_obj_mode = new_grad_obj_mode_view[i]

                if subpix_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]

                if dp_shift_ramp[i] is None:
                    fpsij = [fprop(pr * ob) for pr in probe_mode for ob in obj_mode]
                else:
                    fpsij = [fprop(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                # epsilon guards the divisions and the log below.
                Imodel = sum(U.abs2(fi) for fi in fpsij) + epsilon
                if remove_incoherent_scattering:
                    Imodel += Qinc_amp[i]*Iinc
                if floating_intensity:
                    # Poisson ML estimate of the per-view scaling: ratio of totals.
                    float_intens_coeff[i] = I_sum[i] / Imodel.sum()
                    Imodel *= float_intens_coeff[i] 
                DI = fmask[i] * (1 - I[i] / Imodel)
                if dp_shift_ramp[i] is None:
                    xsi_j = [ifprop(DI*fi) for fi in fpsij]
                else:
                    xsi_j = [dp_shift_ramp[i].conj() * ifprop(DI*fi) for fi in fpsij]
                #for xi, pr, ob, ng_pr, ng_ob in zip(xsi_j, probe_mode, obj_mode, new_grad_probe_mode, new_grad_obj_mode):
                # Pairing order matches the construction of fpsij above.
                po_list = [(pr,ng_pr,ob,ng_ob) for pr,ng_pr in izip(probe_mode, new_grad_probe_mode) for ob,ng_ob in izip(obj_mode, new_grad_obj_mode)] 
                for xi, po in izip(xsi_j, po_list):
                    pr, ng_pr, ob, ng_ob = po
                    if not (p.flat_object and is_flat[i]):
                        ng_ob += 2. * xi * pr.conj()
                    if subpix_started:
                        # Shift the gradient back to the unshifted probe frame.
                        ng_pr += 2. * do_subpix(xi * ob.conj(), subpix_shift_probe[i], forward=False) 
                    else:
                        ng_pr += 2. * xi * ob.conj()
                    nrm_obj_view[i] += U.abs2(pr)
                if remove_incoherent_scattering:
                    fgrad_Qinc += fQinc * Qinc_amp[i] * DI
                LL += LLbase[i] + (fmask[i] * (Imodel - I[i] * np.log(Imodel))).sum()
        elif ML_type == 'Euclid':
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                new_grad_probe_mode = new_grad_probe_mode_view[i]
                new_grad_obj_mode = new_grad_obj_mode_view[i]

                if subpix_started:
                    probe_mode = [do_subpix(pr,subpix_parameters[i]) for pr in probe_mode]

                if dp_shift_ramp[i] is None:
                    fpsij = [fprop(pr * ob) for pr in probe_mode for ob in obj_mode]
                else:
                    fpsij = [fprop(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                fmodel = abs(fpsij)
                if floating_intensity:
                    float_intens_coeff[i] = fmag_sum[i] / fmodel.sum()
                    fmodel *= float_intens_coeff[i] 
                DI = fmask[i] * (1 - fmag[i]/fmodel)
                if dp_shift_ramp[i] is None:
                    xsi_j = ifprop(DI*fpsij)
                else:
                    xsi_j = dp_shift_ramp[i].conj() * ifprop(DI*fpsij)
                #for xi, pr, ob, ng_pr, ng_ob in zip(xsi_j, probe_mode, obj_mode, new_grad_probe_mode, new_grad_obj_mode):
                po_list = [(pr,ng_pr,ob,ng_ob) for pr,ng_pr in izip(probe_mode, new_grad_probe_mode) for ob,ng_ob in izip(obj_mode, new_grad_obj_mode)] 
                for xi, po in izip(xsi_j, po_list):
                    pr, ng_pr, ob, ng_ob = po
                    if not (p.flat_object and is_flat[i]):
                        ng_ob += 2. * xi * pr.conj()
                    if subpix_started:
                        ng_pr += 2. * do_subpix(xi * ob.conj(),subpix_parameters[i],forward=False) 
                    else:
                        ng_pr += 2. * xi * ob.conj()
                    nrm_obj_view[i] += U.abs2(pr)
                LL += (fmask[i] * (abs(fpsij) - fmag[i])**2).sum()

        # Sum the partial gradients and the likelihood over all MPI nodes
        # (each node only processed the views for which data_in_node[i] is set).
        if parallel:
            comm.Allreduce(MPI.IN_PLACE,new_grad_obj)
            comm.Allreduce(MPI.IN_PLACE,new_grad_probe)
            comm.Allreduce(MPI.IN_PLACE,nrm_obj)
            comm.Allreduce(MPI.IN_PLACE,LL)
            if remove_incoherent_scattering:
                comm.Allreduce(MPI.IN_PLACE,fgrad_Qinc)

        # Gradient w.r.t. the incoherent background, restricted to its support.
        if remove_incoherent_scattering:
            new_grad_Qinc = 2.*Qinc_support * ifprop(fgrad_Qinc)

        # Penalize deviation of each probe from the (shift/phase-aligned) average
        # probe; the penalty enters both the gradient and the likelihood.
        if average_probes:
            if Nmodes > 1:
                raise RuntimeError('Probe averaging has not been modified to take into account multiple modes')
            # Compute new average and find  alignment parameters
            av_probe1 = np.zeros_like(probe[0])
            r1_shift = []
            r2_shift = []
            for ipr in range(num_probes):
                aprtmp,r,alpha = U.shift_best(av_probe,probe[ipr])
                r1_shift.append(r)
                # Only phase shifts are unconstrained, so divide with abs(alpha)
                av_probe1 += aprtmp / abs(alpha)
            av_probe = av_probe1/num_probes
            
            # Add penalization term
            LLav = 0.
            r_shift = []
            for ipr in range(num_probes):
                pr_shift,r,alpha = U.shift_best(probe[ipr], av_probe)
                r2_shift.append(r)
                # Residual between probe ipr and the aligned average.
                d_av_probe[ipr] = (probe[ipr] - pr_shift / abs(alpha))
                new_grad_probe[ipr] += average_probe_amp * average_probe_normalization * d_av_probe[ipr]
                #probe[ipr] = pr_alpha[ipr] * U.pshift(av_probe, pr_r[ipr])
                LLav += average_probe_amp * average_probe_normalization * U.norm2(d_av_probe[ipr])
            verbose(3, 'Log-likelihood contribution from probe averaging = %f' % LLav)
            LL += LLav

        # Relative weight of the probe gradient vs the object gradient.  With
        # altmin the weight adapts to the current gradient norms; otherwise it is
        # the fixed user-supplied factor.
        if altmin:
            scale_p_o = scale_probe_object * U.norm2(new_grad_obj) / U.norm2(new_grad_probe)
            verbose(3, 'Scale P/O: %6.3g' % scale_p_o)
        else:
            scale_p_o = scale_probe_object

        # Constrain the probe update to its support region.
        if use_probe_support:
            new_grad_probe *= probe_support

        # Gaussian-prior (del^2) regularization gradient on the object.
        if reg_del2:
            new_grad_obj += regularizer.gradient(obj)

        if reg_TV:
            # Total variation regularization
            # Backward and forward discrete differences 
            obj_xf = U.delxf(obj,axis=-2)
            obj_yf = U.delxf(obj,axis=-1)
            obj_xb = U.delxb(obj,axis=-2)
            obj_yb = U.delxb(obj,axis=-1)

            # New gradient contribution.  Clamp the difference magnitudes away
            # from zero so the d/|d| terms below never divide by zero.
            epsilon = 1e-10
            abs_obj_xb = np.abs(obj_xb)
            abs_obj_yb = np.abs(obj_yb)
            abs_obj_xf = np.abs(obj_xf)
            abs_obj_yf = np.abs(obj_yf)

            abs_obj_xb[abs_obj_xb < epsilon] = epsilon
            abs_obj_yb[abs_obj_yb < epsilon] = epsilon
            abs_obj_xf[abs_obj_xf < epsilon] = epsilon
            abs_obj_yf[abs_obj_yf < epsilon] = epsilon

            new_grad_obj += reg_TV_amplitude*(obj_xb/abs_obj_xb + obj_yb/abs_obj_yb - obj_xf/abs_obj_xf - obj_yf/abs_obj_yf)
            #new_grad_obj += reg_TV_amplitude*(np.exp(1j*np.angle(obj_xb)) + np.exp(1j*np.angle(obj_yb)) - np.exp(1j*np.angle(obj_xf)) - np.exp(1j*np.angle(obj_yf)))

        if reg_Huber:
            # Huber regularization
            # Backward and forward discrete differences 
            obj_xf = U.delxf(obj,axis=-2)
            obj_yf = U.delxf(obj,axis=-1)
            obj_xb = U.delxb(obj,axis=-2)
            obj_yb = U.delxb(obj,axis=-1)

            # New gradient contribution.  reg_Huber_parameter smooths the TV-like
            # d/sqrt(mu + |d|^2) terms near zero.
            epsilon = 1e-6
            fp_xb = 1./np.sqrt(reg_Huber_parameter + U.abs2(obj_xb))
            fp_yb = 1./np.sqrt(reg_Huber_parameter + U.abs2(obj_yb))
            fp_xf = 1./np.sqrt(reg_Huber_parameter + U.abs2(obj_xf))
            fp_yf = 1./np.sqrt(reg_Huber_parameter + U.abs2(obj_yf))

            new_grad_obj += reg_Huber_amplitude*(obj_xb * fp_xb + obj_yb * fp_yb - obj_xf * fp_xf - obj_yf * fp_yf)

        if object_smooth_gradient:
            # Apply smoothing filter preconditioning
            #for new_grad_obj_slice in new_grad_obj:
            #    for x in new_grad_obj_slice:
            #        x[:] = object_smooth_filter(x)
            new_grad_obj[:] = object_smooth_filter(new_grad_obj)

        # Record the per-measurement-point likelihood for convergence monitoring.
        newLL= LL.item() / tot_measpts
        LL_list.append(newLL)

        # Freeze the probe until iteration probe_change_start is reached.
        if probe_change_start > itcg:
            scale_p_o = scale_probe_object
            new_grad_probe.fill(0.)

        # Polak-Ribiere conjugate-gradient coefficient (bt), clipped at 0 so the
        # direction resets to steepest descent when the estimate turns negative.
        if start:
            start = False
            bt = 0
        else:
            bt_num = scale_p_o * ( U.norm2(new_grad_probe) - np.real(np.vdot(new_grad_probe.flat,grad_probe.flat))) +\
                                 ( U.norm2(new_grad_obj) - np.real(np.vdot(new_grad_obj.flat, grad_obj.flat))) 
            bt_denom = scale_p_o * U.norm2(grad_probe) + U.norm2(grad_obj) 
            if remove_incoherent_scattering:
                bt_num += scale_p_o * (U.norm2(new_grad_Qinc) - np.real(np.vdot(new_grad_Qinc.flat, grad_Qinc.flat)))
                bt_denom += scale_p_o*U.norm2(grad_Qinc)
            bt = max(0,bt_num/bt_denom)

        #verbose(3,'Polak-Ribiere coefficient: %f ' % bt)

        # Store the current gradients for the next iteration's bt computation.
        grad_obj[:] = new_grad_obj
        grad_probe[:] = new_grad_probe
        if remove_incoherent_scattering:
            grad_Qinc[:] = new_grad_Qinc    

        # 3. Next conjugate: h <- bt*h - (preconditioned) gradient, with the
        # probe (and background) directions scaled by scale_p_o.
        h_obj *= bt
        if object_smooth_gradient:
            #for ii_h_obj in range(len(h_obj)):
            #    for jj_h_obj in range(len(h_obj[ii_h_obj])):
            #        h_obj[ii_h_obj, jj_h_obj] -= object_smooth_filter(grad_obj[ii_h_obj,jj_h_obj])
            h_obj -= object_smooth_filter(grad_obj)
        else:
            h_obj -= grad_obj
        h_probe *= bt
        h_probe -= scale_p_o * grad_probe
        if remove_incoherent_scattering:
            h_Qinc *= bt
            h_Qinc -= scale_p_o * grad_Qinc

        # 4. Minimize along h.  B will hold the coefficients (constant term
        # first) of the likelihood as a polynomial in the step length t, up to
        # degree 8; Brenorm keeps the coefficients well-scaled.
        B = np.zeros((9,),dtype=FType)
        Brenorm = 1./ LL.item()**2
        if remove_incoherent_scattering:
            fh_Qinc = fprop(h_Qinc)
            cfh_Qinc = fh_Qinc.conj()

        if ML_type == 'Gauss':
            # Line-search polynomial for the Gaussian model: along obj+t*h_obj /
            # probe+t*h_probe the exit-wave intensity is quartic in t
            # (A0 + A1*t + A2*t^2 + A3*t^3 + A4*t^4), so the weighted squared
            # residual is a degree-8 polynomial whose coefficients accumulate
            # into B.
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
    
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                h_probe_mode = h_probe_mode_view[i]
                h_obj_mode = h_obj_mode_view[i]

                if subpix_started:
                    #probe_mode = [do_subpix(pr,subpix_parameters[i]) for pr in probe_mode]
                    # Reuse the probe views shifted during the gradient pass.
                    probe_mode = shifted_probe_mode_view[i]
                    h_probe_mode = [do_subpix(h_pr, subpix_shift_probe[i]) for h_pr in h_probe_mode]

                # fpsij: current waves; aj: first-order (in t) waves; bj: second-order.
                if dp_shift_ramp[i] is None:
                    fpsij = [fprop(pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fprop(pr * h_ob + h_pr * ob) for (pr,h_pr) in izip(probe_mode, h_probe_mode) for (ob,h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fprop(h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
                else:
                    fpsij = [fprop(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fprop(dp_shift_ramp[i] * (pr * h_ob + h_pr * ob)) for (pr,h_pr) in izip(probe_mode, h_probe_mode) for (ob,h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fprop(dp_shift_ramp[i] * h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
    
                A0 = sum( U.abs2(fi) for fi in fpsij)
                A1 = 2*np.real( sum(fi * aji.conj() for (fi,aji) in izip(fpsij, aj) )  )
                A2 = 2*np.real( sum(fi * bji.conj() for (fi,bji) in izip(fpsij, bj) )  ) + sum( U.abs2(aji) for aji in aj )
    
                if p.remove_incoherent_scattering:
                    A0 += Qinc_amp[i] * Iinc
                    A1 += 2*Qinc_amp[i]*np.real( fQinc * cfh_Qinc)
                    A2 += Qinc_amp[i]*U.abs2(fh_Qinc)

                if floating_intensity:
                    A0 *= float_intens_coeff[i]
                    A1 *= float_intens_coeff[i]
                    A2 *= float_intens_coeff[i]
                A0 -= I[i]
    
                if quad_approx:
                    # Keep only the quadratic part of the line-search polynomial.
                    B[0] += np.dot(w[i].flat,(A0**2).flat) * Brenorm
                    B[1] += np.dot(w[i].flat,(2*A0*A1).flat) * Brenorm
                    B[2] += np.dot(w[i].flat,(A1**2 + 2*A0*A2).flat) * Brenorm
                else:
                    A3 = 2*np.real( sum(aji * bji.conj() for (aji,bji) in izip(aj,bj) )  )
                    A4 = sum( U.abs2(bji) for bji in bj )
                    if floating_intensity:
                        A3 *= float_intens_coeff[i]
                        A4 *= float_intens_coeff[i]
                    B[0] += np.dot(w[i].flat,(A0**2).flat) * Brenorm
                    B[1] += np.dot(w[i].flat,(2*A0*A1).flat) * Brenorm
                    B[2] += np.dot(w[i].flat,(A1**2 + 2*A0*A2).flat) * Brenorm
                    B[3] += np.dot(w[i].flat,(2*A1*A2 + 2*A0*A3).flat) * Brenorm
                    B[4] += np.dot(w[i].flat,(2*A1*A3 + 2*A0*A4 + A2**2).flat) * Brenorm
                    B[5] += np.dot(w[i].flat,(2*A2*A3).flat) * Brenorm
                    B[6] += np.dot(w[i].flat,(2*A2*A4 + A3**2).flat) * Brenorm
                    B[7] += np.dot(w[i].flat,(2*A3*A4).flat) * Brenorm
                    B[8] += np.dot(w[i].flat,(A4**2).flat) * Brenorm

            # Contribution of the empty-probe measurement: the object plays no
            # role, so the intensity is only quadratic in t.
            if use_empty_probe:
                probe_mode = probe[0]
                h_probe_mode = h_probe[0]

                fpsij = [fprop(pr) for pr in probe_mode]
                aj = [fprop(h_pr) for h_pr in h_probe_mode]
    
                A0 = sum( U.abs2(fi) for fi in fpsij)
                A1 = 2*np.real( sum(fi * aji.conj() for (fi,aji) in izip(fpsij, aj) )  )
                A2 = sum( U.abs2(aji) for aji in aj )
    
                A0 -= empty_probe_I
    
                B[0] += np.dot(empty_probe_w.flat,(A0**2).flat) * Brenorm
                B[1] += np.dot(empty_probe_w.flat,(2*A0*A1).flat) * Brenorm
                B[2] += np.dot(empty_probe_w.flat,(A1**2 + 2*A0*A2).flat) * Brenorm

        elif ML_type == 'Poisson':
            # Second-order (quadratic in t) expansion of the Poisson likelihood
            # along the search direction: only B[1] and B[2] are filled, so the
            # minimizer below reduces to a quadratic line search.
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                h_probe_mode = h_probe_mode_view[i]
                h_obj_mode = h_obj_mode_view[i]
    
                if subpix_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]
                    h_probe_mode = [do_subpix(h_pr, subpix_shift_probe[i]) for h_pr in h_probe_mode]

                # fpsij: current waves; aj: first-order (in t) waves; bj: second-order.
                if dp_shift_ramp[i] is None:
                    fpsij = [fprop(pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fprop(pr * h_ob + h_pr * ob) for (pr,h_pr) in izip(probe_mode, h_probe_mode) for (ob,h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fprop(h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
                else:
                    fpsij = [fprop(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fprop(dp_shift_ramp[i] * (pr * h_ob + h_pr * ob)) for (pr,h_pr) in izip(probe_mode, h_probe_mode) for (ob,h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fprop(dp_shift_ramp[i] * h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
    
                A0 = sum( U.abs2(fi) for fi in fpsij)
                A1 = 2*np.real( sum(fi * aji.conj() for (fi,aji) in izip(fpsij, aj) )  )
                A2 = 2*np.real( sum(fi * bji.conj() for (fi,bji) in izip(fpsij, bj) )  ) + sum( U.abs2(aji) for aji in aj )
     
                if p.remove_incoherent_scattering:
                    A0 += Qinc_amp[i] * Iinc
                    A1 += 2*Qinc_amp[i]*np.real( fQinc * cfh_Qinc)
                    A2 += Qinc_amp[i]*U.abs2(fh_Qinc)

                if floating_intensity:
                    A0 *= float_intens_coeff[i]
                    A1 *= float_intens_coeff[i]
                    A2 *= float_intens_coeff[i]

                DI = 1. - I[i]/A0
    
                B[1] += np.dot(fmask[i].flat,(A1*DI).flat) * Brenorm
                B[2] += (np.dot(fmask[i].flat, (A2*DI).flat) + .5*np.dot(fmask[i].flat, (I[i]*(A1/A0)**2).flat)) * Brenorm
                #B[1] += np.dot(A1.flat,DI.flat) * Brenorm
                #B[2] += (np.dot(A2.flat,DI.flat) + .5*np.dot(I[i].flat,((A1/A0)**2).flat)) * Brenorm

        elif ML_type == 'Euclid':
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                h_probe_mode = h_probe_mode_view[i]
                h_obj_mode = h_obj_mode_view[i]
    
                if subpix_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]
                    h_probe_mode = [do_subpix(h_pr, subpix_shift_probe[i]) for h_pr in h_probe_mode]

                if dp_shift_ramp[i] is None:
                    fpsij = [fprop(pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fprop(pr * h_ob + h_pr * ob) for (pr,h_pr) in izip(probe_mode, h_probe_mode) for (ob,h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fprop(h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
                else:
                    fpsij = [fprop(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fprop(dp_shift_ramp[i] * (pr * h_ob + h_pr * ob)) for (pr,h_pr) in izip(probe_mode, h_probe_mode) for (ob,h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fprop(dp_shift_ramp[i] * h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
    
                A0 = U.abs2(fpsij)
                A1 = 2*np.real( fpsij * aj.conj() )
                A2 = 2*np.real( fpsij * bj.conj() ) + U.abs2(aj)
    
                if p.remove_incoherent_scattering:
                    A0 += Qinc_amp[i] * Iinc
                    A1 += 2*Qinc_amp[i]*np.real( fQinc * cfh_Qinc)
                    A2 += Qinc_amp[i]*U.abs2(fh_Qinc)

                if floating_intensity:
                    A0 *= float_intens_coeff[i]
                    A1 *= float_intens_coeff[i]
                    A2 *= float_intens_coeff[i]

                DI = 1. - fmag[i] / np.sqrt(A0)
    
                B[1] += np.dot(fmask[i].flat, (A1*DI).flat) * Brenorm
                B[2] += (np.dot(fmask[i].flat, (A2*DI).flat) + .25*np.dot(fmask[i].flat, (A1**2 * fmag[i]/A0**(3/2)).flat)) * Brenorm
                #B[1] += np.dot(A1.flat,DI.flat) / Brenorm
                #B[2] += (np.dot(A2.flat,DI.flat) + .25*np.dot((A1**2).flat,(fmag[i]/A0**(3/2)).flat))/ Brenorm

        # Sum the line-search polynomial coefficients over all MPI nodes.
        if parallel:
            comm.Allreduce(MPI.IN_PLACE,B)

        # Probe-averaging penalty along the search direction (quadratic in t).
        if average_probes:
            av_h_probe = np.zeros_like(av_probe)
            for ipr in range(num_probes):
                av_h_probe += U.pshift(h_probe[ipr], r1_shift[ipr])
            av_h_probe /= num_probes

            for ipr in range(num_probes):
                d_av_h_probe[ipr] = h_probe[ipr] - U.pshift(av_h_probe, r2_shift[ipr])

            B[0] += Brenorm * average_probe_amp * average_probe_normalization * U.norm2(d_av_probe)
            B[1] += Brenorm * average_probe_amp * average_probe_normalization * 2*np.real(np.vdot(d_av_probe.flat, d_av_h_probe.flat))
            B[2] += Brenorm * average_probe_amp * average_probe_normalization * U.norm2(d_av_h_probe) 

        if reg_del2:
            # Quadratic distance regularization on the object (gaussian prior)
            c0,c1,c2 = regularizer.poly_line_coeffs(h_obj)
            B[0] += Brenorm*c0
            B[1] += Brenorm*c1
            B[2] += Brenorm*c2
            R_list.append(c0 / tot_measpts)

        if reg_TV:
            # Total variation regularization: linearized contribution along the
            # search direction.  Reuses obj_x{f,b}/obj_y{f,b} computed in the
            # gradient step above (reg_TV gradient runs earlier each iteration).
            h_obj_xf = U.delxf(h_obj,axis=-2)
            h_obj_yf = U.delxf(h_obj,axis=-1)
            h_obj_xb = U.delxb(h_obj,axis=-2)
            h_obj_yb = U.delxb(h_obj,axis=-1)

            # Clamp magnitudes away from zero before dividing.
            epsilon = 1e-10
            abs_obj_xf = np.abs(obj_xf)
            abs_obj_yf = np.abs(obj_yf)
            abs_obj_xb = np.abs(obj_xb)
            abs_obj_yb = np.abs(obj_yb)

            abs_obj_xb[abs_obj_xb < epsilon] = epsilon
            abs_obj_yb[abs_obj_yb < epsilon] = epsilon
            abs_obj_xf[abs_obj_xf < epsilon] = epsilon
            abs_obj_yf[abs_obj_yf < epsilon] = epsilon

            B[0] += Brenorm * reg_TV_amplitude * (abs_obj_xf.sum() + abs_obj_yf.sum() + abs_obj_xb.sum() + abs_obj_yb.sum())
            B[1] += Brenorm * reg_TV_amplitude * np.real(np.vdot((obj_xf / abs_obj_xf), h_obj_xf) +\
                                               np.vdot((obj_yf / abs_obj_yf), h_obj_yf) +\
                                               np.vdot((obj_xb / abs_obj_xb), h_obj_xb) +\
                                               np.vdot((obj_yb / abs_obj_yb), h_obj_yb)) 
            #B[2] += Brenorm * .5 * reg_TV_amplitude * np.real(np.vdot((h_obj_xf / abs_obj_xf), h_obj_xf.flat) +\
            #                                        np.vdot((h_obj_yf / abs_obj_yf).flat, h_obj_yf.flat) +\
            #                                        np.vdot((h_obj_xb / abs_obj_xb).flat, h_obj_xb.flat) +\
            #                                        np.vdot((h_obj_yb / abs_obj_yb).flat, h_obj_yb.flat))
            #B[2] -= Brenorm * .5 * reg_TV_amplitude * ( (np.real(obj_xf * h_obj_xf.conj() / abs_obj_xf)**2 / abs_obj_xf).sum() +\
            #                                  (np.real(obj_yf * h_obj_yf.conj() / abs_obj_yf)**2 / abs_obj_yf).sum() +\
            #                                  (np.real(obj_xb * h_obj_xb.conj() / abs_obj_xb)**2 / abs_obj_xb).sum() +\
            #                                  (np.real(obj_yb * h_obj_yb.conj() / abs_obj_yb)**2 / abs_obj_yb).sum() )

            #ph_obj_xf = np.exp(1j*np.angle(obj_xf))
            #ph_obj_xb = np.exp(1j*np.angle(obj_xb))
            #ph_obj_yf = np.exp(1j*np.angle(obj_yf))
            #ph_obj_yb = np.exp(1j*np.angle(obj_yb))
            #B[0] += Brenorm * reg_TV_amplitude * (abs_obj_xf.sum() + abs_obj_yf.sum() + abs_obj_xb.sum() + abs_obj_yb.sum())
            #B[1] += reg_TV_amplitude * np.real(np.vdot(ph_obj_xf.flat, h_obj_xf.flat) +\
            #                                   np.vdot(ph_obj_yf.flat, h_obj_yf.flat) +\
            #                                   np.vdot(ph_obj_xb.flat, h_obj_xb.flat) +\
            #                                   np.vdot(ph_obj_yb.flat, h_obj_yb.flat)) 
            #B[2] += .5 * reg_TV_amplitude * np.real(np.vdot((h_obj_xf / abs_obj_xf).flat, h_obj_xf.flat) +\
            #                                        np.vdot((h_obj_yf / abs_obj_yf).flat, h_obj_yf.flat) +\
            #                                        np.vdot((h_obj_xb / abs_obj_xb).flat, h_obj_xb.flat) +\
            #                                        np.vdot((h_obj_yb / abs_obj_yb).flat, h_obj_yb.flat))
            #B[2] -= reg_TV_amplitude * ( (np.real(ph_obj_xf * h_obj_xf.conj())**2 / abs_obj_xf).sum() +\
            #                                  (np.real(ph_obj_yf * h_obj_yf.conj())**2 / abs_obj_yf).sum() +\
            #                                  (np.real(ph_obj_xb * h_obj_xb.conj())**2 / abs_obj_xb).sum() +\
            #                                  (np.real(ph_obj_yb * h_obj_yb.conj())**2 / abs_obj_yb).sum() )

        if reg_Huber:
            # Huber regularization: quadratic expansion of the penalty along the
            # search direction.  obj_x{f,b}/obj_y{f,b} come from the gradient step.
            h_obj_xf = U.delxf(h_obj,axis=-2)
            h_obj_yf = U.delxf(h_obj,axis=-1)
            h_obj_xb = U.delxb(h_obj,axis=-2)
            h_obj_yb = U.delxb(h_obj,axis=-1)

            f_obj_xf = np.sqrt(reg_Huber_parameter + U.abs2(obj_xf))
            f_obj_yf = np.sqrt(reg_Huber_parameter + U.abs2(obj_yf))
            f_obj_xb = np.sqrt(reg_Huber_parameter + U.abs2(obj_xb))
            f_obj_yb = np.sqrt(reg_Huber_parameter + U.abs2(obj_yb))

            B[0] += Brenorm * reg_Huber_amplitude * (f_obj_xf.sum() + f_obj_yf.sum() + f_obj_xb.sum() + f_obj_yb.sum())
            B[1] += Brenorm * reg_Huber_amplitude * np.real(np.vdot((obj_xf / f_obj_xf).flat, h_obj_xf.flat) +\
                                               np.vdot((obj_yf / f_obj_yf).flat, h_obj_yf.flat) +\
                                               np.vdot((obj_xb / f_obj_xb).flat, h_obj_xb.flat) +\
                                               np.vdot((obj_yb / f_obj_yb).flat, h_obj_yb.flat)) 
            B[2] += Brenorm * .5 * reg_Huber_amplitude * np.real(np.vdot((h_obj_xf / f_obj_xf).flat, h_obj_xf.flat) +\
                                                    np.vdot((h_obj_yf / f_obj_yf).flat, h_obj_yf.flat) +\
                                                    np.vdot((h_obj_xb / f_obj_xb).flat, h_obj_xb.flat) +\
                                                    np.vdot((h_obj_yb / f_obj_yb).flat, h_obj_yb.flat))
            B[2] -= Brenorm * .5 * reg_Huber_amplitude * ( (np.real(obj_xf * h_obj_xf.conj() / f_obj_xf)**2 / f_obj_xf).sum() +\
                                              (np.real(obj_yf * h_obj_yf.conj() / f_obj_yf)**2 / f_obj_yf).sum() +\
                                              (np.real(obj_xb * h_obj_xb.conj() / f_obj_xb)**2 / f_obj_xb).sum() +\
                                              (np.real(obj_yb * h_obj_yb.conj() / f_obj_yb)**2 / f_obj_yb).sum() )

        # Guard against numerical blow-ups in the polynomial coefficients.
        if np.isinf(B).any() or np.isnan(B).any():
            print 'Warning! inf or nan found! Trying to continue...'
            B[np.isinf(B)] = 0.
            B[np.isnan(B)] = 0.
        # np.roots / np.polyval expect the highest-degree coefficient first,
        # so reverse B (which is stored constant-term first).
        coeff = B.tolist()
        coeff.reverse()
        coeff_p = coeff[:-1]*np.array([8, 7, 6, 5, 4, 3, 2, 1])         # Polynomial coefficient of the first derivative
        coeff_pp = coeff_p[:-1]*np.array([7, 6, 5, 4, 3, 2, 1])         # Second derivative

        if quad_approx:
            # Quadratic model: minimum of B[0] + B[1]*t + B[2]*t^2 in closed form.
            tmin = -coeff_p[-1] / coeff_p[-2]
        else:
            t0 = np.roots(coeff_p)                        # Roots of first derivative
            t0p = np.real(t0[t0.imag == 0])               # Only real roots should be considered
            pos_curv = (np.polyval(coeff_pp, t0p) > 0)    # Points for which curvature is positive
            t0p = t0p[pos_curv]
            id_abs_min=np.abs(t0p).argmin()               # 2010-11-08: Pick the root with the smallest absolute value.
            tmin=t0p[id_abs_min]                          # 2010-11-08: Pick the root with the smallest absolute value.
	#####removed 2010-11-08
        #id = np.polyval(coeff, t0p).argmin()     # Minimum among positive curvature points.
        #tmin = t0p[id]
	#####        

        # Progress report: likelihood (and regularizer share when reg_del2 is on).
        if reg_del2:
            verbose(1,'%d - L=%g - R=%g (%3.2g%%)' % (itcg, LL_list[-1], R_list[-1], 100.*R_list[-1]/(LL_list[-1]+R_list[-1])))
        else:
            verbose(1,'%d - %g' % (itcg, LL_list[-1]))

        # Move to this point: take the step tmin along the conjugate directions.
        obj += tmin*h_obj
        probe += tmin*h_probe
        if remove_incoherent_scattering:
            Qinc += tmin*h_Qinc
        verbose(3, 'Object displacement: %8.3e' % (tmin*np.real(U.norm(h_obj))))
        verbose(3, 'Probe displacement: %8.3e' % (tmin*np.real(U.norm(h_probe))))

#       Removed - could have worked but seems to interfere to strongly with the minimization process.
#       "average_probes" is instead used as a penalization term which only affect the computation of gradients. 
#   
#        if average_probes:
#            # Compute new average and find  alignment parameters
#            av_probe1 = np.zeros_like(probe[0])
#            #pr_r = []
#            #pr_alpha = []
#            for ipr in range(num_probes):
#                aprtmp,r,alpha = U.shift_best(av_probe,probe[ipr])
#                # Only phase shifts are unconstrained, so divide with abs(alpha)
#                av_probe1 += aprtmp / abs(alpha)
#                # Store inverse displacements and phase scaling
#                #pr_r.append(-r)
#                #pr_alpha.append(abs(alpha)/alpha)
#                print 'Probe # %d: displacement : %s \t factor : %f' % (ipr, str(-r), 1/abs(alpha))
#            av_probe = av_probe1/num_probes
#        
#            # Replace probes with new average, shifted back in place.
#            for ipr in range(num_probes):
#                probe[ipr] = U.shift_best(probe[ipr], av_probe)[0]
#                #probe[ipr] = pr_alpha[ipr] * U.pshift(av_probe, pr_r[ipr])
	
        # Apply periodic boundary conditions, added 2010-11-23, MD
        if p.use_periodic_boundary:
            verbose(2, 'Using periodic boundary conditions')
            left_stripe = obj[:,:,:asize[0]].copy();
            right_stripe = obj[:,:,-asize[0]:].copy();
            obj[:,:,-asize[0]:] = (right_stripe + left_stripe)/2
            obj[:,:,:asize[0]] = (right_stripe + left_stripe)/2

        if dump_object and (itcg % dump_object_interval == 0) and (not parallel or prank==0):
            io.h5write(dump_object_pattern % itcg, obj=obj)            
        if dump_probe and (itcg % dump_probe_interval == 0) and (not parallel or prank==0):
            io.h5write(dump_probe_pattern % itcg, probe=probe)            

        
#        if (p.doplot or p.dump_plot) and (not parallel or prank==0) and (itcg % p.plot_interval == 0):
#            nobj = U.rmphaseramp(obj[0], nrm_obj[0])
#            mean_nobj = (nobj*nrm_obj[0]).sum() / nrm_obj[0].sum()
#            angle_obj = np.angle(nobj / mean_nobj)
#            abs_obj = np.abs(nobj)
#            angle_obj_bounds = (angle_obj[plot_mask].min(), angle_obj[plot_mask].max())
#            abs_obj_bounds = (abs_obj[plot_mask].min(), abs_obj[plot_mask].max())
#            plot_axes[0].imshow(abs_obj,vmin=abs_obj_bounds[0], vmax=abs_obj_bounds[1])
#            plot_axes[1].imshow(angle_obj,vmin=angle_obj_bounds[0], vmax=angle_obj_bounds[1])
#            plot_axes[2].imshow(U.imsave(probe[0,0]))
#            plot_axes[3].semilogy(LL_list)
#            if p.doplot:
#                plot_fig.canvas.resize_event()
#                plot_fig.canvas.draw()
#                time.sleep(.5)
#            if p.dump_plot:
#                try:
#                    dump_plot_file = p.dump_plot_patt % dump_plot_counter
#                except TypeError:
#                    dump_plot_file = p.dump_plot_patt
#                ispdf = (os.path.splitext(dump_plot_file)[-1].lower() == '.pdf')
#                if ispdf:
#                    plot_fig.savefig(dump_plot_file, dpi=600)
#                else:
#                    plot_fig.savefig(dump_plot_file)
#                dump_plot_counter += 1

        if (p.doplot or p.dump_plot) and (not parallel or prank==0) and (itcg % p.plot_interval == 0):
            plot_fig.plot(p)
            if p.doplot:
                plot_fig.draw()
            if p.dump_plot:
                try:
                    dump_plot_file = p.dump_plot_patt % dump_plot_counter
                except TypeError:
                    dump_plot_file = p.dump_plot_patt
                ispdf = (os.path.splitext(dump_plot_file)[-1].lower() == '.pdf')
                if ispdf:
                    plot_fig.savefig(dump_plot_file, dpi=600)
                else:
                    plot_fig.savefig(dump_plot_file)
                dump_plot_counter += 1
        
        for f in hooks['loop_ML']:
            verbose(3, 'Calling loop-ML hooks')
            f(locals(), globals())
              
        if debug_subpix_disp and subpix_disp_started: 
            # pos_estimates = [(sp_pr[0] + d[0], sp_pr[1] + d[1]) for sp_pr, d in izip(subpix_shift_probe, displacements)]
            one = sum([(-est_disp[0] + sp_disp[0]) ** 2 for sp_disp, est_disp in izip(real_subpix_displacements, subpix_shift_linear)]) / Ndata
            two = sum([(-est_disp[1] + sp_disp[1]) ** 2 for sp_disp, est_disp in izip(real_subpix_displacements, subpix_shift_linear)]) / Ndata
            three = (sum([-est_disp[0] + sp_disp[0] for sp_disp, est_disp in izip(real_subpix_displacements, subpix_shift_linear)]) / Ndata) ** 2
            four = (sum([-est_disp[1] + sp_disp[1] for sp_disp, est_disp in izip(real_subpix_displacements, subpix_shift_linear)]) / Ndata) ** 2
            del_r = one + two - three - four
            delta_r.append(del_r)
            verbose(3, '------------------------------------------------------------------------------------------------------------------------------------')
            #verbose(3, 'sum[(x-xhut)^2] = %f' % one)
            #verbose(3, 'sum[(y-yhut)^2] = %f' % two)
            #verbose(3, 'sum[(x-xhut)]^2 = %f' % three)
            #verbose(3, 'sum[(y-yhut)]^2 = %f' % four)
            verbose(3, 'Delta_r = %f' % del_r)
            verbose(3, '------------------------------------------------------------------------------------------------------------------------------------')
    
                
    ################
    ## -21- END MAIN LOOP
    ################
    
    verbose(3, '===============================FINAL DISPLACEMENTS==================================')
    map(lambda i, d: verbose(1, 'N=%d: (%.3f,%.3f)' % (i, d[0], d[1])), range(Ndata), subpix_shift_linear)
    verbose(3, '===============================FINAL DISPLACEMENTS==================================')
    
    ################
    ## -22- write delta_r to file
    ################    
    if subpix_disp:
        p.delta_r = delta_r

    for f in hooks['post_ML']:
        verbose(3, 'Calling post-ML hooks')
        f(locals(),globals())
	
    if parallel:
        header = 'Process # %d ' % prank
    else:
        header = ''
    verbose(1, header + 'Finished', mpi=True)
    

    p.obj = obj
    p.probe = probe
    p.LL_list = LL_list
    if subpix_disp:
        if parallel:
            alldisp = comm.gather(subpix_shift_linear, root=0)
            if prank == 0:
                subpix_shift_linear = []
                for dsp in zip(*alldisp):
                    gooddsp = [x for x in dsp if x is not None][0]
                    subpix_shift_linear.append(gooddsp)
        p.subpix_shift_linear = subpix_shift_linear
    if hasattr(verbose, 'get_log'):
        p.verbose_log = verbose.get_log()
    p.history.append(('ML', numit, LL_list))
    #p.probe_view = probe_view
    #p.obj_view = obj_view

    if remove_incoherent_scattering:
        p.Qinc = Qinc
        p.Iinc = Iinc
        p.Qinc_support = Qinc_support

    # Save reconstruction    
    if p.save and (not parallel or prank==0):
        #filename = p.run_name + '_ML.h5'
        #save_run(filename, p.paramdict)
        save_run(save_file, p.paramdict)
        verbose(2, 'Saved reconstruction in file %s' % save_file)

    if p.MPI_timing:
        fname = os.path.abspath(os.path.curdir) + '/Timing_proc_%03d.h5' % prank
        io.h5write(fname, timing=comm.timing)

    # Last plot (blocking!)
    if p.last_plot and (not parallel or prank==0):
        pyplot.interactive(False)
        plot_axes[0].imshow(np.abs(obj[0]))
        plot_axes[1].imshow(np.angle(obj[0]))
        plot_axes[2].imshow(U.imsave(probe[0]))
        plot_axes[3].plot(LL_list)
        pyplot.show()

    return p.paramdict

class Regul(object):
    """\
    Abstract base class for regularization terms.

    Subclasses are expected to implement `gradient` and `poly_line_coeffs`.
    """
    def __init__(self, axes):
        # Cached gradient from the most recent `gradient` call.
        self.g = None
        # Cached line-search polynomial coefficients.
        self.coeffs = None
        # Axes along which the regularizer acts.
        self.axes = axes

    def gradient(self, x, **kwargs):
        """Return the gradient of the regularization term at x."""
        raise NotImplementedError

    def poly_line_coeffs(self, h, **kwargs):
        """Return polynomial coefficients for a line search along direction h."""
        raise NotImplementedError
        
        
class Regul_del2(Regul):
    """\
    Regularizer - Gaussian prior.

    Penalizes squared forward and backward finite differences of the
    array along the two configured axes (smoothness prior).
    """
    def __init__(self, amplitude, axes=[-2,-1]):
        Regul.__init__(self, axes)
        # Strength of the regularization term.
        self.amplitude = amplitude
        # Cached finite differences from the last gradient evaluation.
        self.delxy = None

    def gradient(self, x):
        """Return the gradient of the Gaussian prior at x (also cached)."""
        ax0, ax1 = self.axes
        # Forward and backward first differences along both axes.
        fwd0 = U.delxf(x, axis=ax0)
        fwd1 = U.delxf(x, axis=ax1)
        bwd0 = U.delxb(x, axis=ax0)
        bwd1 = U.delxb(x, axis=ax1)

        self.delxy = [fwd0, fwd1, bwd0, bwd1]
        # Discrete Laplacian-like combination of the differences.
        self.g = 2 * self.amplitude * (bwd0 + bwd1 - fwd0 - fwd1)

        return self.g

    def poly_line_coeffs(self, h, x=None):
        """Quadratic coefficients [c0, c1, c2] of the prior along direction h.

        If x is None, reuses the finite differences cached by `gradient`.
        """
        ax0, ax1 = self.axes
        if x is None:
            fwd0, fwd1, bwd0, bwd1 = self.delxy
        else:
            fwd0 = U.delxf(x, axis=ax0)
            fwd1 = U.delxf(x, axis=ax1)
            bwd0 = U.delxb(x, axis=ax0)
            bwd1 = U.delxb(x, axis=ax1)

        # Differences of the search direction.
        hfwd0 = U.delxf(h, axis=ax0)
        hfwd1 = U.delxf(h, axis=ax1)
        hbwd0 = U.delxb(h, axis=ax0)
        hbwd1 = U.delxb(h, axis=ax1)

        # Constant, linear, and quadratic terms in the step length.
        c0 = self.amplitude * (U.norm2(fwd0) + U.norm2(fwd1)
                               + U.norm2(bwd0) + U.norm2(bwd1))
        c1 = 2 * self.amplitude * np.real(np.vdot(fwd0, hfwd0)
                                          + np.vdot(fwd1, hfwd1)
                                          + np.vdot(bwd0, hbwd0)
                                          + np.vdot(bwd1, hbwd1))
        c2 = self.amplitude * (U.norm2(hfwd0) + U.norm2(hfwd1)
                               + U.norm2(hbwd0) + U.norm2(hbwd1))

        self.coeff = [c0, c1, c2]
        return self.coeff


class Regul_TV(Regul):
    """\
    Regularizer - Exponential prior (Total variation).

    Penalizes the sum of absolute forward and backward finite differences
    along the two configured axes (edge-preserving prior).
    """
    def __init__(self, amplitude, axes=[-2,-1]):
        Regul.__init__(self, axes)
        # Strength of the regularization term.
        self.amplitude = amplitude
        # Cached finite differences from the last gradient evaluation.
        self.delxy = None        
        # Cached clamped moduli of the differences (same order as delxy).
        self.abs_delxy = None        
        
    def gradient(self, x):
        """Return the gradient of the TV prior at x (also caches differences)."""
        ax0,ax1 = self.axes
        del_xf = U.delxf(x,axis=ax0)
        del_yf = U.delxf(x,axis=ax1)
        del_xb = U.delxb(x,axis=ax0)
        del_yb = U.delxb(x,axis=ax1)

        # Clamp moduli away from zero to avoid division by zero below.
        epsilon = 1e-10
        abs_del_xb = np.abs(del_xb)
        abs_del_yb = np.abs(del_yb)
        abs_del_xf = np.abs(del_xf)
        abs_del_yf = np.abs(del_yf)

        abs_del_xb[abs_del_xb < epsilon] = epsilon
        abs_del_yb[abs_del_yb < epsilon] = epsilon
        abs_del_xf[abs_del_xf < epsilon] = epsilon
        abs_del_yf[abs_del_yf < epsilon] = epsilon

        self.delxy = [del_xf, del_yf, del_xb, del_yb]
        self.abs_delxy = [abs_del_xf, abs_del_yf, abs_del_xb, abs_del_yb]

        # Gradient of sum |grad x|: unit-phase differences combined.
        self.g = self.amplitude * (del_xb/abs_del_xb + del_yb/abs_del_yb - del_xf/abs_del_xf - del_yf/abs_del_yf)

        return self.g
        
    def poly_line_coeffs(self,h,x=None):
        """Line-search coefficients [c0, c1, 0.] of the TV prior along h.

        If x is None, reuses the differences cached by `gradient`.
        The quadratic term is not modeled (set to 0).
        """
        ax0,ax1 = self.axes
        if x is None:
            del_xf,del_yf,del_xb,del_yb = self.delxy
            # BUG FIX: was 'abs_self.delxy' (NameError); use the cached moduli.
            abs_del_xf,abs_del_yf,abs_del_xb,abs_del_yb = self.abs_delxy
        else:
            del_xf = U.delxf(x,axis=ax0)
            del_yf = U.delxf(x,axis=ax1)
            del_xb = U.delxb(x,axis=ax0)
            del_yb = U.delxb(x,axis=ax1)

            # Clamp moduli away from zero to avoid division by zero below.
            epsilon = 1e-10
            abs_del_xb = np.abs(del_xb)
            abs_del_yb = np.abs(del_yb)
            abs_del_xf = np.abs(del_xf)
            abs_del_yf = np.abs(del_yf)
    
            abs_del_xb[abs_del_xb < epsilon] = epsilon
            abs_del_yb[abs_del_yb < epsilon] = epsilon
            abs_del_xf[abs_del_xf < epsilon] = epsilon
            abs_del_yf[abs_del_yf < epsilon] = epsilon
            
        hdel_xf = U.delxf(h,axis=ax0)
        hdel_yf = U.delxf(h,axis=ax1)
        hdel_xb = U.delxb(h,axis=ax0)
        hdel_yb = U.delxb(h,axis=ax1)
        
        c0 = self.amplitude * (abs_del_xf.sum() + abs_del_yf.sum() + abs_del_xb.sum() + abs_del_yb.sum())
        # BUG FIX: was 'h_del_*' (NameError); the locals are named 'hdel_*'.
        c1 = self.amplitude * np.real(np.vdot((del_xf / abs_del_xf), hdel_xf) +\
                                      np.vdot((del_yf / abs_del_yf), hdel_yf) +\
                                      np.vdot((del_xb / abs_del_xb), hdel_xb) +\
                                      np.vdot((del_yb / abs_del_yb), hdel_yb)) 
        self.coeff = [c0,c1,0.]
        return self.coeff    
        


def ortho(modes):
    """\
    Orthogonalize the given list of modes.

    Diagonalizes the mode overlap matrix and returns (amp, nplist):
    normalized relative powers and the recombined modes, ordered by
    decreasing eigenvalue.
    """
    nmodes = len(modes)
    # Hermitian overlap matrix of all mode pairs.
    overlap = np.array([[np.vdot(q, p) for p in modes] for q in modes])
    evals, evecs = np.linalg.eig(overlap)
    # Sort eigenvector indices by decreasing eigenvalue.
    order = (-evals).argsort()
    nplist = [sum(modes[i] * evecs[i, j] for i in range(nmodes)) for j in order]
    # Relative power of each recombined mode, normalized to unit sum.
    amp = np.array([U.norm2(m) for m in nplist])
    amp /= amp.sum()
    return amp, nplist
 
def save_run(filename, p):
    """\
    Save the complete parameter dictionary including the reconstruction results.

    Parameters
    ----------
    filename : str
        Output HDF5 file path.
    p : dict
        Full parameter dictionary; every entry is written as a keyword
        dataset via io.h5write.
    """
    io.h5write(filename, **p)
    
def load_run(filename):
    """\
    Return the complete dictionary from a previous run.

    Reads the HDF5 file, replaces the stored MPI information with the
    current configuration, and recomputes the data division.
    """
    params = io.h5read(filename)

    # Overwrite stale MPI info with the current process configuration.
    params['parallel'] = parallel
    params['psize'] = psize
    params['prank'] = prank

    # Recompute how to divide the data with the current MPI configuration.
    return _prepare_datainfo(params)
    

def load_intens(p=None, **kwargs):
    """\
    Load intensities using parameters in p.

    Returns (I, w), where I is the list of intensities and w is the list
    of weights (according to Poisson statistics).
    """
    # Promote plain dicts to attribute-access parameter objects.
    # (**kwargs is accepted for interface compatibility but not used.)
    if type(p) == dict:
        p = U.Param(p)
    return _load_intens_or_fmag(p.datainfo, p.Ndata, p.Nscan, p.ctr,
                                is_intens=True, flip=p.flip_data,
                                intens_renorm=p.intens_renorm)

def load_fmag(p=None, **kwargs):
    """\
    Load Fourier magnitudes using parameters in p.

    Returns (fmag, fmask), where fmag is the list of Fourier magnitudes
    and fmask is the list of masks.
    """
    # Promote plain dicts to attribute-access parameter objects.
    # (**kwargs is accepted for interface compatibility but not used.)
    if type(p) == dict:
        p = U.Param(p)
    return _load_intens_or_fmag(p.datainfo, p.Ndata, p.Nscan, p.ctr,
                                is_intens=False, flip=p.flip_data,
                                intens_renorm=p.intens_renorm)

def _matlab_transpose(a):
    """\
    Helper function to take care of matlab's array ordering format.
    """
    if a.ndim == 3:
        #aout = a.transpose((2,1,0)).swapaxes(1,2)
        aout = a.transpose((2,0,1))
    elif a.ndim == 2:
        #aout = a.swapaxes(0,1)
        aout = a
    else:
        aout = a
    return aout

def _load_data(f, *args):
    """\
    Helper function that tries to guess the file type.

    Dispatches on the file extension: .mat files go through io.loadmat,
    .h5/.hd5 through io.h5read. Raises RuntimeError for anything else.
    """
    path = os.path.abspath(os.path.expanduser(f))
    ext = os.path.splitext(path)[1]
    if ext == '.mat':
        d = io.loadmat(path, *args)
    elif ext in ('.h5', '.hd5'):
        d = io.h5read(path, *args)
    else:
        raise RuntimeError('File : %s\nType not known.' % path)
    verbose(2, 'Loaded file ' + path + ' successfully.')
    return d

load_data = _load_data
    
def _load_intens_or_fmag(datainfo, Npts, Nscan, ctr, is_intens=True, flip=True, intens_renorm=None):
    """\
    Helper function to load data.
    
    Depending on the value of is_intens, this returns either the intensities and the weights
    according to Poisson statistics (I,w) or the Fourier magnitudes and the mask (fmag,fmask).

    Parameters
    ----------
    datainfo : dict
        Maps data filenames to lists of (index_in_file, global_index) pairs;
        an empty list or None means another MPI process handles that file.
    Npts : int
        Total number of diffraction patterns (length of the returned lists).
    Nscan : int
        Number of scans (kept for interface compatibility; not used here).
    ctr : array-like or None
        Pattern center; if given, each frame and mask is shifted by ctr-1.
    is_intens : bool
        If True return (I, w); otherwise return (fmag, fmask).
    flip : bool or str
        'lr' or 'ud' to flip one axis, any other truthy value flips both;
        falsy disables flipping.
    intens_renorm : dict or None
        Optional per-file intensity renormalization factors.
    """
    def flipper(A):
        # Apply the requested flip convention to a 2D frame.
        if flip=='lr':
            return np.fliplr(A)
        elif flip=='ud':
            return np.flipud(A)
        else:
            # backward compatibility where flip means "flip all axis"
            return np.flipud(np.fliplr(A))
            
    if is_intens:
        I = Npts*[None,]
        w = Npts*[None,]
    else:
        fmag = Npts*[None,]
        fmask = Npts*[None,]
    num_not_None = 0

    # Loop through data files and indices.
    for datafile, indexlist in datainfo.items():
        
        # indexlist can be None or empty for MPI runs (meaning that all data
        # in datafile is read by another process): skip the file entirely.
        # BUG FIX: was 'pass', which fell through and still loaded the file
        # (and crashed below when indexlist is None).
        if not indexlist:
            continue

        root,ext = os.path.splitext(datafile)
        is_matlab = (ext == '.mat')

        # Load file
        d = _load_data(datafile)
        fm = d['fmask'].astype(FType)

        # Files saved with matlab are organized differently
        if is_matlab:
            verbose(3,'Data file is matlab file.')
            data = _matlab_transpose(d['data'].copy())
            fm = _matlab_transpose(fm)
        else:
            data = d['data']
        del d

        # Per-file renormalization (intensity and amplitude scales).
        irenorm = 1.
        frenorm = 1.
        if intens_renorm is not None:
            irenorm = intens_renorm[datafile]
            frenorm = np.sqrt(intens_renorm[datafile])

        # A single 2D mask applies to every frame: shift/flip it once.
        if fm.ndim == 2:
            if ctr is not None:
                fm = U.pshift(fm, ctr-1)
            if flip:
                fm = flipper(fm)

        # Put each diffraction pattern at its place
        for j,k in indexlist:
            if fm.ndim == 3:
                # Per-frame mask: shift/flip each frame individually.
                if ctr is not None:
                    fmask_k = U.pshift(fm[j],ctr-1)
                else:
                    fmask_k = fm[j]
                if flip:
                    fmask_k = flipper(fmask_k)
            else:
                fmask_k = fm
            if ctr is not None:
                intens = U.pshift(data[j],ctr-1)
            else:
                intens = data[j]
            if flip:
                intens = fmask_k * flipper(intens)
            else:
                intens *= fmask_k
            if is_intens:
                I[k] = irenorm * intens.astype(FType)
                # NOTE(review): I[k] already includes irenorm, so irenorm is
                # effectively applied twice in this weight — confirm intended.
                w[k] = (fmask_k / (irenorm * I[k] + 1)).astype(FType)
            else:
                fmag[k] = frenorm * np.real(np.sqrt(intens)).astype(FType)
                fmask[k] = fmask_k.astype(FType)
            num_not_None += 1


    if parallel:
        if num_not_None == 1:
            verbose(2, 'Process #%d: 1 dataset to manage.' % prank, mpi=True)
        else:
            verbose(2, 'Process #%d: %d datasets to manage.' % (prank, num_not_None), mpi=True)

    if is_intens:
        return I, w
    else:
        return fmag, fmask



def _prepare_datainfo(p):
    """\
    Helper to divide the data to load between processes.

    Assigns to each MPI process a contiguous range of global data indices
    and records, per data file, the (index_in_file, global_index) pairs
    that this process owns. Returns a dict if a dict was passed in.
    """
    was_dict = False
    if type(p) == dict:
        was_dict = True
        p = U.Param(p, store=True)

    Ndata = p.get('Ndata')
    Ndata_scan = p.get('Ndata_scan')
    # Backward compatibility with older parameter files.
    if Ndata is None:
        Ndata = p.numpts_scan * p.Nscan
        p.Ndata = Ndata
    if Ndata_scan is None:
        Ndata_scan = p.numpts_scan
        p.Ndata_scan = Ndata_scan

    # Half-open global index range [first, last) owned by this process:
    # each process gets Ndata//psize items, the first Ndata%psize
    # processes get one extra.
    if parallel:
        base, extra = divmod(Ndata, psize)
        first = prank * base + min(extra, prank)
        last = first + base + (1 if prank < extra else 0)
    else:
        first = 0
        last = Ndata

    # Walk all files and keep only the indices in our range.
    counter = 0
    datainfo = {}
    for k, dfile in enumerate(p.datafile_list):
        indexlist = []
        if p.flat_object and k == p.num_objects - 1:
            npts = p.Ndata_flat
        else:
            npts = Ndata_scan
        for j in range(npts):
            if first <= counter < last:
                indexlist.append((j, counter))
            counter += 1
        datainfo[str(dfile)] = indexlist

    p.datainfo = datainfo

    return p.paramdict if was_dict else p
    
def plotLL(LLarr, itcg, Ndata):
    """\
    Save two scatter plots of the per-point log-likelihood values.

    Writes 'LL0_<itcg>.png' with the first 100 points and
    'LL1_<itcg>.png' with the remainder.
    NOTE(review): the second plot spans indices 101..Ndata-2, so index
    100 and the last point are excluded — confirm intended.
    """
    xaxis = np.arange(0, Ndata)

    # First segment: points 0..99.
    plt.figure(figsize=(20, 6))
    plt.plot(xaxis[0:100], LLarr[0:100], 'r*')
    plt.xlim(-1, 100)
    plt.savefig('LL0_' + str(itcg) + '.png', dpi=100)
    plt.clf()
    plt.close()

    # Second segment: the remaining points.
    plt.figure(figsize=(20, 6))
    plt.plot(xaxis[101:Ndata - 1], LLarr[101:Ndata - 1], 'r*')
    plt.xlim(100, Ndata)
    plt.savefig('LL1_' + str(itcg) + '.png', dpi=100)
    plt.clf()
    plt.close()

