# -*- coding: utf-8 -*-
"""\
Main ptycho module. Might be broken in pieces soon as the file is getting long.

Author: Pierre Thibault
Started July 5th 2010
"""

__all__ = ['useMPI', 'FType', 'CType', 'default_type', 'prepare_params',
           'print_summary', 'ptycho_DM', 'ptycho_ML', 'ptycho_ePIE', 'save_run', 'load_run',
           'load_intens', 'load_fmag', 'verbose', 'Plotter', 'load_data',
           'raster_scan_positions', 'round_scan_positions',
           'round_scan_ROI_positions', 'default_parameters',
           'prepare_data', 'ortho', 'hooks']

from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
from numpy import linalg as LA

DEBUG = False
USE_FFTPACK = True
import csv
import numpy as np
#from numpy import fft as FFT
from pyE17 import utils as U
from pyE17 import prop
from pyE17 import io
from pyE17 import wave


io.h5options['UNSUPPORTED'] = 'ignore' # otherwise saving e.g. a function will fail. 
from pyE17 import verbose

#if USE_FFTPACK:
import scipy

from scipy import fftpack as FFT

# Warn when the installed scipy.fftpack is too old to do single-precision FFTs.
# Parse defensively: version strings such as '1.2.0rc1' contain non-numeric
# components and would make a blind int() conversion raise ValueError.
try:
    _scipy_version = [int(x) for x in scipy.__version__.split('.')[:2]]
except ValueError:
    _scipy_version = None  # unparsable (pre-release tag, etc.) - assume recent
if _scipy_version is not None and _scipy_version < [0, 8]:
    print('Warning: scipy.fftpack version (< 0.8) does not support single-precision ffts.')
    
import time
import sys
import os
import os.path
import re
from data_tools import prepare_data
from itertools import izip

import scipy.ndimage
def gaussian_filter(x, sigma):
    """Gaussian-blur a (possibly complex) array.

    Real and imaginary parts are filtered independently with
    scipy.ndimage.gaussian_filter, which only accepts real input.
    """
    blurred_re = scipy.ndimage.gaussian_filter(x.real, sigma)
    blurred_im = scipy.ndimage.gaussian_filter(x.imag, sigma)
    return blurred_re + 1j * blurred_im

# Debug-message helper. When DEBUG is enabled, messages are routed through
# the pyE17 Verbose machinery with MPI-aware output; otherwise `debug` is a
# no-op lambda so call sites can remain in place at negligible cost.
if DEBUG:
    Debug = U.verbosemod.Verbose()
    Debug.mpi_action = True
    debug = lambda msg: Debug(1, msg)
else:
    debug = lambda msg: None

# A timing wrapper class.

def _load_default_parameters():
    """\
    Load default reconstruction parameters.

    Looks for the file 'ptycho.default', starting from the current working
    directory and walking up towards the root of the filesystem. If no such
    file can be loaded, the hard-coded parameters below are used.

    Returns a U.Param instance holding the defaults.
    """
    ptycho_default_filename = 'ptycho.default'
    d = os.path.abspath(os.path.curdir)
    p = None
    while True:
        param_filename = os.path.join(d, ptycho_default_filename)
        try:
            p = U.Param(param_filename)
            verbose(2, 'Loaded default parameter file "%s".' % param_filename)
            break
        except Exception:
            # File missing or unparsable at this level: go one directory up.
            # (Was a bare "except:", which also swallowed KeyboardInterrupt
            # and SystemExit.)
            d, t = os.path.split(d)
            if not t:
                # Reached the filesystem root without finding a file.
                break
    if p is None:
        # No parameter file found anywhere: fall back to hard-coded defaults.
        p = U.Param()
        p.update(dict(
            clip_object=False,
            clip_min=.5,
            clip_max=1.,
            dump_data=False,
            dump_interval=10,
            dump_pattern='./%(run_name)s_dump.hd5',
            plot_fig=None,
            plot_interval=10,
            average_start=50,
            average_interval=5,
            probe_change_start=3,
            pbound=None,
            quad_interval=5,
            scale_precond=True,
            data_type='double',
            #initial_probe = None,
            initial_object_type='ones',
            save=True,
            save_dir_patt='./',
            run_label='0',
            last_plot=False,
            ctr=None,
            flip_data=True,
            use_periodic_boundary=False,
            average_probes=False,
            average_probe_amp=1e-2,
            dump_plot=False,
            dump_plot_patt='dump_plot_%04d.png',
            flat_object=False,
            flat_scan=None,
            flat_object_weight=1.,
            dp_shift=False,
            probe_support=False,
            probe_support_type='disc',
            probe_support_area=.5,
            probe_support_start=0,
            probe_before_object=False,
            reg_del2=False,
            reg_del2_amplitude=1e3,
            reg_TV=False,
            reg_TV_amplitude=1e3,
            reg_Huber=False,
            reg_Huber_amplitude=1e3,
            reg_Huber_parameter=1e-6,
            Ndp=1,
            Nmodes_probe=1,
            Nmodes_object=1,
            remove_incoherent_scattering=False,
            incoherent_scattering_mask_count=50,
            proportional_probes=False,
            nearfield=False,
            ePIE_regul_probe=0.,
            ePIE_regul_object=0.,
            ePIE_alpha=1.,
            ePIE_beta=1.,
            intens_renorm=None,
            object_smooth_gradient=0.,
            scale_probe_object=1.,
            dump_object=False,
            dump_object_interval=1,
            dump_probe=False,
            dump_probe_interval=1,
            ML_type='Gauss',
            float_intens=False,
            fourier_relax_factor=.5,
            scan_roi=None,
            probe_change_start_ML=0,
            plot_mask=None,
            MPI_timing=False,
            LL_in_DM=False,
            subpix=False,
            subpix_method='fourier',
            subpix_start=0,
            DM_smooth_amplitude=None,
            DM_smooth_std=5.
            ))

    # (A no-op "p.update()" call that used to sit here was removed.)
    return p

# Parallel computation setup.
# Try to initialize MPI through mpi4py; on any failure fall back to a
# single-process configuration (rank 0, size 1, no communicator).
global comm

try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    psize = comm.Get_size()  # number of MPI processes
    prank = comm.Get_rank()  # rank of this process
except:
    # NOTE(review): bare except also hides failures unrelated to a missing
    # mpi4py (e.g. a broken MPI runtime) - consider narrowing to ImportError.
    verbose(2, 'MPI initialization failed. Proceeding with one processor')
    MPI = None
    psize = 1
    prank = 0
    comm = None

# True when more than one MPI process is available.
global parallel
parallel = not (psize == 1)

def useMPI(do=None):
    """\
    Query or toggle the use of MPI.

    Called without argument, returns the current parallel flag. Otherwise
    enables/disables parallel operation; enabling only takes effect when
    mpi4py was successfully imported at module load time.
    """
    global parallel
    if do is None:
        return parallel
    parallel = False if MPI is None else do

# Module-level default parameter set, resolved once at import time.
default_parameters = _load_default_parameters()

# Default single/double precision
# FType/CType are the numpy scalar types used for real and complex arrays;
# they are set by default_type() below.
global FType, CType
FType = None
CType = None
def default_type(datatype):
    """\
    default_type('single')
    or
    default_type('double')
    
    Select the floating point precision for reconstructions.

    Sets the module-level FType (real) and CType (complex) numpy scalar
    types: float32/complex64 for 'single', float64/complex128 for 'double'.

    Raises RuntimeError for any other argument.
    """
    if datatype not in ['single', 'double']:
        raise RuntimeError("Expected 'single' or 'double'.")
    global FType, CType
    # np.dtype understands 'single'/'double' directly; this replaces the
    # former use of np.typeDict, which was deprecated and later removed
    # from numpy, while producing identical types.
    itemsize = np.dtype(datatype).itemsize
    FType = np.dtype('f%d' % itemsize).type
    CType = np.dtype('c%d' % (2 * itemsize)).type
    verbose(3, 'Default data type is %s' % datatype)

default_type(default_parameters.data_type)

def raster_scan_positions(nx, ny, sx, sy):
    """Return an (nx+1) x (ny+1) raster grid of (x, y) positions with steps sx, sy."""
    grid_i, grid_j = np.indices((nx + 1, ny + 1))
    return [(sx * ii, sy * jj) for ii, jj in zip(grid_i.ravel(), grid_j.ravel())]

def round_scan_positions(r_in, r_out, nr, nth):
    """\
    Round scan positions, defined as in spec and matlab.
    """
    dr = (r_out - r_in) / nr
    positions = []
    # Ring "ring" carries nth*ring equally spaced points at radius r_in + ring*dr.
    for ring in range(1, nr + 2):
        radius = r_in + ring * dr
        step = 2 * np.pi / (nth * ring)
        for k in range(nth * ring):
            angle = k * step
            positions.append((radius * np.sin(angle), radius * np.cos(angle)))
    return positions

def round_scan_ROI_positions(dr, lx, ly, nth):
    """\
    Round scan positions with ROI, defined as in spec and matlab.

    Points outside the lx-by-ly rectangle centered at the origin are dropped.
    """
    rmax = np.sqrt((lx / 2) ** 2 + (ly / 2) ** 2)
    nr = np.floor(rmax / dr) + 1
    positions = []
    for ring in range(1, int(nr + 2)):
        radius = ring * dr
        angles = 2 * np.pi * np.arange(nth * ring) / (nth * ring)
        xs = radius * np.sin(angles)
        ys = radius * np.cos(angles)
        for px, py in zip(xs, ys):
            # Keep only points inside the rectangular region of interest.
            if np.abs(px) <= ly / 2 and np.abs(py) <= lx / 2:
                positions.append((px, py))
    return positions

# -A- Shifting functions for subpixel precision in probe positions
def subpix_fourier(a, ramp, forward=True):
    """\
    Perform a shift with the provided ramp.

    Multiplies the spectrum of `a` by `ramp` (or by its complex conjugate
    when forward is False, undoing the shift) and transforms back.
    """
    phase = ramp if forward else ramp.conj()
    return np.fft.ifftn(np.fft.fftn(a) * phase)

# -B- subpix linear
def subpix_linear(a, pts, forward=True):
    """\
    Perform a subpixel shift using bilinear interpolation.
    """
    x0, x1 = pts
    if not forward: x0, x1 = -x0, -x1
    a00 = U.pshift(a, [np.floor(x0), np.floor(x1)], 'nearest')
    a01 = U.pshift(a, [np.floor(x0), np.ceil(x1)], 'nearest')
    a10 = U.pshift(a, [np.ceil(x0), np.floor(x1)], 'nearest')
    a11 = U.pshift(a, [np.ceil(x0), np.ceil(x1)], 'nearest')
    x0 = x0 % 1
    x1 = x1 % 1
    return a00 * (1 - x0) * (1 - x1) + a01 * (1 - x0) * x1 + a10 * x0 * (1 - x1) + a11 * x0 * x1

# List of hooks that can be called at various points in the reconstructions.
# Each value is a list of callables; client code appends functions to the
# entry it wants to instrument. Judging by the names, each list is presumably
# invoked at the corresponding stage (pre/loop/post) of the ptycho_DM,
# ptycho_ML and ptycho_ePIE routines defined elsewhere in this module
# (call sites not visible in this chunk - verify before relying on timing).
hooks = dict(
pre_DM=[],
loop_DM=[],
obj_inner_loop_DM=[],
post_DM=[],
pre_ML=[],
loop_ML=[],
post_ML=[],
pre_ePIE=[],
loop_ePIE=[],
post_ePIE=[]
)

   
def prepare_params(pdict, **kwargs):
    """\
    Preparation routine for ptychographic reconstructions of any flavor.
    
    The routine draws the initial parameters from three possible sources, in
    this order:
        1) the key=value pairs in the argument
        2) pdict, the provided dictionnary
        3) default_parameters, a dictionnary containing some (but not all) default
           parameters.
    Two intended uses of this routine are:
        a) For script initialization. For convenience, or readability, important
           parameters can be defined in a script, at the end of which a call to
           prepare_params(globals()) will take care of extracting all necessary parameters
           and derive the missing parts.
        b) To update an already existing parameter dictionnary.

    Returns the plain dictionary underlying the assembled parameter set
    (p.paramdict).
    """
    # BUGFIX: 'comm' is rebound below (MPI timing wrapper) and read earlier
    # (flat-scan broadcast). Without this declaration Python treats 'comm'
    # as a local variable throughout the function, raising UnboundLocalError
    # on first use.
    global comm

    # Npos: number of positions in a scan
    # Nscan: number of scans
    # Npts : total number of points to iterate over
    # Npts_scan : number of points to iterate over in a single scan
    # Npos_flat : number of positions in a flat scan
    # Npts_flat : number of points to iterate over in the flat scan
    # Nmodes_probe : number of probe modes
    # Nmodes_object : number of object modes
    # Nmodes : total number of extra modes
    # Ndp : number of diffraction patterns per scan point.
    # Ndata : total number of diffraction patterns
    # Ndata_scan : number of diffraction patterns in a scan
    # Ndata_flat : number of diffraction patterns in the flat scan
    # 
    # Nmodes = Nmodes_probe * Nmodes_object
    # Npts_scan = Ndp * Nmodes * Npos
    # Npts_flat = Ndp * Nmodes * Npos_flat
    # Ndata = Ndp * Npos * Nscan
    # Ndata_scan = Ndp * Npos
    # Ndata_flat = Ndp * Npos_flat
    # Npts = Nscan * Npts_scan + Npts_flat
    #      = Ndp * Nmodes * (Nscan*Npos + Npos_flat)

    p = U.Param(kwargs, pdict, default_parameters)

    verbose.set_level(p.verbose_level)

    verbose(3, 'Entering "prepare_params"...')

    # Set data_type
    default_type(p.data_type)
    verbose(3, '%-25s%15s' % ('Data type:', p.data_type))

    # Prefix, suffix, run_name
    p.asize = np.asarray(p.asize)
    asize = p.asize
    suffix = '_%03dx%03d_%s' % (tuple(asize) + (p.run_label,))
    scans = p.scans
    Nscan = len(scans)
    verbose(3, '%-25s%15d' % ('Number of scans:', Nscan))

    prefix = scans[0]
    if Nscan > 1:
        prefix += '_' + scans[-1]
    run_name = prefix + suffix
    verbose(3, '%-25s%25s' % ('Run name:', run_name))

    p.suffix = suffix
    p.Nscan = Nscan
    p.prefix = prefix
    p.run_name = run_name
    if p.save:
        p.save_file = (p.save_dir_patt % {'write_dir':prefix}) + p.run_name + '_%s.h5'
        verbose(3, '%-25s%25s' % ('File will be saved to:', p.save_file))
    else:
        verbose(2, 'No file will be saved at the end of this run.')

    if p.dp_shift is not None and p.Nscan == 1:
        verbose(2, 'Only one scan is being used in this reconstruction. dp_shift will be reset to False')
        p.dp_shift = False

    # Wavelength (meters), from the photon energy in keV.
    lam = 1.2398e-9 / p.energy

    # Near/far field
    nearfield = p.nearfield
    if nearfield:
        dx_spec = p.ds    # resolution in the specimen plane = pixel size.
        verbose(3, '%-25s%f x %f (nanometers)' % ('Resolution:', 1e9 * dx_spec[0], 1e9 * dx_spec[1]))
    else:
        dx_spec = lam * p.z / (asize * p.ds)  # resolution in the specimen plane
        verbose(3, '%-25s%f x %f (nanometers)' % ('Resolution:', 1e9 * dx_spec[0], 1e9 * dx_spec[1]))

    p.lam = lam
    p.dx_spec = dx_spec
    verbose(3, '%-25s%f x %f (meters)' % ('Resolution:', p.dx_spec[0], p.dx_spec[1]))

    # Prepare scan positions
    scan_type = p.scan_type
    verbose(3, '%-25s%25s' % ('Scan type:', p.scan_type))
    if scan_type == 'raster':
        pos = np.array(raster_scan_positions(p.nx, p.ny, p.step_size_x, p.step_size_y))
    elif scan_type == 'round':
        pos = np.array(round_scan_positions(p.radius_in, p.radius_out, p.nr, p.nth))
    elif scan_type == 'round_roi':
        pos = np.array(round_scan_ROI_positions(p.dr, p.lx, p.ly, p.nth))
    elif scan_type == 'custom':
        pos = np.asarray(p.positions)
    else:
        raise RuntimeError('Unknown scan type : %s' % str(scan_type))

    # Here apply roi constraint and keep track of the position indices
    # pos_in_roi = ... bool
    # data_pos_index = ...
    # Npos_no_roi = len(pos)
    # pos = pos[pos_in_roi]

    Npos = len(pos)
    # Smallest distance from the first position to any other (used as a step
    # estimate for the synthetic flat scan below).
    pos_mindist = dx_spec
    if Npos > 1:
        pos_mindist = min([U.norm(ppos - pos[0]) for ppos in pos[1:]]) 
    # Convert metric positions to pixel units; split into the integer pixel
    # shift and a sub-pixel remainder.
    pos_subpix_portion = (pos - pos.min(axis=0)) / dx_spec
    pos = np.round(pos_subpix_portion)
    pos_dev = pos_subpix_portion - pos           # Sub-pixel correction
    
    p.pos_subpix_portion = pos_subpix_portion

    Nmodes_probe = p.Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = Nmodes_probe * Nmodes_object
    p.Nmodes = Nmodes
    Ndp = p.Ndp
    if Ndp != 1:
        raise RuntimeError('Multiple diffraction patterns per point (multiple exposures) not yet implemented!')

    # Bookkeeping totals (see the naming conventions above).
    Npts_scan = Nmodes * Ndp * Npos
    Npts = Nscan * Npts_scan
    Ndata_scan = Ndp * Npos
    Ndata = Nscan * Ndata_scan
    p.Npts_scan = Npts_scan
    p.Npts = Npts
    p.Npos = Npos
    p.Ndata = Ndata
    p.Ndata_scan = Ndata_scan
    verbose(3, '%-25s%d' % ('Number of points per scan: ', p.Npos))
    verbose(3, '%-25s%d' % ('Total number of points: ', p.Npts))


    # Diffraction pattern center defaults to the frame center.
    if p.ctr is None:
        p.ctr = p.asize // 2
    else:
        p.ctr = np.asarray(p.ctr)
    verbose(3, '%-25s%25s' % ('Diffraction pattern center: ', str(tuple(p.ctr))))

    # Scan pattern: which probe/object index each scan uses.
    scan_pattern = p.scan_pattern
    if scan_pattern is None:
        pr_indices = p.scan_pattern_probe_indices
        ob_indices = p.scan_pattern_object_indices
    else:
        # An interval of 0/None means "never switch" (2**15 is effectively
        # infinite here).
        pr_interval = scan_pattern[0] or 2 ** 15
        ob_interval = scan_pattern[1] or 2 ** 15
        pr_indices = np.arange(Nscan) // pr_interval
        ob_indices = np.arange(Nscan) // ob_interval

    subpix = p.subpix
    if subpix:
        verbose(2, 'Using subpixel precision (after %dth iteration) for probe positions (interpollation method "%s").' % (p.subpix_start, p.subpix_method))

    # List of data files to load and positions
    datafile_list = []
    scan_info = {}
    positions = []
    for i, scan in enumerate(p.scans):
        pathdir = p.pathdir_patt % {'scan':scan}
        datafile_name = p.datafile_patt % {'path':pathdir, 'scan':scan, 'a0':asize[0], 'a1':asize[1]}
        datafile_list.append(datafile_name)
        pri = pr_indices[i]
        obi = ob_indices[i]           
        for j in range(Npos):
            # Each entry: (row, col, probe index, object index, row_dev, col_dev)
            positions.append(tuple(pos[j]) + (pri, obi) + tuple(pos_dev[j]))
        scan_info[str(scan)] = [datafile_name, len(positions) - Npos, len(positions)]
    p.datafile_list = datafile_list
    p.scan_info = scan_info

    if len(p.datafile_list) == 1:
        verbose(3, '%-25s%s' % ('Data file:', p.datafile_list[0]))
    else:
        verbose(3, '%s\n%s\n...\n%s' % ('Data files:', p.datafile_list[0], p.datafile_list[-1]))

    p.num_probes = np.max(pr_indices) + 1
    p.num_objects = np.max(ob_indices) + 1

    verbose(3, '%-25s%d' % ('Number of independent probes:', p.num_probes))
    verbose(3, '%-25s%d' % ('Number of independent objects:', p.num_objects))

    if p.flat_object:
        verbose(2, 'Flat object will be enforced')
        if p.num_probes > 1:
            verbose(1, 'Warning: more than one probe will be reconstructed, but flat_object couples with #0 only')

        # Add the relevant scan file
        scan = p.flat_scan
        pathdir = p.pathdir_patt % {'scan':scan}
        flat_scan_file = p.datafile_patt % {'path':pathdir, 'scan':scan, 'a0':asize[0], 'a1':asize[1]}
        verbose(3, 'Will load file %s' % flat_scan_file)

        # Figure out how many diffraction patterns there are in the flat scan
        if (not parallel) or prank == 0:
            # Load file (only on the master process)
            root, ext = os.path.splitext(flat_scan_file)
            is_matlab = (ext == '.mat')
            d = _load_data(flat_scan_file)
            if is_matlab:
                data = _matlab_transpose(d['data'])
            else:
                data = d['data']
            del d
            Npos_flat = data.shape[0]
            verbose(3, 'Flat scan file %s loaded successfully.' % flat_scan_file)
            verbose(3, '%d diffraction patterns in the flat scan file.' % Npos_flat)
            del data
        else:
            Npos_flat = 0


        if parallel:
            # Share the flat-scan size with the other processes.
            Npos_flat = comm.bcast(np.array(Npos_flat), root=0).item()

        Ndata_flat = Ndp * Npos_flat   

        p.datafile_list.append(flat_scan_file)

        # One more object layer
        p.num_objects += 1
        obi = p.num_objects - 1

        # Add fake positions
        # Default for now: a raster scan
        linear_extent = int(np.ceil(np.sqrt(Npos_flat)))
        flat_step = .6181 * pos_mindist
        flat_pos = np.array(raster_scan_positions(linear_extent, linear_extent, flat_step, flat_step))
        flat_pos = flat_pos[:Npos_flat]
        flat_pos_float = (flat_pos - flat_pos.min(axis=0)) / dx_spec
        flat_pos = np.round(flat_pos_float)
        flat_pos_dev = flat_pos_float - flat_pos
        verbose(3, 'Flat scan : using a %d x %d grid with step size of %6.2e meters' % (linear_extent, linear_extent, flat_step))

        for j in range(Npos_flat):
            # Flat-scan points always use probe #0 and the new object layer.
            positions.append(tuple(flat_pos[j]) + (0, obi) + tuple(flat_pos_dev[j]))

        # Append new positions
        pos = np.r_[pos, flat_pos]
        pos_dev = np.r_[pos_dev, flat_pos_dev]

        # Update total number of points
        Npts += Ndp * Nmodes * Npos_flat
        p.Npts = Npts
        p.Npos_flat = Npos_flat
        p.Ndata_flat = Ndata_flat
        p.Ndata += p.Ndata_flat

        # Switch updating of probe and object in the inner loop
        p.probe_before_object = True
        verbose(3, 'Flat object weight is set to %f' % p.flat_object_weight)

    positions = np.array(positions)
    p.positions = positions

    # Intensity renormalization 
    intens_renorm = p.intens_renorm
    if intens_renorm is not None:
        # Transform scalar or list input into a datafile-dependent dictionary
        verbose(3, 'Using intensity renormalization.')
        if not isinstance(intens_renorm, dict):
            if np.isscalar(intens_renorm):
                intens_renorm = [intens_renorm] * len(p.datafile_list)
            p.intens_renorm = dict(zip(p.datafile_list, intens_renorm))     

    # Prepare scan positions and MPI data sharing
    _prepare_datainfo(p)
 
    # Object must be large enough to cover the probe at every position.
    osize = tuple(asize + pos.max(axis=0))
    p.object_size = (p.num_objects, Nmodes_object) + osize
    p.probe_size = (p.num_probes, Nmodes_probe) + tuple(asize)
    
    if p.probe_support is not False:
        if p.probe_support_type == 'disc':
            verbose(2, 'Using a disc support for the probe.')
            # Disc radius chosen so the support covers probe_support_area of the frame.
            supp_radius2 = np.ceil(np.prod(asize) * p.probe_support_area / np.pi)
            p.probe_support = (np.fft.fftshift(U.fvec2(asize)) < supp_radius2).astype('float')
        elif p.probe_support_type is None:
            verbose(2, 'Using the provided probe support.')
        else:
            raise RuntimeError('Unsupported probe support type : %s' % str(p.probe_support_type))

    # Prepare probe
    initial_probe_type = p.initial_probe_type

    # ----For backward compatibility:
    if initial_probe_type != 'file' and p.get('initial_probe_file') is not None: 
        verbose(1, "Warning: 'initial_probe_file' is set but 'initial_probe_type' is '%s' rather than 'file'." % initial_probe_type) 
    # ----

    verbose(3, '%-25s%s' % ('Initial probe type:', initial_probe_type)) 
    if initial_probe_type == 'disc':
        # A defocused circular aperture
        r2 = np.fft.fftshift(U.fvec2(asize, dx_spec))
        probe = (r2 < (p.probe_diameter / 2.) ** 2).astype(CType)
        probe = prop.free_nf(probe, lam, p.probe_propdist, dx_spec[0]);
    elif initial_probe_type == 'focus':
        #Focussed probe
        t_psize = dx_spec[0] # focussed_probe has (y,x) convention for pixels. that maybe different for dx_spec
        t_pupil = p.probe_focus_aperture_type # 'rect'*,'circ'  *default
        t_pupildims = p.probe_focus_aperture_size # size of beam aperture at position of focussing optics
        t_fdist = p.probe_focaldist # distance of optics to focus
        t_defocus = p.probe_propdist # free space propagation distance (nearfield) of probe after focus
        t_focusdims = p.probe_diameter # characterize focal spot by focal spot size and not by optics aperture dimensions
        # BUGFIX: this block was dedented one level, which broke the
        # initial_probe_type dispatch: the 'gaussian' and 'file' branches
        # below attached to the antialiasing test instead, and the t_*
        # variables above were referenced outside the 'focus' branch where
        # they may be undefined. It belongs inside the 'focus' branch.
        # NOTE(review): when probe_antialiasing is None no probe is built
        # here - confirm whether a default antialiasing factor was intended.
        if p.probe_antialiasing is not None:
            t_antialiasing = p.probe_antialiasing #(>1.0) remove artifacts in probe generation, use antialiasing especially for defocussed probe
            probe = wave.focussed_probe(np.ones(asize), lam, t_psize, t_fdist, t_defocus, t_pupil, t_pupildims, t_focusdims, t_antialiasing, Plotting=False, WaveOut=False)
            if p.probe_use_conjugate:
                probe = probe.conj() 
    elif initial_probe_type == 'gaussian':
        # Gaussian probe
        r2 = np.fft.fftshift(U.fvec2(asize, dx_spec))
        probe = np.exp(-.5 * r2 / (p.probe_diameter / 2.) ** 2).astype(CType)
    else:
        if initial_probe_type == 'file':
            # Load file
            initial_probe = _load_data(p.initial_probe_file, 'probe')['probe'].astype(CType)
            root, ext = os.path.splitext(p.initial_probe_file)
            if (ext == '.mat'):
                initial_probe = _matlab_transpose(initial_probe)
        else:
            # Last try: use an "initial_probe"
            verbose(2, "Using in-memory initial probe.")
            initial_probe = p.initial_probe.astype(CType)
        if initial_probe.ndim == 2:
            initial_probe.resize((1,) + initial_probe.shape)
        if initial_probe.ndim == 3:
            # Augment the dimension to 4 and initialize the higher modes to other random (small) values 
            new_initial_probe = []
            for pr in initial_probe:
                new_initial_probe.append([pr] + [pr * .1 * np.random.normal(size=asize) for mode in range(Nmodes_probe - 1)])
            initial_probe = np.array(new_initial_probe)
            del new_initial_probe
        # Repeat in case the new reconstruction has more probes than initial_probe
        probe = np.repeat(initial_probe, p.num_probes // initial_probe.shape[0], axis=0)

    if probe.ndim == 2:
        # Tile a 2D probe into a 4D array, filling higher modes to other random values
        probe = np.array([[probe] + [probe * .1 * np.random.normal(size=asize) for mode in range(Nmodes_probe - 1)] for ii in range(p.num_probes)])

    try:
        assert probe.shape == p.probe_size        
    except AssertionError:
        # Print both shapes before re-raising to make the mismatch obvious.
        print('probe.shape = ' + str(probe.shape))
        print('p.probe_size = ' + str(p.probe_size))
        raise

    # Normalization
    probe = probe.astype(CType)
    a2 = np.prod(asize)
    probe *= np.sqrt(a2 * p.num_probes / U.norm2(probe))

    if p.probe_support is not False:
        p.probe = p.probe_support * probe
    else:
        p.probe = probe

    # Check for initial object
    initial_object_type = p.initial_object_type
    verbose(3, '%-25s%s' % ('Initial object type:', initial_object_type)) 
    if initial_object_type == 'ones':
        object = np.ones(p.object_size, dtype=CType)
        if Nmodes_object > 1:
            object[:, 1:, :, :] += .1 * np.random.normal(size=object[:, 1:, :, :].shape) + .1j * np.random.normal(size=object[:, 1:, :, :].shape)
    elif initial_object_type == 'stxm':
        raise RuntimeError('STXM analysis for initial object is not yet implemented.')
    else:
        if initial_object_type == 'file':
            initial_object_dict = _load_data(p.initial_object_file)
            # ('in' test replaces the Python-2-only dict.has_key.)
            dict_has_key_object = 'object' in initial_object_dict
            dict_has_key_obj = 'obj' in initial_object_dict
            if dict_has_key_obj and dict_has_key_object:
                verbose(1, 'Warning, provided initial object file has both "object" and "obj" variables. Using default ("object").')
                initial_object = initial_object_dict['object'].astype(CType)
            elif dict_has_key_object:
                initial_object = initial_object_dict['object'].astype(CType)
            else:
                initial_object = initial_object_dict['obj'].astype(CType)
        else:
            verbose(2, "Using in-memory initial object.")
            initial_object = p.initial_object.astype(CType)
        if initial_object.ndim == 2:
            initial_object.resize((1,) + initial_object.shape)
        if initial_object.ndim == 3:
            # Augment the dimension to 4 and initialize the higher modes to other random (small) values 
            new_initial_object = []
            for ob in initial_object:
                new_initial_object.append([ob] + [ob * .1 * np.random.normal(size=osize) for mode in range(Nmodes_object - 1)])
            initial_object = np.array(new_initial_object)
            del new_initial_object
        object = np.repeat(initial_object, p.num_objects // initial_object.shape[0], axis=0)
        
    assert object.shape == p.object_size

    if p.flat_object:
        object[-1] = 1. # This sets all object modes to 1.

    if p.probe_before_object:
        verbose(2, 'Will switch object and probe updates in inner loop (DM only).')

    p.object = object
    p.cfact = .000000001 * Npts_scan

    if p.remove_incoherent_scattering:
        verbose(2, 'Will try to subtract addaptively a smooth incoherent background in the diffraction patterns (ML only).')

    p.object_smooth_filter = None
    if p.object_smooth_gradient > 0.:
        verbose(2, 'Using a smooth gradient filter (Gaussian blur - only for ML)')
        def sfilt(x):
            # BUGFIX: the original called the fftpack module object itself
            # ("FFT(x)") and the undefined name "ifft", which raised as soon
            # as the filter was used. Use the forward/inverse 2D transforms
            # matching the 2D filter attached below.
            return np.fft.ifft2(np.fft.fft2(x) * sfilt.object_smooth_filter)
        sfilt.object_smooth_filter = np.exp(-p.object_smooth_gradient * U.fvec2(p.object_size[-2:], 1 / np.array(p.object_size[-2:])))
        p.object_smooth_filter = sfilt
    elif p.object_smooth_gradient < 0.:
        verbose(2, 'Using a smooth gradient filter (Hann window - only for ML)')
        from scipy.signal import correlate2d
        def sfilt(a):
            return correlate2d(a, sfilt.kernel, mode='same')
        sfilt.kernel = np.array([[.0625, .125, .0625], [.125, .25, .125], [.0625, .125, .0625]])
        p.object_smooth_filter = sfilt

    verbose(2, 'Scale object/probe: %3.1g' % p.scale_probe_object)

    if p.dump_object:
        p.dump_object_pattern = './' + p.scans[0] + '_' + p.run_label + '_object_%04d.h5'
        verbose(2, 'Will dump object in files "%s"... (interval = %d)' % ((p.dump_object_pattern % 0), p.dump_object_interval))
    if p.dump_probe:
        p.dump_probe_pattern = './' + p.scans[0] + '_' + p.run_label + '_probe_%04d.h5'
        verbose(2, 'Will dump probe in files "%s"... (interval = %d)' % ((p.dump_probe_pattern % 0), p.dump_probe_interval))

    if p.MPI_timing:
        comm = U.TimingWrapper(comm)
        verbose(2, 'Using MPI timing')

    if p.LL_in_DM:
        verbose(2, 'Will compute Gaussian Log-likelihood in DM')

    if p.reg_del2 or p.reg_TV or p.reg_Huber:
        if p.scale_precond:
            verbose(1, 'Warning: Regularization will be used - disabling the P/O scaling preconditioner')
            p.scale_precond = False

    if p.DM_smooth_amplitude is not None:
        verbose(2, 'Using object smoothing in DM (amplitude = %g, std = %g)' % (p.DM_smooth_amplitude, p.DM_smooth_std))


    # Other variables that need to pass through for the reconstruction.
    # NOTE(review): these bare attribute accesses presumably make U.Param
    # resolve each key (raising early if a required parameter is missing) -
    # verify against the pyE17 Param implementation.
    p.subpix_disp
    p.subpix_disp_start
    p.numit
    p.doplot
    p.dump_plot
    p.dump_plot_patt
    p.pbound
    p.average_start
    p.average_interval
    p.scale_precond
    p.plot_interval
    p.flip_data
    p.probe_change_start
    p.probe_change_start_ML
    p.average_probes
    p.average_probe_amp
    p.probe_support_start
    p.reg_del2
    p.reg_del2_amplitude
    p.reg_TV
    p.reg_TV_amplitude
    p.reg_Huber
    p.reg_Huber_amplitude
    p.reg_Huber_parameter
    p.remove_incoherent_scattering
    p.incoherent_scattering_mask_count
    p.proportional_probes
    p.ePIE_regul_probe
    p.ePIE_regul_object
    p.ePIE_alpha
    p.ePIE_beta
    p.quad_interval
    p.ML_type
    p.float_intens
    p.fourier_relax_factor
    p.plot_mask

    # Convenient to store or documentation purposes.
    p.parallel = parallel
    p.psize = None
    if parallel:
        p.psize = psize

    return p.paramdict

def print_summary(p):
    """\
    Returns a text summary of the reconstruction parameters in dictionary p.

    Missing entries are rendered as '<not set>'.
    """

    # Format string
    template = """\
    ### Run summary ###
    Time now: {dtime}
    Run name: {run_name}
    Scan type: {scan_type}
    Number of scans: {Nscan}
    Number of diffraction patterns per scan: {Npts_scan}
    Parallel run : {parallel} ({psize} processes)
    Object size: {object_size}
    Probe size: {probe_size}
    Pixel size (meters): {dx_spec}
    ### ------------ ###
    """

    # Collect every placeholder name, substituting a marker when absent.
    placeholders = re.findall('\{([^\}]*)\}', template)
    values = dict((key, p.get(key, '<not set>')) for key in placeholders)
    values['dtime'] = time.asctime()
    return template.format(**values)
    
class Plotter(object):
    """\
    Manages the standard 2x2 reconstruction figure: object amplitude,
    object phase, probe, and error (or log-likelihood) history.
    """

    def __init__(self, interactive=True):
        """\
        Create (or reuse) figure 1 with a 2x2 grid of axes.

        interactive: if True, drawing does not block (pyplot interactive mode).
        """
        from matplotlib import pyplot
        self.interactive = interactive
        self.pp = pyplot
        pyplot.interactive(interactive)
        plot_fig = pyplot.figure(1)
        plot_fig.clf()
        plot_fig.hold(False)
        plot_axes = [plot_fig.add_subplot(2, 2, i) for i in range(1, 5)]
        for pl in plot_axes: pl.hold(False)
        self.plot_fig = plot_fig
        self.plot_axes = plot_axes

    def plot(self, p=None, pri=0, obi=0, **kwargs):
        """\
        Plots ptycho results, drawing necessary variables from 
        dictionary p and/or keywords arguments. Passing 
        globals() works.
        pri,obi are the indices of probe and object to show if there are more than one.
        (default: 0)
        """
        p = U.Param(p, kwargs)

        # BUGFIX: err_label used to be left undefined when p contained
        # neither 'LL_list' nor 'err', raising UnboundLocalError at the
        # set_title call below. Default to 'Error'.
        err_label = 'Error'
        err = p.get('LL_list')
        if err is None:
            err = p.get('err')
            if err is None:
                err = [0]
            obj = p.get('object')
            if obj is None:
                obj = p.obj
        else:
            err_label = 'Log-likelihood'
            obj = p.obj

        # Only the first probe/object mode is displayed.
        prmode = 0
        obmode = 0
        probe = p.probe
        objsh = obj.shape[-2:]

        assert pri < probe.shape[0]
        assert obi < obj.shape[0]

        # Mask out the border region never covered by the probe, unless a
        # mask was provided.
        plot_mask = p.get('plot_mask')
        if plot_mask is None:
            plot_mask = np.fft.fftshift(U.fvec2(objsh)) < .25 * (min(objsh) - max(probe.shape)) ** 2

        obj_weight = p.get('pr_nrm')

        if p.get('Nmodes_object') is None: 
        #legacy case: make plotter work with scans processed before object modes
            if obj_weight is None:
                nobj = U.rmphaseramp(obj[obi], np.abs(obj[obi]) * plot_mask.astype(float))
                mean_nobj = (nobj * plot_mask).sum() / plot_mask.sum()
            else:
                obj_weight = obj_weight[obi]
                nobj = U.rmphaseramp(obj[obi], np.abs(obj[obi]) * obj_weight)
                mean_nobj = (nobj * obj_weight).sum() / obj_weight.sum()
        else:
            if obj_weight is None:
                nobj = U.rmphaseramp(obj[obi, obmode], np.abs(obj[obi, obmode]) * plot_mask.astype(float))
                mean_nobj = (nobj * plot_mask).sum() / plot_mask.sum()
            else:
                obj_weight = obj_weight[obi, obmode]
                nobj = U.rmphaseramp(obj[obi, obmode], np.abs(obj[obi, obmode]) * obj_weight)
                mean_nobj = (nobj * obj_weight).sum() / obj_weight.sum()
        # Phase relative to the (weighted) mean, so the display is centered.
        angle_obj = np.angle(nobj / mean_nobj)
        abs_obj = np.abs(nobj)

        # Color limits computed from the masked region only.
        angle_obj_bounds = (angle_obj[plot_mask].min(), angle_obj[plot_mask].max())
        abs_obj_bounds = (abs_obj[plot_mask].min(), abs_obj[plot_mask].max())

        plot_axes = self.plot_axes

        # Reuse the existing AxesImage when possible (faster than re-imshow).
        if not plot_axes[0].images:
            plot_axes[0].imshow(abs_obj, vmin=abs_obj_bounds[0], vmax=abs_obj_bounds[1])
        else:
            plot_axes[0].images[0].set_data(abs_obj)
            plot_axes[0].images[0].set_clim(vmin=abs_obj_bounds[0], vmax=abs_obj_bounds[1])

        if not plot_axes[1].images:
            plot_axes[1].imshow(angle_obj, vmin=angle_obj_bounds[0], vmax=angle_obj_bounds[1])
        else:
            plot_axes[1].images[0].set_data(angle_obj)
            plot_axes[1].images[0].set_clim(vmin=angle_obj_bounds[0], vmax=angle_obj_bounds[1])

        # Probe shown as a complex-valued image (3-dim probe: legacy, no modes).
        if probe.ndim == 3:
            probe_i = U.imsave(probe[pri])
        else:
            probe_i = U.imsave(probe[pri, prmode])
        
        if not plot_axes[2].images:
            plot_axes[2].imshow(probe_i)
        else:
            plot_axes[2].images[0].set_data(probe_i)            
            
        plot_axes[3].plot(err)

        plot_axes[0].set_title('Object amplitude')
        plot_axes[1].set_title('Object phase')
        plot_axes[2].set_title('Probe')
        plot_axes[3].set_title(err_label)
        
        self.plot_fig.canvas.set_window_title(p.run_name)
        return

    def draw(self):
        """Refresh the figure; blocks with show() in non-interactive mode."""
        if self.interactive:
            self.pp.draw()
            U.pause(0.01)
        else:
            self.pp.show()

    def savefig(self, *args, **kwargs):
        """Forward to the underlying figure's savefig."""
        self.plot_fig.savefig(*args, **kwargs)

def ptycho_DM(pdict=None, **kwargs):
    """\
    Ptychography reconstruction based on difference map.

    Parameters are taken, in order of precedence, from the keyword
    arguments, from the dictionary pdict, and from default_parameters.
    Returns the updated parameter dictionary (p.paramdict); the
    reconstructed object ('object'/'obj'), probe ('probe') and the
    per-iteration error history ('err') are stored in it.
    """
    # Create the parameter holder
    p = U.Param(kwargs, pdict, default_parameters, store=(0, 1))

    verbose.set_level(p.verbose_level)
    verbose(3, 'Entering ptycho_DM...')
   
    positions = p.positions
    asize = p.asize
    Npts = p.Npts
    Ndata = p.Ndata

    save_file = p.save_file % 'DM'
    verbose(2, 'Reconstruction will be saved as %s' % save_file)

    # Load data
    fmag, fmask = load_fmag(p)
    
    # Manage diffraction pattern shifts
    if p.dp_shift is True:
        verbose(2, 'dp_shift is True: automated shifts of the diffraction patterns will be used.')
        # Compute the average of every scan's diffraction patterns
        dp_average = np.zeros((p.Nscan,) + tuple(asize), FType)
        fmask_all = np.zeros((p.Nscan,) + tuple(asize), FType)
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            if fmag[lstart] is not None:
                fmask_all[iscan] = fmask[lstart]
            for jj in range(lstart, lend):
                if fmag[jj] is None:
                    continue
                dp_average[iscan] += fmag[jj] ** 2
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, dp_average)
            comm.Allreduce(MPI.IN_PLACE, fmask_all)
        dp_average /= p.Npts_scan
        fmask_all_sum = fmask_all.sum(axis=0)
        fmask_all_sum += (fmask_all_sum == 0)  # avoid division by zero below
        dp_global_average = (dp_average * fmask_all).sum(axis=0) / fmask_all_sum
        # Compute shift relative to global average (log scale tames the
        # large dynamic range of diffraction data)
        dp_shift = []
        for ii in range(len(dp_average)):
            r = U.shift_best(np.log(dp_global_average + 1), np.log(dp_average[ii] + 1), fmask_all[ii])[1]
            dp_shift.append(r)
        p.dp_shift = dp_shift
    elif p.dp_shift is not False:
        verbose(2, 'Using provided sub-pixel shifts for the diffraction patterns.')

    # Precompute the Fourier-space phase ramps implementing the per-scan
    # sub-pixel shifts (one forward ramp and its conjugate per data point).
    dp_shift_ramp = [None] * Npts
    dp_shift_ramp_conj = [None] * Npts
    if p.dp_shift:
        verbose(3, ''.join([p.scans[ii] + ' : ' + str(p.dp_shift[ii]) + '\n' for ii in range(len(p.scans))]))
        fx0, fx1 = U.fgrid(asize, 1. / asize)
        fx0 = np.fft.fftshift(fx0).astype(FType)
        fx1 = np.fft.fftshift(fx1).astype(FType)
        dp_shift_ramp_per_scan = [np.exp(2j * np.pi * (fx0 * x0 + fx1 * x1)).astype(CType) for x0, x1 in p.dp_shift]
        dp_shift_ramp_per_scan_conj = [np.exp(-2j * np.pi * (fx0 * x0 + fx1 * x1)).astype(CType) for x0, x1 in p.dp_shift]
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            for ll in range(lstart, lend):
                if fmag[ll] is not None:
                    dp_shift_ramp[ll] = dp_shift_ramp_per_scan[iscan]
                    dp_shift_ramp_conj[ll] = dp_shift_ramp_per_scan_conj[iscan]

    # Maximum power : used for normalization
    max_power = max([U.norm2(ff) if ff is not None else 0. for ff in fmag])
    if parallel:
        max_power = np.array([max_power])
        comm.Allreduce(MPI.IN_PLACE, max_power, op=MPI.MAX)
        max_power = max_power[0]

    # Total number of measurements - used for normalization in LL_in_DM
    tot_measpts = sum([fm.sum() for fm in fmask if fm is not None])
    if parallel:
        tot_measpts = np.array([tot_measpts])
        comm.Allreduce(MPI.IN_PLACE, tot_measpts)
        tot_measpts = tot_measpts.item()

    # Is this a continuing run?
    # (cont_run is kept even though unused here: hooks receive locals().)
    cont_run = False
    if p.hasattr('history'):
        verbose(3, 'Continuing previous run.')
        cont_run = True
        probe = p.probe
        obj = p.obj
    else:
        verbose(3, 'No previous run. Starting from initialized objects and probe.')
        p.history = []
        probe = p.probe
        # Normalize the initial probe to the measured maximum power
        probe *= np.sqrt(p.num_probes * max_power / U.norm2(probe))
        obj = p.object

    p.obj = obj
    p.probe = probe

    # Quick sanity check
    assert (asize == np.array(probe.shape[2:])).all()

    if p.pbound is None:
        # This formulation is consistent with Giewekemeyer (2010). 
        pbound = .25 * p.fourier_relax_factor ** 2
        verbose(3, 'Computed pbound is %g (would be %g in the old formulation)' % (pbound, pbound * np.prod(asize) / max_power))  
    else:
        # Renormalize pbound (old formulation)
        pbound = p.pbound * max_power / np.prod(asize)

    # Local references of various parameters
    numit = p.numit
    object_size = p.object_size
    clip_object = p.clip_object
    if clip_object:
        clip_max = p.clip_max
        clip_min = p.clip_min

    nearfield = p.nearfield
    probe_before_object = p.probe_before_object

    # Local references of regular task parameters
    dump_data = p.dump_data
    if dump_data:
        dump_interval = p.dump_interval
        dump_filename = p.dump_pattern % {'run_name': p.run_name}
    dump_object = p.dump_object
    if dump_object:
        dump_object_interval = p.dump_object_interval
        dump_object_pattern = p.dump_object_pattern
    dump_probe = p.dump_probe
    if dump_probe:
        dump_probe_interval = p.dump_probe_interval
        dump_probe_pattern = p.dump_probe_pattern
    average_start = p.average_start
    average_interval = p.average_interval
    probe_change_start = p.probe_change_start
    
    # Initialization
    err = []
    p.err = err
    rfact = []
    avob = np.zeros_like(obj)   # running sum for object averaging
    numav = 0
    im_number = 0
    cfact = p.cfact             # probe-update regularization constant

    compute_LL = p.LL_in_DM
    if compute_LL:
        LL_DM = []
        p.LL_DM = LL_DM

    DM_smooth_obj = (p.DM_smooth_amplitude is not None)
    if DM_smooth_obj:
        DM_smooth_amplitude = (p.DM_smooth_amplitude * max_power * p.num_probes * Ndata) / np.prod(asize)
        DM_smooth_std = p.DM_smooth_std

    # Probe support
    use_probe_support = False
    if p.probe_support is not False:
        probe_support = p.probe_support
        use_probe_support = True

    # Sub-pixel probe shifting setup
    subpix = p.subpix
    subpix_method = p.subpix_method
    subpix_start = p.subpix_start
    subpix_started = False
    do_subpix = None
    if subpix_method == 'fourier':
        do_subpix = subpix_fourier
    elif subpix_method == 'linear':
        do_subpix = subpix_linear
    else:
        raise RuntimeError('Unknown subpix method : %s' % str(subpix_method))

    # Prepare statistics
    proj1_time = 0
    proj2_time = 0
    plot_time = 0
    
    osh = obj.shape
    sh = probe.shape
    num_probes = p.num_probes
    num_objects = p.num_objects
    Nmodes_probe = p.Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = p.Nmodes

    # Accumulators for the overlap (probe/object) updates
    nobj = np.zeros_like(obj)
    obj_denom = np.zeros_like(obj)
    nprobe = np.zeros_like(probe)
    probe_denom = np.zeros_like(probe)
    
    # Setup views
    position_ranges = [(x[3], # Object index
                        x[0], # start y
                        x[0] + sh[2], # end y
                        x[1], # start x
                        x[1] + sh[3], # end x
                        x[2], # probe index,
                        x[4], # subpixel shift y 
                        x[5])            # subpixel shift x
                        for x in positions]

    # Take care of all the ugly indexing here.
    # This makes the loop parts below more readable.
    iter_flat = []               # A flat list of all independent terms
    iter_data = []          # A list of lists, made to map to fmag.
    num_elements_in_iter_by_fmag = 8
    for i in range(Ndata):
        if fmag[i] is None:
            # Data point not handled by this process: keep placeholders so
            # the indexing stays aligned with fmag.
            iter_flat.extend(Nmodes * [None])
            iter_data.append(num_elements_in_iter_by_fmag * [None])
            continue
        x = position_ranges[i]
        psilist = []
        probelist = []
        objlist = []
        subpix_parameter = None
        if subpix:
            if subpix_method.lower() == 'fourier':
                sp_shift = -np.array([x[6], x[7]])
                subpix_parameter = np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0))
            elif subpix_method.lower() == 'linear':
                subpix_parameter = -np.array([x[6], x[7]])
                
        for prm in range(Nmodes_probe):
            for obm in range(Nmodes_object):
                psi = probe[x[5], prm] * obj[x[0], obm, x[1]:x[2], x[3]:x[4]]  # This is the initial guess.
                iter_flat.append([\
                    psi, # 0 - iterate
                    probe[x[5], prm], # 1 - probe
                    nprobe[x[5], prm], # 2 - updated probe
                    probe_denom[x[5], prm], # 3 - denominator for the sum
                    obj[x[0], obm, x[1]:x[2], x[3]:x[4]], # 4 - object
                    nobj[x[0], obm, x[1]:x[2], x[3]:x[4]], # 5 - updated object
                    obj_denom[x[0], obm, x[1]:x[2], x[3]:x[4]], # 6 - object denominator for the sum
                    p.flat_object and (x[0] == num_objects - 1), # 7 - flat object flag
                    p.flat_object_weight if p.flat_object else 0., # 8 - flat object weight
                    subpix_parameter])                                     # 9 - Parameter for subpixel shift
                psilist.append(psi)
                probelist.append(probe[x[5], prm])
                objlist.append(obj[x[0], obm, x[1]:x[2], x[3]:x[4]])
        iter_data.append([\
            psilist, # 0 - iterate list
            probelist, # 1 - probe views
            objlist, # 2 - object views
            fmag[i], # 3 - fmag
            fmask[i], # 4 - fmask
            dp_shift_ramp[i], # 5 - dp shift
            dp_shift_ramp_conj[i], # 6 - dp shift (conj)
            subpix_parameter])                                             # 7 - Parameter for probe subpixel shift 

    # List of flat object (kept for hooks which receive locals())
    if p.flat_object:
        is_flat = [(x[3] == num_objects - 1) for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
        is_flat_mode = [(x[3] == num_objects - 1) for x in positions]
        flat_w = [1. if isfl else p.flat_object_weight for isfl in is_flat]


    # Plotting (master process only in parallel runs)
    if (p.doplot or p.dump_plot or p.last_plot) and (not parallel or prank == 0):
        plot_interval = p.plot_interval
        if p.doplot:
            from matplotlib import pyplot
            pyplot.interactive(True)
        plot_fig = Plotter(interactive=p.doplot)
        p.plot_mask = np.fft.fftshift(U.fvec2(osh[-2:]) < .25 * (max(osh[-2:]) - max(sh[-2:])) ** 2)
        if p.dump_plot:
            dump_plot_counter = 0

    # FFT setup: orthonormal scaling in both directions.
    a2 = np.prod(asize)
    if USE_FFTPACK:
        fnorm_fw = 1 / np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = FFT.fft2
        ifft = FFT.ifft2
    else:
        fnorm_fw = 1 / np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = np.fft.fft2
        ifft = np.fft.ifft2

    for f in hooks['pre_DM']:
        verbose(3, 'Calling pre-DM hooks')
        f(locals(), globals())

    def LL_in_DM():
        """\
        Computes the gaussian negative log-likelihood from within DM
        """
        LL = np.array([0.])
        for psilist, prlist, oblist, fmagi, fmaski, dp_rampi, dp_ramp_conji, sp_param in iter_data:
            if fmagi is None:
                continue
            if subpix_started:
                prlist = [do_subpix(pr, sp_param) for pr in prlist]
            fpsij = [fnorm_fw * fft(pr * ob) for pr, ob in izip(prlist, oblist)]
            Imodel = sum(U.abs2(fi) for fi in fpsij)
            Ii = fmagi ** 2
            wi = fmaski / (Ii + 1)
            DI = Imodel - Ii  
            LL += (wi * DI ** 2).sum()
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, LL)
        return LL.item() / tot_measpts
        
    # main loop
    for it in range(numit):
        if parallel:
            comm.Barrier()
        # BUGFIX: single-scan test used to be "== 0", which would raise an
        # IndexError on an empty scan list and print 'name - name' for one scan.
        verbose(1, '%s, iteration # %d of %d' % (p.scans[0] if len(p.scans) == 1 else p.scans[0] + ' - ' + p.scans[-1], it, numit))
        verbose(1, time.asctime())
        
        # 1. Overlap projection - a large loop where probe and object are refined.
        verbose(1, " - projection 1: overlap constraint - ")
        tm = time.time()
        prch0 = 0
        
        subpix_started = subpix and (it >= subpix_start)        
        
        for inner in range(10):

            if not ((probe_change_start < it) and (inner == 0) and probe_before_object):
                # Additional condition to allow updating the probe first

                # Only rank 0 seeds the regularization terms; other ranks
                # contribute zeros so the Allreduce sums stay correct.
                if parallel and prank != 0:
                    obj_denom.fill(0.)
                    obj.fill(0.)
                else:
                    if DM_smooth_obj:
                        obj[:] = DM_smooth_amplitude * gaussian_filter(obj, DM_smooth_std)
                        obj_denom.fill(DM_smooth_amplitude)
                    else:
                        obj.fill(0.)
                        obj_denom.fill(.001)

                for x in iter_flat:
                    if x is None:
                        continue
                    psi, pr, npr, prdenom, ob, nob, obdenom, fo, fw, sp = x
                    if fo:
                        continue
                    if subpix_started:
                        pr = do_subpix(pr, sp)
                    ob += pr.conj() * psi
                    obdenom += U.abs2(pr)
                
                if parallel:
                    comm.Allreduce(MPI.IN_PLACE, obj)
                    comm.Allreduce(MPI.IN_PLACE, obj_denom)

                obj /= obj_denom

                for f in hooks['obj_inner_loop_DM']:
                    verbose(3, 'Calling obj_inner_loop_DM hooks')
                    f(locals(), globals())

                if p.flat_object:
                    obj[-1] = 1.
    
                # Object clipping between clip_min and clip_max
                if clip_object:
                    aobj = np.abs(obj)
                    phobj = np.exp(1j * np.angle(obj))
                    too_high = (aobj > clip_max)
                    too_low = (aobj < clip_min)
                    obj[too_high] = clip_max * phobj[too_high]
                    obj[too_low] = clip_min * phobj[too_low]
            
            # Probe update: exit the inner loop if it is not time yet.
            if probe_change_start >= it:
                break
    
            if parallel and prank != 0:
                nprobe.fill(0.)
                probe_denom.fill(0.)
            else:
                nprobe[:] = cfact * probe
                probe_denom.fill(cfact)

            for x in iter_flat:
                if x is None:
                    continue
                psi, pr, npr, prdenom, ob, nob, obdenom, fo, fw, sp = x
                tpr = psi * ob.conj()
                tprdenom = U.abs2(ob)
                if subpix_started:
                    tpr = do_subpix(tpr, sp, forward=False)
                    tprdenom = do_subpix(tprdenom, sp, forward=False)
                if fo:
                    npr += fw * tpr
                    prdenom += fw * tprdenom
                else:
                    npr += tpr
                    prdenom += tprdenom
                
            if parallel:
                comm.Allreduce(MPI.IN_PLACE, nprobe)
                comm.Allreduce(MPI.IN_PLACE, probe_denom)
                
            nprobe /= probe_denom
            if use_probe_support:
                nprobe *= probe_support
            prch = U.norm(probe - nprobe)
            if not parallel or prank == 0:
                print('Change in probe: %f' % prch)
            probe[:] = nprobe
    
            # Stop the inner loop once probe changes have dropped to 10%
            # of the first recorded change.
            if prch0 == 0:
                prch0 = prch
            elif prch < .1 * prch0:
                break
        
        if p.proportional_probes and probe_change_start < it:
            verbose(3, 'Making probes proportional now.')
            probe_norm = [U.norm(pr) for pr in probe]
            probe_normalized = [pr / U.norm(pr) for pr in probe]
            probe_normalized_averaged = sum(ppp for ppp in probe_normalized) / len(probe_normalized)
            best_angle = [np.angle((pr * probe_normalized_averaged.conj()).sum()) for pr in probe_normalized]
            probe_factor = [prn * np.exp(1j * pra) for prn, pra in zip(probe_norm, best_angle)]
            new_probe_normalized = [pr / prn for pr, prn in zip(probe, probe_factor)]
            new_probe_averaged = sum(nppp for nppp in new_probe_normalized) / len(new_probe_normalized)
            for iprobe_index in range(num_probes):
                probe[iprobe_index] = probe_factor[iprobe_index] * new_probe_averaged

        tm_old, tm = tm, time.time()
        proj1_time = proj1_time + tm - tm_old
    
        er2 = 0.
        # 2. Fourier projection + complete diffmap loop
        verbose(1, ' - projection 2: Fourier modulus constraint - ')
        rf = 0
        rf_nrm = 0
    
        for psilist, prlist, oblist, fmagi, fmaski, dp_rampi, dp_ramp_conji, sp_param in iter_data:
            if fmagi is None:
                continue
            if subpix_started:
                prlist = [do_subpix(pr, sp_param) for pr in prlist]

            p1 = [pr * ob for pr, ob in izip(prlist, oblist)]

            if dp_rampi is None:
                f = [fnorm_fw * fft(2 * p1i - psi) for (p1i, psi) in izip(p1, psilist)]
            else:
                # BUGFIX: use this view's ramp (dp_rampi), not dp_shift_ramp[i]
                # where 'i' was a stale index left over from the setup loop.
                f = [fnorm_fw * fft(dp_rampi * (2 * p1i - psi)) for (p1i, psi) in izip(p1, psilist)]

            af = np.sqrt(sum(U.abs2(fi) for fi in f))
            fdev = af - fmagi
            fdev2 = fmaski * fdev ** 2
            power = fdev2.mean()
            # BUGFIX: fm used to be assigned only when power > pbound, causing
            # a NameError (or reuse of a stale fm) otherwise, and it was never
            # applied in the shifted branch. Within the bound the modulus
            # constraint is relaxed and the iterate passes through (fm = 1).
            fm = 1.
            if power > pbound:
                renorm = np.sqrt(pbound / power)
                fm = (1 - fmaski) + fmaski * (fmagi + fdev * renorm) / (af + 1e-10)
            if dp_rampi is None:
                p2 = [fnorm_bw * ifft(fm * fi) for fi in f]
            else:
                p2 = [dp_ramp_conji * fnorm_bw * ifft(fm * fi) for fi in f]

            # Difference-map update of the iterates, accumulating the error.
            df = [p2i - p1i for (p2i, p1i) in izip(p2, p1)]
            for (psi, dfi) in izip(psilist, df):
                psi += dfi
                er2 += U.norm2(dfi)
    
        if parallel:
            er2 = np.array([er2])
            comm.Allreduce(MPI.IN_PLACE, er2)
            er2 = er2[0]

        if np.isnan(er2):
            raise RuntimeError('Error is NaN! If you are using FFTW, make sure ffts are called with the right type (double/single)')

        err.append(np.sqrt(er2 / (max_power * Npts)))

        if compute_LL:
            LL_DM.append(LL_in_DM())
            verbose(1, 'Error: %12.3f\tNeg. LL: %12.3g' % (err[-1], LL_DM[-1]))
        else:
            verbose(1, 'Error: %12.3f' % err[-1])
    
        tm_old, tm = tm, time.time()
        proj2_time += tm - tm_old

        if (p.doplot or p.dump_plot) and (not parallel or prank == 0):
            if (it % p.plot_interval == 0):
                plot_fig.plot(p)
                if p.dump_plot:
                    # dump_plot_patt may or may not contain a counter placeholder
                    try:
                        dump_plot_file = p.dump_plot_patt % dump_plot_counter
                    except TypeError:
                        dump_plot_file = p.dump_plot_patt
                    ispdf = (os.path.splitext(dump_plot_file)[-1].lower() == '.pdf')
                    if ispdf:
                        plot_fig.savefig(dump_plot_file, dpi=600)
                    else:
                        plot_fig.savefig(dump_plot_file)
                    dump_plot_counter += 1
            if p.doplot:
                plot_fig.draw()
 
        if dump_object and (it % dump_object_interval == 0) and (not parallel or prank == 0):
            # BUGFIX: py2-only print statement replaced by a call, consistent
            # with the rest of the module.
            print('Dumping!')
            io.h5write(dump_object_pattern % it, obj=obj)            
        if dump_probe and (it % dump_probe_interval == 0) and (not parallel or prank == 0):
            io.h5write(dump_probe_pattern % it, probe=probe)            

        tm_old, tm = tm, time.time()
        plot_time += tm - tm_old

        # Accumulate the object average once averaging has started
        if (it >= average_start) & (it % average_interval == 0):
            avob += obj
            numav += 1
 
        for f in hooks['loop_DM']:
            verbose(3, 'Calling loop-DM hooks')
            f(locals(), globals())

    for f in hooks['post_DM']:
        verbose(3, 'Calling post-DM hooks')
        f(locals(), globals())

    header = ''
    if parallel:
        header = 'Process # %d ' % prank

    msg = """\
    {header} Finished
    {header} Time elapsed in projection 1: {0:6.2f} seconds
    {header} Time elapsed in projection 2: {1:6.2f} seconds
    {header} Time spent plotting: {2:6.2f} seconds""".format(proj1_time, proj2_time, plot_time, header=header)

    verbose(1, msg, mpi=True)
    
    # Average
    if average_start < numit:
        object = avob / numav
    else:
        object = obj
    

    # Store important variables
    p.obj = obj
    p.object = object
    p.probe = probe
    p.err = err
    p.history.append(('DM', numit, err))
    if hasattr(verbose, 'get_log'):
        p.verbose_log = verbose.get_log()
    # Save stuff
    if p.save and (not parallel or prank == 0):
        save_run(save_file, p.paramdict)
        verbose(2, 'Saved reconstruction in file %s' % save_file)

    if p.MPI_timing:
        fname = os.path.abspath(os.path.curdir) + '/Timing_proc_%03d.h5' % prank
        io.h5write(fname, timing=comm.timing)

    return p.paramdict

def ptycho_ePIE(pdict=None, **kwargs):
    """\
    Ptychography reconstruction - extended PIE (ePIE).

    Parameters are taken (in order of precedence) from ``kwargs``, ``pdict``
    and the module-level ``default_parameters``.  Returns the updated
    parameter dictionary (``p.paramdict``) holding the reconstructed object,
    probe and the per-iteration error history.

    NOTE: currently disabled (see the ``raise`` below) - support for
    multiple probe/object modes is not finished.
    """
    raise RuntimeError('Not done implementing modes!')
    # Create the parameter holder
    p = U.Param(kwargs, pdict, default_parameters, store=(0, 1))

    verbose.set_level(p.verbose_level)
    verbose(3, 'Entering ptycho_ePIE...')

    positions = p.positions
    asize = p.asize
    Npts = p.Npts
    Ndata = p.Ndata

    save_file = p.save_file % 'ePIE'
    verbose(2, 'Reconstruction will be saved as %s' % save_file)

    # Load measured Fourier magnitudes and their masks
    fmag, fmask = load_fmag(p)

    # Manage diffraction pattern shifts
    if p.dp_shift is True:
        verbose(2, 'dp_shift is True: automated shifts of the diffraction patterns will be used.')
        # Compute the average of every scan's diffraction patterns
        dp_average = np.zeros((p.Nscan,) + tuple(asize), FType)
        fmask_all = np.zeros((p.Nscan,) + tuple(asize), FType)
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            if fmag[lstart] is not None:
                fmask_all[iscan] = fmask[lstart]
            for jj in range(lstart, lend):
                if fmag[jj] is None:
                    continue
                dp_average[iscan] += fmag[jj] ** 2
        if parallel:
            # Sum partial averages across all MPI ranks
            comm.Allreduce(MPI.IN_PLACE, dp_average)
            comm.Allreduce(MPI.IN_PLACE, fmask_all)
        dp_average /= p.Npts_scan
        fmask_all_sum = fmask_all.sum(axis=0)
        fmask_all_sum += (fmask_all_sum == 0)  # avoid division by zero
        dp_global_average = (dp_average * fmask_all).sum(axis=0) / fmask_all_sum
        # Compute shift of each scan's average relative to the global average
        # (log scaling compresses the dynamic range before registration)
        dp_shift = []
        for ii in range(len(dp_average)):
            r = U.shift_best(np.log(dp_global_average + 1), np.log(dp_average[ii] + 1), fmask_all[ii])[1]
            dp_shift.append(r)
        p.dp_shift = dp_shift
    elif p.dp_shift is not False:
        verbose(2, 'Using provided sub-pixel shifts for the diffraction patterns.')

    # Precompute Fourier-space phase ramps implementing the sub-pixel shifts
    dp_shift_ramp = [None] * Npts
    dp_shift_ramp_conj = [None] * Npts
    if p.dp_shift:
        verbose(3, ''.join([p.scans[ii] + ' : ' + str(p.dp_shift[ii]) + '\n' for ii in range(len(p.scans))]))
        fx0, fx1 = U.fgrid(asize, 1. / asize)
        fx0 = np.fft.fftshift(fx0).astype(FType)
        fx1 = np.fft.fftshift(fx1).astype(FType)
        dp_shift_ramp_per_scan = [np.exp(2j * np.pi * (fx0 * x0 + fx1 * x1)).astype(CType) for x0, x1 in p.dp_shift]
        dp_shift_ramp_per_scan_conj = [np.exp(-2j * np.pi * (fx0 * x0 + fx1 * x1)).astype(CType) for x0, x1 in p.dp_shift]
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            for ll in range(lstart, lend):
                if fmag[ll] is not None:
                    dp_shift_ramp[ll] = dp_shift_ramp_per_scan[iscan]
                    dp_shift_ramp_conj[ll] = dp_shift_ramp_per_scan_conj[iscan]

    # Maximum power : used for normalization
    max_power = max([U.norm2(ff) if ff is not None else 0. for ff in fmag])
    if parallel:
        max_power = np.array([max_power])
        comm.Allreduce(MPI.IN_PLACE, max_power, op=MPI.MAX)
        max_power = max_power[0]

    # Is this a continuing run?
    cont_run = False
    if p.hasattr('history'):
        verbose(3, 'Continuing previous run.')
        cont_run = True
        probe = p.probe
        obj = p.obj
    else:
        verbose(3, 'No previous run. Starting from initialized objects and probe.')
        p.history = []
        probe = p.probe
        # Rescale initial probe so its total power matches the data
        probe *= np.sqrt(p.num_probes * max_power / U.norm2(probe))
        obj = p.object

    # Quick sanity check: probe frames must match the diffraction frame size
    assert (asize == np.array(probe.shape[2:])).all()

    # Regularization parameters for the ePIE denominators (0 -> pure max
    # normalization, 1 -> pointwise intensity normalization)
    regul_probe = p.ePIE_regul_probe
    regul_object = p.ePIE_regul_object

    ePie_alpha = p.ePie_alpha
    ePie_beta = p.ePie_beta

    # Local references of various parameters
    numit = p.numit
    object_size = p.object_size
    clip_object = p.clip_object
    if clip_object:
        clip_max = p.clip_max
        clip_min = p.clip_min

    nearfield = p.nearfield

    # Local references of regular task parameters
    dump_data = p.dump_data
    if dump_data:
        dump_interval = p.dump_interval
        dump_filename = p.dump_pattern % {'run_name': p.run_name}
    average_start = p.average_start
    average_interval = p.average_interval
    probe_change_start = p.probe_change_start

    # Initialization
    err = []
    p.err = err
    rfact = []
    avob = np.zeros_like(obj)
    numav = 0
    im_number = 0

    # Probe support
    use_probe_support = False
    if p.probe_support is not False:
        probe_support = p.probe_support
        use_probe_support = True

    if p.pbound is None:
        # This formulation is consistent with Giewekemeyer (2010).
        pbound = .25 * p.fourier_relax_factor ** 2
        verbose(3, 'Computed pbound is %g (would be %g in the old formulation)' % (pbound, pbound * np.prod(asize) / max_power))
    else:
        # Renormalize pbound (old forumulation)
        pbound = p.pbound * max_power / np.prod(asize)

    # Prepare statistics
    loop_time = 0
    plot_time = 0

    osh = obj.shape
    sh = probe.shape
    num_probes = p.num_probes
    num_objects = p.num_objects
    Nmodes_probe = p.Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = p.Nmodes

    obj_change = np.zeros_like(obj)
    probe_change = np.zeros_like(probe)

    # Setup views: per-position slices into the object/probe arrays so the
    # inner loop can address each scan position without re-slicing
    position_ranges = [(x[3], x[0], x[0] + sh[2], x[1], x[1] + sh[3]) for x in positions]

    obj_view = [obj[x[0], obm, x[1]:x[2], x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    obj_change_view = [obj_change[x[0], obm, x[1]:x[2], x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]

    probe_view = [probe[x[2], prm] for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]
    probe_change_view = [probe_change[x[2], prm] for x in positions for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]

    probe_mode_view = [[probe[x[2], prm] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in positions]
    obj_mode_view = [[obj[x[0], obm, x[1]:x[2], x[3]:x[4]] for prm in range(Nmodes_probe) for obm in range(Nmodes_object)] for x in position_ranges]

    in_node = [fm is not None for fm in fmag for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]

    # List of flat object
    if p.flat_object:
        is_flat = [(x[3] == num_objects - 1) for x in positions for mode in range(Nmodes)]
        # NOTE: was tab-indented in the original source (TabError in Python 3)
        flat_w = [1. if isfl else p.flat_object_weight for isfl in is_flat]

    # Plotting
    if (p.doplot or p.dump_plot or p.last_plot) and (not parallel or prank == 0):
        p.obj = obj
        p.probe = probe
        plot_interval = p.plot_interval
        if p.doplot:
            from matplotlib import pyplot
            pyplot.interactive(True)
        plot_fig = Plotter(interactive=p.doplot)
        p.plot_mask = FFT.fftshift(U.fvec2(osh[1:]) < .25 * (max(osh[1:]) - max(sh[1:])) ** 2)
        if p.dump_plot:
            dump_plot_counter = 0

    # FFT normalization: forward/backward scaled to be unitary
    a2 = np.prod(asize)
    if USE_FFTPACK:
        fnorm_fw = 1 / np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = FFT.fft2
        ifft = FFT.ifft2
    else:
        fnorm_fw = 1 / np.sqrt(a2)
        fnorm_bw = np.sqrt(a2)
        fft = FFT.fft2
        ifft = FFT.ifft2

    # sequential=True applies each position's update immediately (classic
    # ePIE); False accumulates all updates and applies them once per sweep.
    sequential = True

    for f in hooks['pre_ePIE']:
        verbose(3, 'Calling pre-ePIE hooks')
        f(locals(), globals())

    # main loop
    for it in range(numit):
        if parallel:
            comm.Barrier()
        # BUGFIX: condition was `== 0`, which indexed p.scans[0] on an empty
        # list (IndexError) and printed "name - name" for a single scan.
        # The correct test (as in ptycho_ML) is `== 1`.
        verbose(1, '%s, iteration # %d of %d' % (p.scans[0] if len(p.scans) == 1 else (p.scans[0] + ' - ' + p.scans[-1]), it, numit))
        verbose(1, time.asctime())

        tm = time.time()

        obj_change.fill(0.)
        probe_change.fill(0.)
        er2 = 0.
        # Visit positions in random order (standard for ePIE)
        for i in np.random.permutation(Npts):
            if not in_node[i]:
                continue
            # Do not update flat object
            if p.flat_object and is_flat[i]:
                continue

            # Exit wave and its (optionally shift-corrected) Fourier transform
            p1 = probe_view[i] * obj_view[i]
            if dp_shift_ramp[i] is None:
                f = fnorm_fw * fft(p1)
            else:
                f = fnorm_fw * fft(dp_shift_ramp[i] * p1)
            af = abs(f)
            ph = f / (af + 1e-10)
            # Fourier-magnitude constraint, relaxed by pbound
            fdev = af - fmag[i]
            fdev2 = fmask[i] * fdev ** 2
            power = fdev2.mean()
            if power > pbound:
                renorm = np.sqrt(pbound / power)
                af = af * (1 - fmask[i]) + fmask[i] * (fmag[i] + fdev * renorm)
            if dp_shift_ramp[i] is None:
                p2 = fnorm_bw * ifft(af * ph)
            else:
                p2 = dp_shift_ramp_conj[i] * fnorm_bw * ifft(af * ph)

            # ePIE updates: conjugate of the other factor times the exit-wave
            # difference, with a regularized intensity denominator
            a2pr = U.abs2(probe_view[i])
            a2ob = U.abs2(obj_view[i])
            obj_change_view[i] += ePie_alpha * probe_view[i].conj() * (p2 - p1) / (a2pr * regul_probe + (1 - regul_probe) * a2pr.max())
            probe_change_view[i] += ePie_beta * obj_view[i].conj() * (p2 - p1) / (a2ob * regul_object + (1 - regul_object) * a2ob.max())
            er2 += U.norm2(p2 - p1)

            if sequential:
                obj_view[i] += obj_change_view[i]
                probe_view[i] += probe_change_view[i]
                obj_change.fill(0.)
                probe_change.fill(0.)

        if not sequential:
            if parallel:
                comm.Allreduce(MPI.IN_PLACE, obj_change)
                comm.Allreduce(MPI.IN_PLACE, probe_change)
                er2 = np.array([er2])
                comm.Allreduce(MPI.IN_PLACE, er2)
                er2 = er2[0]
            obj += obj_change
            probe += probe_change
        if use_probe_support:
            probe *= probe_support

        # Object clipping between clip_min and clip_max (magnitude only,
        # phase is preserved)
        if clip_object:
            aobj = np.abs(obj)
            phobj = np.exp(1j * np.angle(obj))
            too_high = (aobj > clip_max)
            too_low = (aobj < clip_min)
            obj[too_high] = clip_max * phobj[too_high]
            obj[too_low] = clip_min * phobj[too_low]

        if p.proportional_probes and probe_change_start < it:
            verbose(3, 'Making probes proportional now.')
            # Force all probes to be complex multiples of a common average
            probe_norm = [U.norm(pr) for pr in probe]
            probe_normalized = [pr / U.norm(pr) for pr in probe]
            probe_normalized_averaged = sum(ppp for ppp in probe_normalized) / len(probe_normalized)
            best_angle = [np.angle((pr * probe_normalized_averaged.conj()).sum()) for pr in probe_normalized]
            probe_factor = [prn * np.exp(1j * pra) for prn, pra in zip(probe_norm, best_angle)]
            new_probe_normalized = [pr / prn for pr, prn in zip(probe, probe_factor)]
            new_probe_averaged = sum(nppp for nppp in new_probe_normalized) / len(new_probe_normalized)
            for iprobe_index in range(num_probes):
                probe[iprobe_index] = probe_factor[iprobe_index] * new_probe_averaged

        err.append(np.sqrt(er2 / (max_power * Npts)))
        verbose(1, 'Error: %12.3f' % err[-1])

        tm_old, tm = tm, time.time()
        loop_time = loop_time + tm - tm_old

        if (p.doplot or p.dump_plot) and (not parallel or prank == 0) and (it % p.plot_interval == 0):
            plot_fig.plot(p)
            if p.doplot:
                plot_fig.draw()
            if p.dump_plot:
                try:
                    dump_plot_file = p.dump_plot_patt % dump_plot_counter
                except TypeError:
                    # dump_plot_patt has no format placeholder: reuse as-is
                    dump_plot_file = p.dump_plot_patt
                ispdf = (os.path.splitext(dump_plot_file)[-1].lower() == '.pdf')
                if ispdf:
                    plot_fig.savefig(dump_plot_file, dpi=600)
                else:
                    plot_fig.savefig(dump_plot_file)
                dump_plot_counter += 1

        tm_old, tm = tm, time.time()
        plot_time += tm - tm_old

        for f in hooks['loop_ePIE']:
            verbose(3, 'Calling loop-ePIE hooks')
            f(locals(), globals())

    for f in hooks['post_ePIE']:
        verbose(3, 'Calling post-ePIE hooks')
        f(locals(), globals())

    header = ''
    if parallel:
        header = 'Process # %d ' % prank

    msg = """\
    {header} Finished
    {header} Time elapsed in loop: {0:6.2f} seconds
    {header} Time spent plotting: {1:6.2f} seconds""".format(loop_time, plot_time, header=header)

    verbose(1, msg, mpi=True)

    # Store important variables
    p.obj = obj
    p.probe = probe
    p.err = err
    p.history.append(('ePIE', numit, err))
    if hasattr(verbose, 'get_log'):
        p.verbose_log = verbose.get_log()
    # Save stuff (master process only)
    if p.save and (not parallel or prank == 0):
        save_run(save_file, p.paramdict)
        verbose(2, 'Saved reconstruction in file %s' % save_file)

    return p.paramdict

def ptycho_ML(pdict=None, **kwargs):
    """\
     Ptychography reconstruction based on optimization of the Gaussian log-likelihood
    """
    
    # Create the parameter holder
    p = U.Param(kwargs, pdict, default_parameters, store=(0, 1))
    verbose.set_level(p.verbose_level)

    ML_type = p.ML_type
    positions = p.positions
    asize = p.asize
    probe = p.probe.astype(CType)
    obj = p.object.astype(CType)
    assert (asize == np.array(probe.shape[2:])).all()
    Npts = p.Npts
    Ndata = p.Ndata
    Nmodes_probe = p.Nmodes_probe
    Nmodes_object = p.Nmodes_object
    Nmodes = p.Nmodes
    pos_subpix_portion = p.pos_subpix_portion

    save_file = p.save_file % 'ML'
    verbose(2, 'Reconstruction will be saved as %s' % save_file)
   
    num_probes = p.num_probes
    num_objects = p.num_objects

    subpix = p.subpix
    subpix_disp = p.subpix_disp
    subpix_disp_start = p.subpix_disp_start
    subpix_method = p.subpix_method
    subpix_start = p.subpix_start
    subpix_started = False
    subpix_disp_started = False
    
    if subpix_disp:
        fn = p.datainfo.keys()[0]
        subpix_displacements = io.h5read(fn)['displacements']
    
    do_subpix = None
    if subpix_method == 'fourier':
        do_subpix = subpix_fourier
    elif subpix_method == 'linear':
        do_subpix = subpix_linear
    else:
        raise RuntimeError('Unknown subpix method : %s' % str(subpix_method))
    
    ##################################################################################################################################
    ### -1- Load data
    ##################################################################################################################################

    if ML_type == 'Gauss':
        I, w = load_intens(p)
        data_in_node = [II is not None for II in I]
        max_power = max([II.sum() if II is not None else 0. for II in I])
        tot_power = sum([II.sum() if II is not None else 0. for II in I])
        tot_measpts = sum([(ww > 0).sum() for ww in w if ww is not None])
    elif ML_type == 'Poisson':
        I, w = load_intens(p)
        data_in_node = [II is not None for II in I]
        max_power = max([II.sum() if II is not None else 0. for II in I])
        tot_power = sum([II.sum() if II is not None else 0. for II in I])
        tot_measpts = sum([(ww > 0).sum() for ww in w if ww is not None])
        from scipy import special
        LLbase = [(special.gammaln(II + 1)).sum() if II is not None else None for II in I]
        fmask = [(ww > 0) if ww is not None else None for ww in w]
        I_sum = [II.sum() if II is not None else None for II in I]
    elif ML_type == 'Euclid':
        fmag, fmask = load_fmag(p)
        data_in_node = [ff is not None for ff in fmag]
        max_power = max([(ff ** 2).sum() if ff is not None else 0. for ff in fmag])
        tot_power = sum([(ff ** 2).sum() if ff is not None else 0. for ff in fmag])
        tot_measpts = sum([fm.sum() for fm in fmask if fm is not None])
        fmag_sum = [fm.sum() if fm is not None else None for fm in fmag]

    if parallel:
        max_power = np.array([max_power])
        tot_power = np.array([tot_power])
        tot_measpts = np.array([tot_measpts])
        comm.Allreduce(MPI.IN_PLACE, max_power, op=MPI.MAX)
        comm.Allreduce(MPI.IN_PLACE, tot_power)
        comm.Allreduce(MPI.IN_PLACE, tot_measpts)
        max_power = max_power.item()
        tot_power = tot_power.item()
        tot_measpts = tot_measpts.item()

    ##################################################################################################################################
    ### -2- Shift diffraction patterns
    ##################################################################################################################################
    if p.dp_shift is True:
        verbose(2, 'dp_shift is True: automated shifts of the diffraction patterns will be used.')
        # Compute the average of every scan's diffraction patterns
        dp_average = np.zeros((p.Nscan,) + tuple(asize), FType)
        fmask_all = np.zeros((p.Nscan,) + tuple(asize), FType)
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            if ML_type in ['Gauss', 'Poisson']:
                if data_in_node[lstart]:
                    fmask_all[iscan] = (w[lstart] > 0)
                for jj in range(lstart, lend):
                    if data_in_node[jj]:
                        dp_average[iscan] += I[jj]
            elif ML_type == 'Euclid':
                if data_in_node[lstart]:
                    fmask_all[iscan] = fmask[lstart]
                for jj in range(lstart, lend):
                    if data_in_node[jj]:
                        dp_average[iscan] += fmag[jj] ** 2
                
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, dp_average)
            comm.Allreduce(MPI.IN_PLACE, fmask_all)
        dp_average /= p.Npts_scan
        fmask_all_sum = fmask_all.sum(axis=0)
        fmask_all_sum += (fmask_all_sum == 0)
        dp_global_average = (dp_average * fmask_all).sum(axis=0) / fmask_all_sum
        # Compute shift relative to global average
        dp_shift = []
        for ii in range(len(dp_average)):
            #r = U.shift_best(dp_global_average, dp_average[ii], fmask[ii])[1]
            r = U.shift_best(np.log(dp_global_average + 1), np.log(dp_average[ii] + 1), fmask_all[ii])[1]
            dp_shift.append(r)
        p.dp_shift = dp_shift
    elif p.dp_shift is not None:
        verbose(2, 'Using provided sub-pixel shifts for the diffraction patterns.')

    dp_shift_ramp = [None] * Npts
    if p.dp_shift:
        verbose(3, ''.join([p.scans[ii] + ' : ' + str(p.dp_shift[ii]) + '\n' for ii in range(len(p.scans))]))
        fx0, fx1 = U.fgrid(asize, 1. / asize)
        fx0 = np.fft.fftshift(fx0)
        fx1 = np.fft.fftshift(fx1)
        dp_shift_ramp_per_scan = [np.exp(2j * np.pi * (fx0 * x0 + fx1 * x1)).astype(CType) for x0, x1 in p.dp_shift]
        dp_shift_ramp = [None] * Npts
        for iscan, scan in enumerate(p.scans):
            lstart, lend = p.scan_info[scan][1:]
            for ll in range(lstart, lend):
                if data_in_node[ll]:
                    dp_shift_ramp[ll] = dp_shift_ramp_per_scan[iscan]

    # Is this a continuing run?
    cont_run = False
    if p.hasattr('history'):
        cont_run = True
        probe = p.probe
        probe_amp = np.sqrt(num_probes * max_power)
        obj = p.obj
    else:
        p.history = []
        probe = p.probe
        probe_amp = np.sqrt(num_probes * max_power)
        probe *= probe_amp / U.norm(probe)
        obj = p.object

    object_smooth_gradient = False
    if p.object_smooth_filter is not None:
        object_smooth_gradient = True
        object_smooth_filter = p.object_smooth_filter

    # Metric correction parameter
    # scale_probe_object < 1 gives greater weight to the probe compared to the object.
    scale_probe_object = p.scale_probe_object
    
    float_intens = p.float_intens    
    if float_intens:
        float_intens_coeff = [None for i in range(len(I))]
        float_intens_coeff_kl = [None for i in range(len(I))]
        float_intens_coeff_kl0 = [None for i in range(len(I))]           

    # altmin = False turns off automatic rescaling of probe and object gradients
    # In principle, true should always work better, so this option should be removed
    # once that's confirmed.
    #altmin = True
    #altmin = False
    altmin = p.scale_precond

    # Number of iterations
    numit = p.numit

    # Interval to compute exact gradient instead of approximate
    quad_interval = p.quad_interval

    # Make local references of regular task parameters
    dump_data = p.dump_data
    if dump_data:
        dump_interval = p.dump_interval
        dump_filename = p.dump_pattern % {'run_name': p.run_name}
    dump_object = p.dump_object
    if dump_object:
        dump_object_interval = p.dump_object_interval
        dump_object_pattern = p.dump_object_pattern
    dump_probe = p.dump_probe
    if dump_probe:
        dump_probe_interval = p.dump_probe_interval
        dump_probe_pattern = p.dump_probe_pattern

    reg_del2 = p.reg_del2
    reg_del2_amplitude = p.reg_del2_amplitude
    reg_TV = p.reg_TV
    reg_TV_amplitude = p.reg_TV_amplitude
    reg_Huber = p.reg_Huber
    reg_Huber_amplitude = p.reg_Huber_amplitude
    reg_Huber_parameter = p.reg_Huber_parameter

    regularizer = None
    if reg_del2:
        obj_Npix = obj.size
        expected_obj_var = obj_Npix / tot_power  # Poisson
        reg_rescale = tot_measpts / (8. * obj_Npix * expected_obj_var) 
        verbose(2, 'Rescaling regularization amplitude using the Poisson distribution assumption.')
        verbose(2, 'Factor: %8.5g' % reg_rescale)
        reg_del2_amplitude *= reg_rescale
        regularizer = Regul_del2(amplitude=reg_del2_amplitude)
        R_list = []
        p.R_list = R_list

    ##################################################################################################################################
    ### -3- Probe support
    ##################################################################################################################################
    use_probe_support = False
    if p.probe_support is not False:
        probe_support = p.probe_support
        use_probe_support = True
        probe *= probe_support

    osh = obj.shape
    sh = probe.shape
    p.obj = obj
    p.probe = probe

    #in_node = [II is not None for II in I for mode in range(Nmodes)]

    # These are the arrays shaped like object
    grad_obj = np.zeros(osh, dtype=CType)
    new_grad_obj = np.zeros(osh, dtype=CType)
    h_obj = np.zeros(osh, dtype=CType)
    nrm_obj = np.zeros(osh, dtype=FType)
    obj_noramp = np.zeros(osh, dtype=CType)
 
    # These are the arrays shaped like probe
    grad_probe = np.zeros(sh, dtype=CType)
    new_grad_probe = np.zeros(sh, dtype=CType)
    h_probe = np.zeros(sh, dtype=CType)
    
    # List of object array coordinates used for slicing
    position_ranges = [(x[3], x[0], x[0] + sh[2], x[1], x[1] + sh[3]) for x in positions]

    ##################################################################################################################################
    ### -4- Set up object views (looping through object modes, probe modes and positions)
    ##################################################################################################################################
    nrm_obj_view = [nrm_obj[x[0], obm, x[1]:x[2], x[3]:x[4]] for x in position_ranges for prm in range(Nmodes_probe) for obm in range(Nmodes_object)]

    ##################################################################################################################################   
    # -5- Set up probe views (looping through object modes, probe modes and positions)
    # These are the same views as above, but re-ordered to make easier updates
    ##################################################################################################################################
    probe_mode_view = [[probe[x[2], prm] for prm in range(Nmodes_probe)] for x in positions]
    obj_mode_view = [[obj[x[0], obm, x[1]:x[2], x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]
    new_grad_probe_mode_view = [[new_grad_probe[x[2], prm] for prm in range(Nmodes_probe)] for x in positions]
    new_grad_obj_mode_view = [[new_grad_obj[x[0], obm, x[1]:x[2], x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]
    h_probe_mode_view = [[h_probe[x[2], prm] for prm in range(Nmodes_probe)] for x in positions]
    h_obj_mode_view = [[h_obj[x[0], obm, x[1]:x[2], x[3]:x[4]] for obm in range(Nmodes_object)] for x in position_ranges]

    ##################################################################################################################################
    # -6- subpixel shift
    ##################################################################################################################################
    if subpix or subpix_disp:
        subpix_shift_probe = []
        subpix_shift_linear = []
        if subpix_method.lower() == 'fourier':
            for x in positions:
                sp_shift = -np.array([x[4], x[5]])
                subpix_shift_probe.append(np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0)))
                subpix_shift_linear.append(-np.array([x[4], x[5]]))
        elif subpix_method.lower() == 'linear':
            for x in positions:
                subpix_shift_linear.append(-np.array([x[4], x[5]]))
                
    ##################################################################################################################################
    # -7- Incoherent background
    ##################################################################################################################################
    remove_incoherent_scattering = p.remove_incoherent_scattering
    if remove_incoherent_scattering:
        supp_radius2 = np.ceil(p.incoherent_scattering_mask_count / np.pi)
        Qinc_support = (U.fvec2(asize) < supp_radius2).astype('float')
        Qinc_amp = np.array([II.sum() if II is not None else 0. for II in I])
        Qinc_max = Qinc_amp.max()
        Qinc_amp /= Qinc_max
        Qinc = Qinc_support.astype(CType)
        Qinc *= probe_amp / np.sqrt(np.prod(asize))
        grad_Qinc = np.zeros(asize, dtype=CType)
        new_grad_Qinc = np.zeros(asize, dtype=CType)
        h_Qinc = np.zeros(asize, dtype=CType)
        Iinc = np.zeros(asize, dtype=FType)
        verbose(3, 'Initialized incoherent scattering removal')

    # List of flat object
    if p.flat_object:
        is_flat = [(x[3] == num_objects - 1) for x in positions] 

    probe_change_start = p.probe_change_start_ML
    
    average_probes = p.average_probes
    if average_probes:
        if num_probes == 1:
            average_probes = False # Nothing to do
        else:
            # Initialize arrays
            av_probe = probe.mean(axis=0)               # Probe average
            d_av_probe = np.zeros_like(probe)           # Deviation from probe average
            d_av_h_probe = np.zeros_like(probe)         # Correction to probe displacement
            average_probe_amp = p.average_probe_amp
            # Estimate the penalization amplitude normalization
            average_probe_normalization = .5 * np.prod(asize) * num_probes / (num_probes + 1)
            verbose(2, 'Using probe averaging (%d probes, amplitude term = %f)' % (num_probes, average_probe_amp))

    # Plotting
#    if (p.doplot or p.dump_plot) and (not parallel or prank==0):
#        plot_interval = p.plot_interval
#        from matplotlib import pyplot
#        if p.doplot:
#            pyplot.interactive(True)
#        plot_fig = pyplot.figure(1)
#        plot_fig.clf()
#        plot_fig.hold(False)
#        plot_axes = [plot_fig.add_subplot(2,2,i) for i in range(1,5)]
#        for pl in plot_axes: pl.hold(False)
#        plot_mask = p.get('plot_mask')
#        if plot_mask is None:
#            plot_mask = np.fft.fftshift(U.fvec2(osh[1:]) < .25*(max(osh[1:]) - max(sh[1:]))**2)
#        if p.dump_plot:
#            dump_plot_counter = 0

    if (p.doplot or p.dump_plot or p.last_plot) and (not parallel or prank == 0):
        plot_interval = p.plot_interval
        if p.doplot:
            from matplotlib import pyplot
            pyplot.interactive(True)
        plot_fig = Plotter(interactive=p.doplot)
        p.plot_mask = np.fft.fftshift(U.fvec2(osh[-2:]) < .25 * (max(osh[-2:]) - max(sh[-2:])) ** 2)
        if p.dump_plot:
            dump_plot_counter = 0

    a2 = np.prod(asize)
    if USE_FFTPACK:
        fnorm_fw = FType(1 / np.sqrt(a2))
        fnorm_bw = FType(np.sqrt(a2))
        fft = FFT.fft2
        ifft = FFT.ifft2
    else:
        fnorm_fw = FType(1 / np.sqrt(a2))
        fnorm_bw = FType(np.sqrt(a2))
        fft = FFT.fft2
        ifft = FFT.ifft2

    start = True
    LL_list = []
    p.LL_list = LL_list

    epsilon = 1e-6

    for f in hooks['pre_ML']:
        verbose(3, 'Calling pre-ML hooks')
        f(locals(), globals())
        
    eps = 0.1
    eps_x_plus = np.array([eps, 0])
    eps_y_plus = np.array([0, eps])
     
    displacements = [np.array([0.0, 0.0]) for i in range(Ndata)]   
    delta_r = []
    
    ##################################################################################################################################    
    # -8- MAIN LOOP
    ##################################################################################################################################    
    for itcg in range(numit):
        
        # Group all processes at this point
        if parallel:
            comm.Barrier()

        verbose(1, '%s, iteration # %d of %d' % (p.scans[0] if len(p.scans) == 1 else (p.scans[0] + ' - ' + p.scans[-1]), itcg, numit))
        verbose(1, time.asctime())

        # Use the quadratic approximation of the line search except on every
        # quad_interval-th iteration of a Gauss run (Poisson/Euclid always use it).
        quad_approx = (ML_type in ['Poisson', 'Euclid']) or (not quad_interval) or (not (itcg % quad_interval == 0))
        floating_intensity = float_intens #and (itcg >= floating_intensity_start)

        # Position-refinement features switch on after their configured start iterations.
        subpix_started = subpix and (itcg >= subpix_start)
        subpix_disp_started = subpix_disp and (itcg >= subpix_disp_start)

        # rescale probe and object
        # Keep the probe at a fixed norm (probe_amp) and compensate in the object,
        # leaving the product probe*object -- hence the model -- unchanged.
        probe_norm = U.norm(probe)
        probe *= probe_amp / probe_norm
        obj *= probe_norm / probe_amp

        if remove_incoherent_scattering:
            # also rescale the incoherent scattering field and compute the incoherent intensities
            Qinc *= probe_amp / probe_norm
            fQinc = fnorm_fw * fft(Qinc)
            Iinc = U.abs2(fQinc)

        # Initialization
        # Zero the gradient accumulators and the log-likelihood for this iteration.
        new_grad_obj.fill(0.)
        new_grad_probe.fill(0.)
        nrm_obj.fill(0.)
        LL = np.array([0.])
        
        # NOTE(review): these four accumulators appear unused in the visible code
        # below -- possibly leftovers from an earlier finite-difference scheme.
        LL_x_plus = np.array([0.])
        LL_x_minus = np.array([0.])
        LL_y_plus = np.array([0.])
        LL_y_minus = np.array([0.])              
        

        if remove_incoherent_scattering:
            new_grad_Qinc.fill(0.)
            fgrad_Qinc = np.zeros(asize, dtype=CType)

        ##################################################################################################################################
        # -9- Gradient computation - Gauss
        ##################################################################################################################################
        if ML_type == 'Gauss':
            for i in range(Ndata):
                # In parallel runs each rank only processes its own frames.
                if not data_in_node[i]:
                    continue
                    
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                new_grad_probe_mode = new_grad_probe_mode_view[i]
                new_grad_obj_mode = new_grad_obj_mode_view[i]
                
                if subpix_started:                    
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]
                
                # 2x2 numerical Hessian and 2x1 gradient of the error metric with
                # respect to the (x, y) position shift of this frame.
                H = np.array([[0., 0.], [0., 0.]])
                grad_L = np.array([[0.], [0.]])
                gamma = 0.5
                
                np.set_printoptions(precision=3)
                
                if subpix_disp_started:
                    # Two Newton refinement passes over the position of frame i.
                    # NOTE(review): the loop variable 'l' is immediately shadowed by
                    # the inner 'for l in (y + steps).tolist()' below; the outer loop
                    # still runs twice, but 'l' never holds 1 or 2 inside the body.
                    for l in [1, 2]:
                        # Sample the error metric on a 3x3 grid around the current
                        # position estimate (steps=1 grid point each side, step eps).
                        steps = 1.0
                        x = np.arange(-steps , steps + 1.0 , 1.0)
                        y = np.arange(-steps , steps + 1.0 , 1.0)                        
                        X, Y = np.meshgrid(x, y)
                        Xnew, Ynew = np.meshgrid(x, y)
                        Z = np.zeros_like(X)
                
                        # NOTE(review): k and l take FLOAT values (0.0, 1.0, 2.0) and
                        # are used directly as array indices -- this relies on legacy
                        # NumPy accepting float indices (removed in NumPy >= 1.12).
                        for k in (x + steps).tolist():
                            for l in (y + steps).tolist():
            
                                sp_shift = np.array([0, 0])
                                if subpix_method.lower() == 'fourier':                        
                                    # Fourier shift: multiply by a linear phase ramp.
                                    sp_shift = subpix_shift_linear[i] + displacements[i] + eps_x_plus * X[0, k] + eps_y_plus * Y[l, 0]
                                    shift_xy = np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0))
                                elif subpix_method.lower() == 'linear': 
                                    # Linear interpolation shift: pass the raw offset.
                                    sp_shift = subpix_shift_linear[i] + displacements[i] + eps_x_plus * X[0, k] + eps_y_plus * Y[l, 0]                      
                                    shift_xy = sp_shift                                   
                                
                                Xnew[l, k] = sp_shift[0]
                                Ynew[l, k] = sp_shift[1]
                                # Forward model at the shifted position: propagate
                                # each probe/object mode pair to the detector.
                                probe_mode_kl = [do_subpix(pr, shift_xy) for pr in probe_mode]
                                if dp_shift_ramp[i] is None:
                                    fpsij_kl = [fnorm_fw * fft(pr * ob) for pr in probe_mode_kl for ob in obj_mode]
                                else:
                                    fpsij_kl = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode_kl for ob in obj_mode]
                                Imodel_kl = sum(U.abs2(fi) for fi in fpsij_kl)
                                if remove_incoherent_scattering:
                                    Imodel_kl += Qinc_amp[i] * Iinc
                                if floating_intensity:
                                    # Least-squares optimal intensity scale for this frame.
                                    float_intens_coeff_kl[i] = (w[i] * Imodel_kl * I[i]).sum() / (w[i] * Imodel_kl ** 2).sum()
                                    Imodel_kl *= float_intens_coeff_kl[i] 
                                DI_kl = Imodel_kl - I[i]    
                                # Weighted squared intensity error at this grid point.
                                LLj_kl = (w[i] * DI_kl ** 2).sum().item() 
                                Z[l, k] = LLj_kl      
                     
                        # Central finite differences on the 3x3 error surface Z.
                        # NOTE(review): float indices again ('steps' is 1.0); also
                        # Z[steps+1, steps+1] = Z[2, 2] is the grid corner, so these
                        # stencils assume the grid center is at index (1, 1).
                        H[0, 1] = H[1, 0] = (Z[steps + 1, steps + 1] - Z[steps + 1, steps - 1] - Z[steps - 1, steps + 1] + Z[steps - 1, steps - 1]) / (4 * eps * eps)
                        H[0, 0] = (Z[steps, steps + 1] - 2 * Z[steps, steps] + Z[steps, steps - 1]) / (eps * eps)
                        H[1, 1] = (Z[steps + 1, steps ] - 2 * Z[steps, steps] + Z[steps - 1, steps]) / (eps * eps)
                        
                        grad_L[0, 0] = (Z[steps, steps + 1] - Z[steps, steps - 1]) / (2 * eps)
                        grad_L[1, 0] = (Z[steps + 1, steps] - Z[steps - 1 , steps ]) / (2 * eps)         
           
                        Hinv = LA.inv(H)
                        # Eigenvalues only used to report positive-definiteness below.
                        e, v = LA.eig(Hinv)                       
                
                        # Debug output and diagnostic plots for frame 1 only.
                        # (Python 2 print statements.)
                        if i == 1: 
                            verbose(1, '---------------------------H---------------------------------------------------------------------------------------')
                            print H               
                            #verbose(1, '--------------------------------------------------------------------------------------------------------------------')              
                            verbose(1, 'Hinv positive definite: ' + str(e.min() > 0))      
                            verbose(1, '---------------------------Hinv---------------------------------------------------------------------------------------')
                            print Hinv               
                            #verbose(1, '--------------------------------------------------------------------------------------------------------------------') 
                            verbose(1, '---------------------------grad_L---------------------------------------------------------------------------------------')
                            print grad_L               
                            #verbose(1, '--------------------------------------------------------------------------------------------------------------------')        
                            # Sample a wider 11x11 error surface purely for the 3D
                            # diagnostic plot saved below.
                            steps0 = 5.0
                            x0 = np.arange(-steps0 , steps0 + 1.0 , 1.0)
                            y0 = np.arange(-steps0 , steps0 + 1.0 , 1.0)                        
                            X0, Y0 = np.meshgrid(x0, y0)
                            Xnew0, Ynew0 = np.meshgrid(x0, y0)
                            Z0 = np.zeros_like(X0)
                            for k in (x0 + steps0).tolist():
                                for l in (y0 + steps0).tolist():
                                    sp_shift = np.array([0, 0])
                                    
                                    if subpix_method.lower() == 'fourier':                        
                                        sp_shift = subpix_shift_linear[i] + displacements[i] + eps_x_plus * X0[0, k] + eps_y_plus * Y0[l, 0]
                                        shift_xy = np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0))
                                    elif subpix_method.lower() == 'linear': 
                                        sp_shift = subpix_shift_linear[i] + displacements[i] + eps_x_plus * X0[0, k] + eps_y_plus * Y0[l, 0]                      
                                        shift_xy = sp_shift  
                                    
                                    Xnew0[l, k] = sp_shift[0]
                                    Ynew0[l, k] = sp_shift[1]
                                    probe_mode_kl0 = [do_subpix(pr, shift_xy) for pr in probe_mode]
                                    if dp_shift_ramp[i] is None:
                                        fpsij_kl0 = [fnorm_fw * fft(pr * ob) for pr in probe_mode_kl0 for ob in obj_mode]
                                    else:
                                        fpsij_kl0 = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode_kl0 for ob in obj_mode]
                                    Imodel_kl0 = sum(U.abs2(fi) for fi in fpsij_kl0)
                                    if remove_incoherent_scattering:
                                        Imodel_kl0 += Qinc_amp[i] * Iinc
                                    if floating_intensity:
                                        float_intens_coeff_kl0[i] = (w[i] * Imodel_kl0 * I[i]).sum() / (w[i] * Imodel_kl0 ** 2).sum()
                                        Imodel_kl0 *= float_intens_coeff_kl0[i] 
                                    DI_kl0 = Imodel_kl0 - I[i] 
                                    LLj_kl0 = (w[i] * DI_kl0 ** 2).sum().item() 
                                    Z0[l, k] = LLj_kl0                          
                       
                            # Surface plot of the error landscape around frame 1.
                            # NOTE(review): fig.gca(projection='3d') is deprecated in
                            # modern matplotlib -- fine for the era of this code.
                            fig = plt.figure()
                            ax = fig.gca(projection='3d')
                            surf = ax.plot_surface(Xnew0, Ynew0, Z0, rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=False)
                            ax.set_zlim(Z0.min() - 1, Z0.max() + 1) 
                            ax.view_init(elev=41, azim= -24)
                            ax.zaxis.set_major_locator(LinearLocator(10))
                            ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
                            
                            fig.colorbar(surf, shrink=0.5, aspect=5)                
                            plt.savefig('surface_' + str(itcg) + '.png')    
            
                            #x1 = [p2[0] + p_sub[0] + d[0] for p2, p_sub, d in izip(positions, pos_subpix_portion, displacements)]
                            #y1 = [p2[1] + p_sub[1] + d[1] for p2, p_sub, d in izip(positions, pos_subpix_portion, displacements)]
                            #x2 = [p2[0] + p_sub[0] + d[0] for p2, p_sub, d in izip(positions, pos_subpix_portion, subpix_displacements)]
                            #y2 = [p2[1] + p_sub[1] + d[1] for p2, p_sub, d in izip(positions, pos_subpix_portion, subpix_displacements)]
                                                        
                            # Estimated (x1, y1) vs reference (x2, y2) positions.
                            # Presumably positions rows carry subpixel parts at
                            # columns 4 and 5 -- TODO confirm against caller.
                            x1 = [p_sub[4] + d[0] for  p_sub, d in izip(positions, displacements)]
                            y1 = [p_sub[5] + d[1] for  p_sub, d in izip(positions, displacements)]
                            x2 = [p_sub[4] + d[0] for  p_sub, d in izip(positions, subpix_displacements)]
                            y2 = [p_sub[5] + d[1] for  p_sub, d in izip(positions, subpix_displacements)]
                             
                            xaxis = range(1, Ndata + 1)
                            fig1 = plt.figure()
                            #plt.plot(x1, y1, 'r+', label='estimate')
                            #plt.plot(x2, y2, 'b+', label='real')
                            plt.plot(xaxis, x1, 'ro')
                            plt.plot(xaxis, x2, 'bo')
                            plt.plot(xaxis, y1, 'r*')
                            plt.plot(xaxis, y2, 'b*')
                            #plt.xlim(-100.0, 800.0)
                            #plt.ylim(-100.0, 800.0)
                            #plt.legend()
                            
                            plt.savefig('displacements_' + str(itcg) + '.png', dpi=(100))      
                              
                        # Newton step, clipped to +/- 2 pixels per component.
                        # cmp() is Python-2-only (sign function here).
                        p0 = np.dot(Hinv, grad_L)
                        #p = p * gamma          
                        if abs(p0[0, 0]) > 2.0: p0[0, 0] = 2.0 * cmp(p0[0, 0], 0)
                        if abs(p0[1, 0]) > 2.0: p0[1, 0] = 2.0 * cmp(p0[1, 0], 0)
                        
                        if i == 1:
                            verbose(1, '---------------------------p---------------------------------------------------------------------------------------')
                            print p0               
                            verbose(1, '--------------------------------------------------------------------------------------------------------------------')
    
                            
                        # Apply the (negated) Newton step to the position estimate.
                        displacements[i] = displacements[i] - np.array([p0[0, 0], p0[1, 0]])   
                        
                    verbose(1, 'N=%d: estimate:(%f -> %f,%f -> %f) real(%f,%f)' % (i, displacements[i][0], displacements[i][0] + p0[0, 0], displacements[i][1], displacements[i][1] + p0[1, 0], subpix_displacements[i][0], subpix_displacements[i][1]))
             
                # Recompute the shifted probe with the (possibly refined)
                # displacement, then accumulate the Gauss-model gradient.
                probe_mode = probe_mode_view[i]
                if subpix_started:                    
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]
                if subpix_disp_started:    
                    if subpix_method.lower() == 'fourier':                        
                        sp_shift = subpix_shift_linear[i] + displacements[i] 
                        shift_xy = np.exp(2j * np.pi * np.sum(U.fgrid(asize, sp_shift / asize), axis=0))
                    elif subpix_method.lower() == 'linear': 
                        # NOTE(review): this branch uses subpix_shift_probe[i] while
                        # the refinement loop above used subpix_shift_linear[i] --
                        # possibly intentional, but worth confirming.
                        sp_shift = subpix_shift_probe[i] + displacements[i]                     
                        shift_xy = sp_shift                  
                    probe_mode = [do_subpix(pr, shift_xy) for pr in probe_mode]  
                                       
                # Forward model: exit waves for every probe-mode x object-mode pair.
                if dp_shift_ramp[i] is None:
                    fpsij = [fnorm_fw * fft(pr * ob) for pr in probe_mode for ob in obj_mode]
                else:
                    fpsij = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                # Incoherent sum over modes gives the model intensity.
                Imodel = sum(U.abs2(fi) for fi in fpsij)
                if remove_incoherent_scattering:
                    Imodel += Qinc_amp[i] * Iinc                  
                if floating_intensity:                   
                    # Per-frame least-squares intensity rescaling coefficient.
                    float_intens_coeff[i] = (w[i] * Imodel * I[i]).sum() / (w[i] * Imodel ** 2).sum()
                    Imodel *= float_intens_coeff[i]                     
                # Intensity residual against the measured frame.
                DI = Imodel - I[i]            

                # Back-propagate the weighted residual (chain rule through |.|^2 and FFT).
                if dp_shift_ramp[i] is None:
                    xsi_j = [fnorm_bw * ifft(w[i] * DI * fi) for fi in fpsij]
                else:
                    xsi_j = [dp_shift_ramp[i].conj() * fnorm_bw * ifft(w[i] * DI * fi) for fi in fpsij]

                #for xi, pr, ob, ng_pr, ng_ob in zip(xsi_j, probe_mode, obj_mode, new_grad_probe_mode, new_grad_obj_mode):
                # Pair each back-propagated field with its (probe, object) mode pair,
                # in the same product order used to build fpsij.
                po_list = [(pr, ng_pr, ob, ng_ob) for pr, ng_pr in izip(probe_mode, new_grad_probe_mode) for ob, ng_ob in izip(obj_mode, new_grad_obj_mode)] 
                for xi, po in izip(xsi_j, po_list):
                    pr, ng_pr, ob, ng_ob = po
                    # Gradient w.r.t. object (skipped where the object is known flat)
                    # and w.r.t. probe; ng_* are views accumulating in place.
                    if not (p.flat_object and is_flat[i]):
                        ng_ob += 2. * xi * pr.conj()
                    if subpix_started:
                        # Shift the probe gradient back to the unshifted probe frame.
                        ng_pr += 2. * do_subpix(xi * ob.conj(), subpix_shift_probe[i], forward=False) 
                    else:
                        ng_pr += 2. * xi * ob.conj()
                    # Illumination normalization map for the object update.
                    nrm_obj_view[i] += U.abs2(pr)
                    
                if remove_incoherent_scattering:
                    fgrad_Qinc += fQinc * Qinc_amp[i] * w[i] * DI
                
                # Weighted squared-error contribution of this frame.
                LLj = (w[i] * DI ** 2).sum() 
                
                LL += LLj                 
                
        ##################################################################################################################################
        # -10- Gradient computation - Poisson
        ##################################################################################################################################                
        elif ML_type == 'Poisson':
            # Same structure as the Gauss branch, but with the Poisson negative
            # log-likelihood: residual factor (1 - I/Imodel) instead of (Imodel - I).
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                new_grad_probe_mode = new_grad_probe_mode_view[i]
                new_grad_obj_mode = new_grad_obj_mode_view[i]

                if subpix_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]

                # Forward model over all probe-mode x object-mode pairs.
                if dp_shift_ramp[i] is None:
                    fpsij = [fnorm_fw * fft(pr * ob) for pr in probe_mode for ob in obj_mode]
                else:
                    fpsij = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                # epsilon floor keeps the division and log below well-defined.
                Imodel = sum(U.abs2(fi) for fi in fpsij) + epsilon
                if remove_incoherent_scattering:
                    Imodel += Qinc_amp[i] * Iinc
                if floating_intensity:
                    # Poisson-optimal scale: match total counts.
                    float_intens_coeff[i] = I_sum[i] / Imodel.sum()
                    Imodel *= float_intens_coeff[i] 
                # fmask masks out bad/missing detector pixels.
                DI = fmask[i] * (1 - I[i] / Imodel)
                if dp_shift_ramp[i] is None:
                    xsi_j = [fnorm_bw * ifft(DI * fi) for fi in fpsij]
                else:
                    xsi_j = [dp_shift_ramp[i].conj() * fnorm_bw * ifft(DI * fi) for fi in fpsij]
                #for xi, pr, ob, ng_pr, ng_ob in zip(xsi_j, probe_mode, obj_mode, new_grad_probe_mode, new_grad_obj_mode):
                po_list = [(pr, ng_pr, ob, ng_ob) for pr, ng_pr in izip(probe_mode, new_grad_probe_mode) for ob, ng_ob in izip(obj_mode, new_grad_obj_mode)] 
                for xi, po in izip(xsi_j, po_list):
                    pr, ng_pr, ob, ng_ob = po
                    if not (p.flat_object and is_flat[i]):
                        ng_ob += 2. * xi * pr.conj()
                    if subpix_started:
                        ng_pr += 2. * do_subpix(xi * ob.conj(), subpix_shift_probe[i], forward=False) 
                    else:
                        ng_pr += 2. * xi * ob.conj()
                    nrm_obj_view[i] += U.abs2(pr)
                if remove_incoherent_scattering:
                    fgrad_Qinc += fQinc * Qinc_amp[i] * DI
                # Poisson NLL (LLbase presumably holds the data-only constant term).
                LL += LLbase[i] + (fmask[i] * (Imodel - I[i] * np.log(Imodel))).sum()
        ##################################################################################################################################
        # -11- Gradient computation - Euclid
        ##################################################################################################################################
        elif ML_type == 'Euclid':
            # Amplitude-based (Euclidean) error metric: |F psi| vs measured fmag.
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                new_grad_probe_mode = new_grad_probe_mode_view[i]
                new_grad_obj_mode = new_grad_obj_mode_view[i]

                if subpix_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]

                if dp_shift_ramp[i] is None:
                    fpsij = [fnorm_fw * fft(pr * ob) for pr in probe_mode for ob in obj_mode]
                else:
                    fpsij = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                # NOTE(review): fpsij is a Python LIST of arrays; abs(list) raises
                # TypeError, and 'DI * fpsij' / 'ifft(DI * fpsij)' below likewise
                # treat the list as a single array.  This branch looks broken (or
                # only ever exercised with a single mode after some coercion) --
                # verify before relying on ML_type == 'Euclid'.
                fmodel = abs(fpsij)
                if floating_intensity:
                    float_intens_coeff[i] = fmag_sum[i] / fmodel.sum()
                    fmodel *= float_intens_coeff[i] 
                DI = fmask[i] * (1 - fmag[i] / fmodel)
                if dp_shift_ramp[i] is None:
                    xsi_j = fnorm_bw * ifft(DI * fpsij)
                else:
                    xsi_j = dp_shift_ramp[i].conj() * fnorm_bw * ifft(DI * fpsij)
                #for xi, pr, ob, ng_pr, ng_ob in zip(xsi_j, probe_mode, obj_mode, new_grad_probe_mode, new_grad_obj_mode):
                po_list = [(pr, ng_pr, ob, ng_ob) for pr, ng_pr in izip(probe_mode, new_grad_probe_mode) for ob, ng_ob in izip(obj_mode, new_grad_obj_mode)] 
                for xi, po in izip(xsi_j, po_list):
                    pr, ng_pr, ob, ng_ob = po
                    if not (p.flat_object and is_flat[i]):
                        ng_ob += 2. * xi * pr.conj()
                    if subpix_started:
                        ng_pr += 2. * do_subpix(xi * ob.conj(), subpix_shift_probe[i], forward=False) 
                    else:
                        ng_pr += 2. * xi * ob.conj()
                    nrm_obj_view[i] += U.abs2(pr)
                LL += (fmask[i] * (abs(fpsij) - fmag[i]) ** 2).sum()
                
        ##################################################################################################################################
        # -C- calculating delta_r
        ##################################################################################################################################             
        

        # Sum the per-rank gradient and likelihood contributions across all
        # MPI processes, in place.
        if parallel:
            comm.Allreduce(MPI.IN_PLACE, new_grad_obj)
            comm.Allreduce(MPI.IN_PLACE, new_grad_probe)
            comm.Allreduce(MPI.IN_PLACE, nrm_obj)
            comm.Allreduce(MPI.IN_PLACE, LL)
            if remove_incoherent_scattering:
                comm.Allreduce(MPI.IN_PLACE, fgrad_Qinc)

        if remove_incoherent_scattering:
            # Real-space gradient for the incoherent field, confined to its support.
            new_grad_Qinc = 2.*Qinc_support * fnorm_bw * ifft(fgrad_Qinc)
            
        ##################################################################################################################################
        # -12- Probe averaging
        ##################################################################################################################################
        if average_probes:
            if Nmodes > 1:
                raise RuntimeError('Probe averaging has not been modified to take into account multiple modes')
            # Compute new average and find  alignment parameters
            av_probe1 = np.zeros_like(probe[0])
            r1_shift = []
            r2_shift = []
            for ipr in range(num_probes):
                # Align each probe to the current average before summing.
                aprtmp, r, alpha = U.shift_best(av_probe, probe[ipr])
                r1_shift.append(r)
                # Only phase shifts are unconstrained, so divide with abs(alpha)
                av_probe1 += aprtmp / abs(alpha)
            av_probe = av_probe1 / num_probes
            
            # Add penalization term
            # Quadratic penalty pulling every probe toward the (aligned) average,
            # added to both the probe gradient and the log-likelihood.
            LLav = 0.
            r_shift = []
            for ipr in range(num_probes):
                pr_shift, r, alpha = U.shift_best(probe[ipr], av_probe)
                r2_shift.append(r)
                d_av_probe[ipr] = (probe[ipr] - pr_shift / abs(alpha))
                new_grad_probe[ipr] += average_probe_amp * average_probe_normalization * d_av_probe[ipr]
                #probe[ipr] = pr_alpha[ipr] * U.pshift(av_probe, pr_r[ipr])
                LLav += average_probe_amp * average_probe_normalization * U.norm2(d_av_probe[ipr])
            verbose(3, 'Log-likelihood contribution from probe averaging = %f' % LLav)
            LL += LLav

        # Relative weight of the probe vs object search directions; with altmin
        # it is rebalanced every iteration from the current gradient norms.
        if altmin:
            scale_p_o = scale_probe_object * U.norm2(new_grad_obj) / U.norm2(new_grad_probe)
            verbose(3, 'Scale P/O: %6.3g' % scale_p_o)
        else:
            scale_p_o = scale_probe_object

        # Confine probe updates to the user-provided support mask.
        if use_probe_support:
            new_grad_probe *= probe_support

        # Laplacian (del^2) regularizer contribution to the object gradient.
        if reg_del2:
            new_grad_obj += regularizer.gradient(obj)

        ##################################################################################################################################
        # -13- regularisation - total variation
        ##################################################################################################################################
        if reg_TV:
            # Total variation regularization
            # Backward and forward discrete differences 
            obj_xf = U.delxf(obj, axis= -2)
            obj_yf = U.delxf(obj, axis= -1)
            obj_xb = U.delxb(obj, axis= -2)
            obj_yb = U.delxb(obj, axis= -1)

            # New gradient contribution
            # NOTE(review): this rebinds the module-scope 'epsilon' (1e-6, used as
            # the Poisson intensity floor above) -- harmless only because the
            # Poisson branch reads it before this point in each iteration.
            epsilon = 1e-10
            abs_obj_xb = np.abs(obj_xb)
            abs_obj_yb = np.abs(obj_yb)
            abs_obj_xf = np.abs(obj_xf)
            abs_obj_yf = np.abs(obj_yf)

            # Clamp magnitudes away from zero so x/|x| stays finite.
            abs_obj_xb[abs_obj_xb < epsilon] = epsilon
            abs_obj_yb[abs_obj_yb < epsilon] = epsilon
            abs_obj_xf[abs_obj_xf < epsilon] = epsilon
            abs_obj_yf[abs_obj_yf < epsilon] = epsilon

            # TV subgradient: sum of unit-magnitude difference phasors.
            new_grad_obj += reg_TV_amplitude * (obj_xb / abs_obj_xb + obj_yb / abs_obj_yb - obj_xf / abs_obj_xf - obj_yf / abs_obj_yf)
            #new_grad_obj += reg_TV_amplitude*(np.exp(1j*np.angle(obj_xb)) + np.exp(1j*np.angle(obj_yb)) - np.exp(1j*np.angle(obj_xf)) - np.exp(1j*np.angle(obj_yf)))

        ##################################################################################################################################
        # -14- regularisation - huber
        ##################################################################################################################################
        if reg_Huber:
            # Huber regularization
            # Backward and forward discrete differences 
            obj_xf = U.delxf(obj, axis= -2)
            obj_yf = U.delxf(obj, axis= -1)
            obj_xb = U.delxb(obj, axis= -2)
            obj_yb = U.delxb(obj, axis= -1)

            # New gradient contribution
            # Restores the value clobbered by the TV branch above.
            epsilon = 1e-6
            # Smooth Huber weights: 1/sqrt(delta + |d|^2), finite at d = 0.
            fp_xb = 1. / np.sqrt(reg_Huber_parameter + U.abs2(obj_xb))
            fp_yb = 1. / np.sqrt(reg_Huber_parameter + U.abs2(obj_yb))
            fp_xf = 1. / np.sqrt(reg_Huber_parameter + U.abs2(obj_xf))
            fp_yf = 1. / np.sqrt(reg_Huber_parameter + U.abs2(obj_yf))

            new_grad_obj += reg_Huber_amplitude * (obj_xb * fp_xb + obj_yb * fp_yb - obj_xf * fp_xf - obj_yf * fp_yf)

        ##################################################################################################################################
        # -15- smoothing precond
        ##################################################################################################################################
        if object_smooth_gradient:
            # Apply smoothing filter preconditioning
            # In-place smoothing of every 2D slice of the object gradient.
            for new_grad_obj_slice in new_grad_obj:
                for x in new_grad_obj_slice:
                    x[:] = object_smooth_filter(x)

        # Record the per-measurement-point normalized log-likelihood.
        newLL = LL.item() / tot_measpts
        LL_list.append(newLL)

        # Freeze the probe for the first probe_change_start iterations.
        if probe_change_start > itcg:
            scale_p_o = scale_probe_object
            new_grad_probe.fill(0.)

        # Polak-Ribiere conjugate-gradient coefficient beta ('bt'); reset to 0
        # (steepest descent) on the very first iteration.
        if start:
            start = False
            bt = 0
        else:
            # <g_new, g_new - g_old> / <g_old, g_old>, probe part weighted by scale_p_o.
            bt_num = scale_p_o * (U.norm2(new_grad_probe) - np.real(np.vdot(new_grad_probe.flat, grad_probe.flat))) + \
                                 (U.norm2(new_grad_obj) - np.real(np.vdot(new_grad_obj.flat, grad_obj.flat))) 
            bt_denom = scale_p_o * U.norm2(grad_probe) + U.norm2(grad_obj) 
            if remove_incoherent_scattering:
                bt_num += scale_p_o * (U.norm2(new_grad_Qinc) - np.real(np.vdot(new_grad_Qinc.flat, grad_Qinc.flat)))
                bt_denom += scale_p_o * U.norm2(grad_Qinc)
            # Clamp at 0 (automatic restart when beta would be negative).
            bt = max(0, bt_num / bt_denom)

        #verbose(3,'Polak-Ribiere coefficient: %f ' % bt)

        # Store the current gradients for the next iteration's beta.
        grad_obj[:] = new_grad_obj
        grad_probe[:] = new_grad_probe
        if remove_incoherent_scattering:
            grad_Qinc[:] = new_grad_Qinc    

        ##################################################################################################################################
        # -16- next conjugate
        ##################################################################################################################################
        # Conjugate search directions: h <- bt*h - (preconditioned) gradient.
        h_obj *= bt
        if object_smooth_gradient:
            for ii_h_obj in range(len(h_obj)):
                for jj_h_obj in range(len(h_obj[ii_h_obj])):
                    h_obj[ii_h_obj, jj_h_obj] -= object_smooth_filter(grad_obj[ii_h_obj, jj_h_obj])
        else:
            h_obj -= grad_obj
        h_probe *= bt
        h_probe -= scale_p_o * grad_probe
        if remove_incoherent_scattering:
            h_Qinc *= bt
            h_Qinc -= scale_p_o * grad_Qinc


        # Coefficients of the (up to degree-8) line-search polynomial in the
        # step length; Brenorm keeps them numerically comparable across iterations.
        B = np.zeros((9,), dtype=FType)
        Brenorm = 1. / LL.item() ** 2
        if remove_incoherent_scattering:
            fh_Qinc = fnorm_fw * fft(h_Qinc)
            cfh_Qinc = fh_Qinc.conj()
            
        ##################################################################################################################################
        # -17- minimize - Gauss
        ##################################################################################################################################
        if ML_type == 'Gauss':
            # Build the line-search polynomial L(t) = sum_k B[k] t^k along the
            # conjugate direction (h_probe, h_obj).  With psi(t) = psi + t*a + t^2*b,
            # |F psi(t)|^2 expands to A0 + A1*t + A2*t^2 (+ A3*t^3 + A4*t^4 exactly).
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
    
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                h_probe_mode = h_probe_mode_view[i]
                h_obj_mode = h_obj_mode_view[i]

                if subpix_started:
                    # Shift both the probe and its search direction identically.
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]
                    h_probe_mode = [do_subpix(h_pr, subpix_shift_probe[i]) for h_pr in h_probe_mode]

                if dp_shift_ramp[i] is None:
                    # fpsij: current exit waves; aj: first-order (cross) terms;
                    # bj: second-order terms h_pr*h_ob.
                    fpsij = [fnorm_fw * fft(pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fnorm_fw * fft(pr * h_ob + h_pr * ob) for (pr, h_pr) in izip(probe_mode, h_probe_mode) for (ob, h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fnorm_fw * fft(h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
                else:
                    fpsij = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fnorm_fw * fft(dp_shift_ramp[i] * (pr * h_ob + h_pr * ob)) for (pr, h_pr) in izip(probe_mode, h_probe_mode) for (ob, h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fnorm_fw * fft(dp_shift_ramp[i] * h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
    
                # Intensity expansion coefficients in the step length t.
                A0 = sum(U.abs2(fi) for fi in fpsij)
                A1 = 2 * np.real(sum(fi * aji.conj() for (fi, aji) in izip(fpsij, aj)))
                A2 = 2 * np.real(sum(fi * bji.conj() for (fi, bji) in izip(fpsij, bj))) + sum(U.abs2(aji) for aji in aj)
    
                if p.remove_incoherent_scattering:
                    A0 += Qinc_amp[i] * Iinc
                    A1 += 2 * Qinc_amp[i] * np.real(fQinc * cfh_Qinc)
                    A2 += Qinc_amp[i] * U.abs2(fh_Qinc)

                if floating_intensity:
                    A0 *= float_intens_coeff[i]
                    A1 *= float_intens_coeff[i]
                    A2 *= float_intens_coeff[i]
                # After this, A0 is the zeroth-order intensity RESIDUAL.
                A0 -= I[i]
    
                if quad_approx:
                    # Quadratic model of L(t): only B[0..2] are needed.
                    B[0] += np.dot(w[i].flat, (A0 ** 2).flat) * Brenorm
                    B[1] += np.dot(w[i].flat, (2 * A0 * A1).flat) * Brenorm
                    B[2] += np.dot(w[i].flat, (A1 ** 2 + 2 * A0 * A2).flat) * Brenorm
                else:
                    # Exact expansion: include third- and fourth-order intensity
                    # terms, giving a degree-8 polynomial in t.
                    A3 = 2 * np.real(sum(aji * bji.conj() for (aji, bji) in izip(aj, bj)))
                    A4 = sum(U.abs2(bji) for bji in bj)
                    if floating_intensity:
                        A3 *= float_intens_coeff[i]
                        A4 *= float_intens_coeff[i]
                    B[0] += np.dot(w[i].flat, (A0 ** 2).flat) * Brenorm
                    B[1] += np.dot(w[i].flat, (2 * A0 * A1).flat) * Brenorm
                    B[2] += np.dot(w[i].flat, (A1 ** 2 + 2 * A0 * A2).flat) * Brenorm
                    B[3] += np.dot(w[i].flat, (2 * A1 * A2 + 2 * A0 * A3).flat) * Brenorm
                    B[4] += np.dot(w[i].flat, (2 * A1 * A3 + 2 * A0 * A4 + A2 ** 2).flat) * Brenorm
                    B[5] += np.dot(w[i].flat, (2 * A2 * A3).flat) * Brenorm
                    B[6] += np.dot(w[i].flat, (2 * A2 * A4 + A3 ** 2).flat) * Brenorm
                    B[7] += np.dot(w[i].flat, (2 * A3 * A4).flat) * Brenorm
                    B[8] += np.dot(w[i].flat, (A4 ** 2).flat) * Brenorm
                    
        ##################################################################################################################################
        # -18- minimize - Poisson
        ##################################################################################################################################
        elif ML_type == 'Poisson':
            # Same intensity expansion as the Gauss branch; the Poisson-specific
            # handling continues beyond this view.
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                h_probe_mode = h_probe_mode_view[i]
                h_obj_mode = h_obj_mode_view[i]
    
                if subpix_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]
                    h_probe_mode = [do_subpix(h_pr, subpix_shift_probe[i]) for h_pr in h_probe_mode]

                if dp_shift_ramp[i] is None:
                    fpsij = [fnorm_fw * fft(pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fnorm_fw * fft(pr * h_ob + h_pr * ob) for (pr, h_pr) in izip(probe_mode, h_probe_mode) for (ob, h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fnorm_fw * fft(h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
                else:
                    fpsij = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fnorm_fw * fft(dp_shift_ramp[i] * (pr * h_ob + h_pr * ob)) for (pr, h_pr) in izip(probe_mode, h_probe_mode) for (ob, h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fnorm_fw * fft(dp_shift_ramp[i] * h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
    
                A0 = sum(U.abs2(fi) for fi in fpsij)
                A1 = 2 * np.real(sum(fi * aji.conj() for (fi, aji) in izip(fpsij, aj)))
                A2 = 2 * np.real(sum(fi * bji.conj() for (fi, bji) in izip(fpsij, bj))) + sum(U.abs2(aji) for aji in aj)
     
                if p.remove_incoherent_scattering:
                    A0 += Qinc_amp[i] * Iinc
                    A1 += 2 * Qinc_amp[i] * np.real(fQinc * cfh_Qinc)
                    A2 += Qinc_amp[i] * U.abs2(fh_Qinc)

                if floating_intensity:
                    A0 *= float_intens_coeff[i]
                    A1 *= float_intens_coeff[i]
                    A2 *= float_intens_coeff[i]

                DI = 1. - I[i] / A0
    
                B[1] += np.dot(fmask[i].flat, (A1 * DI).flat) * Brenorm
                B[2] += (np.dot(fmask[i].flat, (A2 * DI).flat) + .5 * np.dot(fmask[i].flat, (I[i] * (A1 / A0) ** 2).flat)) * Brenorm
                #B[1] += np.dot(A1.flat,DI.flat) * Brenorm
                #B[2] += (np.dot(A2.flat,DI.flat) + .5*np.dot(I[i].flat,((A1/A0)**2).flat)) * Brenorm

        ##################################################################################################################################
        # -19- minimize - Euklid
        ##################################################################################################################################
        elif ML_type == 'Euclid':
            for i in range(Ndata):
                if not data_in_node[i]:
                    continue
                probe_mode = probe_mode_view[i]
                obj_mode = obj_mode_view[i]
                h_probe_mode = h_probe_mode_view[i]
                h_obj_mode = h_obj_mode_view[i]
    
                if subpix_started:
                    probe_mode = [do_subpix(pr, subpix_shift_probe[i]) for pr in probe_mode]
                    h_probe_mode = [do_subpix(h_pr, subpix_shift_probe[i]) for h_pr in h_probe_mode]

                if dp_shift_ramp[i] is None:
                    fpsij = [fnorm_fw * fft(pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fnorm_fw * fft(pr * h_ob + h_pr * ob) for (pr, h_pr) in izip(probe_mode, h_probe_mode) for (ob, h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fnorm_fw * fft(h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
                else:
                    fpsij = [fnorm_fw * fft(dp_shift_ramp[i] * pr * ob) for pr in probe_mode for ob in obj_mode]
                    aj = [fnorm_fw * fft(dp_shift_ramp[i] * (pr * h_ob + h_pr * ob)) for (pr, h_pr) in izip(probe_mode, h_probe_mode) for (ob, h_ob) in izip(obj_mode, h_obj_mode)]
                    bj = [fnorm_fw * fft(dp_shift_ramp[i] * h_pr * h_ob) for h_pr in h_probe_mode for h_ob in h_obj_mode]
    
                A0 = U.abs2(fpsij)
                A1 = 2 * np.real(fpsij * aj.conj())
                A2 = 2 * np.real(fpsij * bj.conj()) + U.abs2(aj)
    
                if p.remove_incoherent_scattering:
                    A0 += Qinc_amp[i] * Iinc
                    A1 += 2 * Qinc_amp[i] * np.real(fQinc * cfh_Qinc)
                    A2 += Qinc_amp[i] * U.abs2(fh_Qinc)

                if floating_intensity:
                    A0 *= float_intens_coeff[i]
                    A1 *= float_intens_coeff[i]
                    A2 *= float_intens_coeff[i]

                DI = 1. - fmag[i] / np.sqrt(A0)
    
                B[1] += np.dot(fmask[i].flat, (A1 * DI).flat) * Brenorm
                B[2] += (np.dot(fmask[i].flat, (A2 * DI).flat) + .25 * np.dot(fmask[i].flat, (A1 ** 2 * fmag[i] / A0 ** (3 / 2)).flat)) * Brenorm
                #B[1] += np.dot(A1.flat,DI.flat) / Brenorm
                #B[2] += (np.dot(A2.flat,DI.flat) + .25*np.dot((A1**2).flat,(fmag[i]/A0**(3/2)).flat))/ Brenorm

        if parallel:
            comm.Allreduce(MPI.IN_PLACE, B)

        ##################################################################################################################################
        # -20- probe averaging & regul
        ##################################################################################################################################
        if average_probes:
            av_h_probe = np.zeros_like(av_probe)
            for ipr in range(num_probes):
                av_h_probe += U.pshift(h_probe[ipr], r1_shift[ipr])
            av_h_probe /= num_probes

            for ipr in range(num_probes):
                d_av_h_probe[ipr] = h_probe[ipr] - U.pshift(av_h_probe, r2_shift[ipr])

            B[0] += Brenorm * average_probe_amp * average_probe_normalization * U.norm2(d_av_probe)
            B[1] += Brenorm * average_probe_amp * average_probe_normalization * 2 * np.real(np.vdot(d_av_probe.flat, d_av_h_probe.flat))
            B[2] += Brenorm * average_probe_amp * average_probe_normalization * U.norm2(d_av_h_probe) 

        if reg_del2:
            # Quadratic distance regularization on the object (gaussian prior)
            c0, c1, c2 = regularizer.poly_line_coeffs(h_obj)
            B[0] += Brenorm * c0
            B[1] += Brenorm * c1
            B[2] += Brenorm * c2
            R_list.append(c0 / tot_measpts)

        if reg_TV:
            # Total variation regularization
            h_obj_xf = U.delxf(h_obj, axis= -2)
            h_obj_yf = U.delxf(h_obj, axis= -1)
            h_obj_xb = U.delxb(h_obj, axis= -2)
            h_obj_yb = U.delxb(h_obj, axis= -1)

            epsilon = 1e-10
            abs_obj_xf = np.abs(obj_xf)
            abs_obj_yf = np.abs(obj_yf)
            abs_obj_xb = np.abs(obj_xb)
            abs_obj_yb = np.abs(obj_yb)

            abs_obj_xb[abs_obj_xb < epsilon] = epsilon
            abs_obj_yb[abs_obj_yb < epsilon] = epsilon
            abs_obj_xf[abs_obj_xf < epsilon] = epsilon
            abs_obj_yf[abs_obj_yf < epsilon] = epsilon

            B[0] += Brenorm * reg_TV_amplitude * (abs_obj_xf.sum() + abs_obj_yf.sum() + abs_obj_xb.sum() + abs_obj_yb.sum())
            B[1] += Brenorm * reg_TV_amplitude * np.real(np.vdot((obj_xf / abs_obj_xf), h_obj_xf) + \
                                               np.vdot((obj_yf / abs_obj_yf), h_obj_yf) + \
                                               np.vdot((obj_xb / abs_obj_xb), h_obj_xb) + \
                                               np.vdot((obj_yb / abs_obj_yb), h_obj_yb)) 

        if reg_Huber:
            # Huber regularization
            h_obj_xf = U.delxf(h_obj, axis= -2)
            h_obj_yf = U.delxf(h_obj, axis= -1)
            h_obj_xb = U.delxb(h_obj, axis= -2)
            h_obj_yb = U.delxb(h_obj, axis= -1)

            f_obj_xf = np.sqrt(reg_Huber_parameter + U.abs2(obj_xf))
            f_obj_yf = np.sqrt(reg_Huber_parameter + U.abs2(obj_yf))
            f_obj_xb = np.sqrt(reg_Huber_parameter + U.abs2(obj_xb))
            f_obj_yb = np.sqrt(reg_Huber_parameter + U.abs2(obj_yb))

            B[0] += Brenorm * reg_Huber_amplitude * (f_obj_xf.sum() + f_obj_yf.sum() + f_obj_xb.sum() + f_obj_yb.sum())
            B[1] += Brenorm * reg_Huber_amplitude * np.real(np.vdot((obj_xf / f_obj_xf).flat, h_obj_xf.flat) + \
                                               np.vdot((obj_yf / f_obj_yf).flat, h_obj_yf.flat) + \
                                               np.vdot((obj_xb / f_obj_xb).flat, h_obj_xb.flat) + \
                                               np.vdot((obj_yb / f_obj_yb).flat, h_obj_yb.flat)) 
            B[2] += Brenorm * .5 * reg_Huber_amplitude * np.real(np.vdot((h_obj_xf / f_obj_xf).flat, h_obj_xf.flat) + \
                                                    np.vdot((h_obj_yf / f_obj_yf).flat, h_obj_yf.flat) + \
                                                    np.vdot((h_obj_xb / f_obj_xb).flat, h_obj_xb.flat) + \
                                                    np.vdot((h_obj_yb / f_obj_yb).flat, h_obj_yb.flat))
            B[2] -= Brenorm * .5 * reg_Huber_amplitude * ((np.real(obj_xf * h_obj_xf.conj() / f_obj_xf) ** 2 / f_obj_xf).sum() + \
                                              (np.real(obj_yf * h_obj_yf.conj() / f_obj_yf) ** 2 / f_obj_yf).sum() + \
                                              (np.real(obj_xb * h_obj_xb.conj() / f_obj_xb) ** 2 / f_obj_xb).sum() + \
                                              (np.real(obj_yb * h_obj_yb.conj() / f_obj_yb) ** 2 / f_obj_yb).sum())

        if np.isinf(B).any() or np.isnan(B).any():
            print 'Warning! inf or nan found! Trying to continue...'
            B[np.isinf(B)] = 0.
            B[np.isnan(B)] = 0.
        coeff = B.tolist()
        coeff.reverse()
        coeff_p = coeff[:-1] * np.array([8, 7, 6, 5, 4, 3, 2, 1])         # Polynomial coefficient of the first derivative
        coeff_pp = coeff_p[:-1] * np.array([7, 6, 5, 4, 3, 2, 1])         # Second derivative

        if quad_approx:
            tmin = -coeff_p[-1] / coeff_p[-2]
        else:
            t0 = np.roots(coeff_p)                        # Roots of first derivative
            t0p = np.real(t0[t0.imag == 0])                       # Only real roots should be considered
            pos_curv = (np.polyval(coeff_pp, t0p) > 0)    # Points for which curvature is positive
            t0p = t0p[pos_curv]
            id_abs_min = np.abs(t0p).argmin()		# 2010-11-08: Pick the root with the smallest absolut value.
            tmin = t0p[id_abs_min]		        # 2010-11-08: Pick the root with the smallest absolut value.
	#####removed 2010-11-08
        #id = np.polyval(coeff, t0p).argmin()     # Minimum among positive curvature points.
        #tmin = t0p[id]
	#####        

        if reg_del2:
            verbose(1, '%d - L=%g - R=%g (%3.2g%%)' % (itcg, LL_list[-1], R_list[-1], 100.*R_list[-1] / (LL_list[-1] + R_list[-1])))
        else:
            verbose(1, '%d - %g' % (itcg, LL_list[-1]))

        # Move to this point
        obj += tmin * h_obj
        probe += tmin * h_probe
        if remove_incoherent_scattering:
            Qinc += tmin * h_Qinc
        verbose(3, 'Object displacement: %8.3e' % (tmin * np.real(U.norm(h_obj))))
        verbose(3, 'Probe displacement: %8.3e' % (tmin * np.real(U.norm(h_probe))))
	
        # Apply periodic boundary conditions, added 2010-11-23, MD
        if p.use_periodic_boundary:
            verbose(2, 'Using periodic boundary conditions')
            left_stripe = obj[:, :, :asize[0]].copy();
            right_stripe = obj[:, :, -asize[0]:].copy();
            obj[:, :, -asize[0]:] = (right_stripe + left_stripe) / 2
            obj[:, :, :asize[0]] = (right_stripe + left_stripe) / 2

        if dump_object and (itcg % dump_object_interval == 0) and (not parallel or prank == 0):
            io.h5write(dump_object_pattern % itcg, obj=obj)            
        if dump_probe and (itcg % dump_probe_interval == 0) and (not parallel or prank == 0):
            io.h5write(dump_probe_pattern % itcg, probe=probe)            

        if (p.doplot or p.dump_plot) and (not parallel or prank == 0) and (itcg % p.plot_interval == 0):
            plot_fig.plot(p)
            if p.doplot:
                plot_fig.draw()
            if p.dump_plot:
                try:
                    dump_plot_file = p.dump_plot_patt % dump_plot_counter
                except TypeError:
                    dump_plot_file = p.dump_plot_patt
                ispdf = (os.path.splitext(dump_plot_file)[-1].lower() == '.pdf')
                if ispdf:
                    plot_fig.savefig(dump_plot_file, dpi=600)
                else:
                    plot_fig.savefig(dump_plot_file)
                dump_plot_counter += 1
        
        for f in hooks['loop_ML']:
            verbose(3, 'Calling loop-ML hooks')
            f(locals(), globals())
        

            
    ################
    ## -21- END MAIN LOOP
    ################
       

    
         
        # pos_estimates = [(sp_pr[0] + d[0], sp_pr[1] + d[1]) for sp_pr, d in izip(subpix_shift_probe, displacements)]
        one = sum([(-est_disp[0] + sp_disp[0]) ** 2 for sp_disp, est_disp in izip(subpix_displacements, displacements)]) / Ndata
        two = sum([(-est_disp[1] + sp_disp[1]) ** 2 for sp_disp, est_disp in izip(subpix_displacements, displacements)]) / Ndata
        three = (sum([-est_disp[0] + sp_disp[0] for sp_disp, est_disp in izip(subpix_displacements, displacements)]) / Ndata) ** 2
        four = (sum([-est_disp[1] + sp_disp[1] for sp_disp, est_disp in izip(subpix_displacements, displacements)]) / Ndata) ** 2
        del_r = one + two - three - four
        delta_r.append(del_r)
        verbose(3, '------------------------------------------------------------------------------------------------------------------------------------')
        #verbose(3, 'sum[(x-xhut)^2] = %f' % one)
        #verbose(3, 'sum[(y-yhut)^2] = %f' % two)
        #verbose(3, 'sum[(x-xhut)]^2 = %f' % three)
        #verbose(3, 'sum[(y-yhut)]^2 = %f' % four)
        verbose(3, 'Delta_r = %f' % del_r)
        verbose(3, '------------------------------------------------------------------------------------------------------------------------------------')
    
    verbose(3, '===============================FINAL DISPLACEMENTS==================================')
    map(lambda i, d: verbose(1, 'N=%d: (%.3f,%.3f)' % (i, d[0], d[1])), range(Ndata), displacements)
    verbose(3, '===============================FINAL DISPLACEMENTS==================================')
    
    ################
    ## -22- write delta_r to file
    ################    
    if subpix_disp:
        verbose(3, 'saving to csv')
        with open("%s_deviations.csv" % save_file, "w") as f:
            out = csv.writer(f, delimiter=',')
            map(lambda i, delta: out.writerow([i, delta]), range(numit), delta_r)

    for f in hooks['post_ML']:
        verbose(3, 'Calling post-ML hooks')
        f(locals(), globals())
	
    if parallel:
        header = 'Process # %d ' % prank
    else:
        header = ''
    verbose(1, header + 'Finished', mpi=True)
    

    p.obj = obj
    p.probe = probe
    p.LL_list = LL_list
    if hasattr(verbose, 'get_log'):
        p.verbose_log = verbose.get_log()
    p.history.append(('ML', numit, LL_list))
    #p.probe_view = probe_view
    #p.obj_view = obj_view

    if remove_incoherent_scattering:
        p.Qinc = Qinc
        p.Iinc = Iinc
        p.Qinc_support = Qinc_support

    # Save reconstruction    
    if p.save and (not parallel or prank == 0):
        #filename = p.run_name + '_ML.h5'
        #save_run(filename, p.paramdict)
        save_run(save_file, p.paramdict)
        verbose(2, 'Saved reconstruction in file %s' % save_file)

    if p.MPI_timing:
        fname = os.path.abspath(os.path.curdir) + '/Timing_proc_%03d.h5' % prank
        io.h5write(fname, timing=comm.timing)

    # Last plot (blocking!)
#    if p.last_plot and (not parallel or prank == 0):
#        pyplot.interactive(False)
#        plot_axes[0].imshow(np.abs(obj[0]))
#        plot_axes[1].imshow(np.angle(obj[0]))
#        plot_axes[2].imshow(U.imsave(probe[0]))
#        plot_axes[3].plot(LL_list)
#        pyplot.show()

    return p.paramdict

class Regul(object):
    """\
    Abstract base class for object regularizers.

    Subclasses must implement `gradient` and `poly_line_coeffs`.
    """
    def __init__(self, axes):
        # Axes along which subclasses take finite differences.
        self.axes = axes
        # Cached gradient and line-search polynomial coefficients.
        self.g = None
        self.coeffs = None

    def gradient(self, x, **kwargs):
        """Return the regularizer gradient evaluated at x."""
        raise NotImplementedError

    def poly_line_coeffs(self, h, **kwargs):
        """Return polynomial coefficients of the regularizer along direction h."""
        raise NotImplementedError
        
        
class Regul_del2(Regul):
    """\
    Regularizer - Gaussian prior.

    Penalizes amplitude * sum |grad x|^2, using both forward and backward
    finite differences along the two configured axes.
    """
    def __init__(self, amplitude, axes=[-2, -1]):
        Regul.__init__(self, axes)
        self.amplitude = amplitude
        self.delxy = None

    def gradient(self, x):
        """Gradient of the quadratic prior at x; caches the differences."""
        ax0, ax1 = self.axes
        diffs = [U.delxf(x, axis=ax0), U.delxf(x, axis=ax1),
                 U.delxb(x, axis=ax0), U.delxb(x, axis=ax1)]
        self.delxy = diffs
        dxf, dyf, dxb, dyb = diffs
        # Discrete Laplacian (backward minus forward differences).
        self.g = 2 * self.amplitude * (dxb + dyb - dxf - dyf)
        return self.g

    def poly_line_coeffs(self, h, x=None):
        """Coefficients [c0, c1, c2] of R(x + t*h) as a polynomial in t.

        If x is None, reuse the differences cached by the last gradient() call.
        """
        ax0, ax1 = self.axes
        if x is None:
            dxf, dyf, dxb, dyb = self.delxy
        else:
            dxf = U.delxf(x, axis=ax0)
            dyf = U.delxf(x, axis=ax1)
            dxb = U.delxb(x, axis=ax0)
            dyb = U.delxb(x, axis=ax1)

        hxf = U.delxf(h, axis=ax0)
        hyf = U.delxf(h, axis=ax1)
        hxb = U.delxb(h, axis=ax0)
        hyb = U.delxb(h, axis=ax1)

        amp = self.amplitude
        c0 = amp * (U.norm2(dxf) + U.norm2(dyf) + U.norm2(dxb) + U.norm2(dyb))
        c1 = 2 * amp * np.real(np.vdot(dxf, hxf) + np.vdot(dyf, hyf)
                               + np.vdot(dxb, hxb) + np.vdot(dyb, hyb))
        c2 = amp * (U.norm2(hxf) + U.norm2(hyf) + U.norm2(hxb) + U.norm2(hyb))

        self.coeff = [c0, c1, c2]
        return self.coeff


class Regul_TV(Regul):
    """\
    Regularizer - Exponential prior (Total variation).

    Penalizes amplitude * sum |grad x|, using both forward and backward
    finite differences along the two configured axes.
    """
    def __init__(self, amplitude, axes=[-2, -1]):
        Regul.__init__(self, axes)
        self.amplitude = amplitude
        self.delxy = None        # cached [del_xf, del_yf, del_xb, del_yb]
        self.abs_delxy = None    # cached eps-clamped moduli of the above

    def _differences(self, x):
        """Return (diffs, abs_diffs): finite differences of x and their
        moduli clamped away from zero to avoid division by zero."""
        ax0, ax1 = self.axes
        diffs = [U.delxf(x, axis=ax0), U.delxf(x, axis=ax1),
                 U.delxb(x, axis=ax0), U.delxb(x, axis=ax1)]
        epsilon = 1e-10
        abs_diffs = []
        for d in diffs:
            a = np.abs(d)
            a[a < epsilon] = epsilon
            abs_diffs.append(a)
        return diffs, abs_diffs

    def gradient(self, x):
        """Gradient of the TV functional at x; caches differences for reuse
        by a subsequent poly_line_coeffs(h) call."""
        diffs, abs_diffs = self._differences(x)
        self.delxy = diffs
        self.abs_delxy = abs_diffs
        del_xf, del_yf, del_xb, del_yb = diffs
        abs_del_xf, abs_del_yf, abs_del_xb, abs_del_yb = abs_diffs

        self.g = self.amplitude * (del_xb / abs_del_xb + del_yb / abs_del_yb
                                   - del_xf / abs_del_xf - del_yf / abs_del_yf)
        return self.g

    def poly_line_coeffs(self, h, x=None):
        """Coefficients [c0, c1, 0.] of R(x + t*h), linearized in t.

        If x is None, reuse the differences cached by the last gradient() call.
        """
        ax0, ax1 = self.axes
        if x is None:
            del_xf, del_yf, del_xb, del_yb = self.delxy
            # BUGFIX: this previously unpacked self.delxy a second time, so the
            # "moduli" were the complex differences themselves, corrupting
            # c0 and c1 whenever the cached values were reused.
            abs_del_xf, abs_del_yf, abs_del_xb, abs_del_yb = self.abs_delxy
        else:
            diffs, abs_diffs = self._differences(x)
            del_xf, del_yf, del_xb, del_yb = diffs
            abs_del_xf, abs_del_yf, abs_del_xb, abs_del_yb = abs_diffs

        hdel_xf = U.delxf(h, axis=ax0)
        hdel_yf = U.delxf(h, axis=ax1)
        hdel_xb = U.delxb(h, axis=ax0)
        hdel_yb = U.delxb(h, axis=ax1)

        c0 = self.amplitude * (abs_del_xf.sum() + abs_del_yf.sum()
                               + abs_del_xb.sum() + abs_del_yb.sum())
        c1 = self.amplitude * np.real(np.vdot((del_xf / abs_del_xf), hdel_xf) +
                                      np.vdot((del_yf / abs_del_yf), hdel_yf) +
                                      np.vdot((del_xb / abs_del_xb), hdel_xb) +
                                      np.vdot((del_yb / abs_del_yb), hdel_yb))
        # Second-order term is dropped (linearized TV), as assumed by the
        # polynomial line search in the ML loop.
        self.coeff = [c0, c1, 0.]
        return self.coeff


def ortho(modes):
    """\
    Orthogonalize the given list of modes.

    Diagonalizes the Gram (overlap) matrix of the modes and returns
    (amp, nplist): the relative power of each orthogonalized mode,
    sorted in decreasing order, and the orthogonalized modes themselves.
    """
    N = len(modes)
    # Gram matrix of pairwise overlaps <m2|m1>.
    overlaps = np.array([[np.vdot(m2, m1) for m1 in modes] for m2 in modes])
    e, v = np.linalg.eig(overlaps)
    order = np.argsort(-e)  # eigenvalue indices in decreasing order
    nplist = []
    for j in order:
        # Recombine the input modes with the j-th eigenvector coefficients.
        nplist.append(sum(modes[i] * v[i, j] for i in range(N)))
    amp = np.array([U.norm2(m) for m in nplist])
    amp /= amp.sum()
    return amp, nplist
 
def save_run(filename, p):
    """\
    Save the complete parameter dictionary including the reconstruction results.

    Each entry of p becomes a dataset in the HDF5 file at `filename`.
    """
    io.h5write(filename, **p)
    
def load_run(filename):
    """\
    Return the complete dictionary from a previous run.

    The stored MPI-related entries are refreshed from the current process
    configuration before the data partitioning is recomputed.
    """
    p = io.h5read(filename)

    # Overwrite stale parallel info with the current MPI configuration.
    for key, val in (('parallel', parallel), ('psize', psize), ('prank', prank)):
        p[key] = val

    # Recompute how to divide the data with the current MPI configuration.
    return _prepare_datainfo(p)
    

def load_intens(p=None, **kwargs):
    """\
    Load intensities.
