""" Module with various useful functions that don't really belong in any
of the other modules."""

import cPickle as pickle
import os
import math
import sys
from pprint import pformat
from textwrap import wrap
if float(sys.version[:3]) < 2.4:
    from sets import ImmutableSet as set
import numpy as np
try:
    import tables
    no_tables = False
except ImportError:
    no_tables = True

Ckms = 299792.458         # speed of light km/s, exact
pi = math.pi              # convenience alias used throughout this module

class Bunch(object):
    """Container exposing its keyword arguments as attributes, much
    like an IDL structure. Based on the python cookbook recipe, with
    __str__ and __repr__ added.

    >>> s = Bunch(a=1, b=2, c=['bar', 99])
    >>> s.a
    1
    >>> s.c
    ['bar', 99]
    >>> s
    Bunch(a, b, c)
    >>> print s
    Bunch(
    a = 1
    b = 2
    c = ['bar', 99])
    """
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __repr__(self):
        # just the attribute names, wrapped to fit on ~70 columns
        names = ', '.join(sorted(str(name) for name in self.__dict__))
        return 'Bunch(%s)' % '\n      '.join(wrap(names, width=69))

    def __str__(self):
        # one "name = value" line per attribute, sorted by name
        lines = sorted('%s = %s' % (name, pformat(value))
                       for name, value in self.__dict__.items())
        return 'Bunch(\n%s)' % '\n'.join(lines)


def poisson_noise(flux, sigma, seed=None):
    """ Adds poisson noise to a normalised flux array.

    Parameters
    ----------
    flux : array_like
        Normalised flux values (i.e. flux values divided by the
        continuum). All values must be >= 0.
    sigma : float
        One sigma error in the flux at the continuum level (where
        normalised flux=1).
    seed : int, optional
        If given, it is used to seed the random number generator.

    Returns:  flux with noise added, one sigma error array.

    Tests
    -----
    >>> fl = np.linspace(0,1)
    >>> fl0,er0 = poisson_noise(fl, 0.1, seed=114)
    >>> fl1,er1 = np.loadtxt('testdata/noisepoisson.txt.gz', unpack=1)
    >>> print np.allclose(fl0,fl1), np.allclose(er0,er1)
    True True
    """
    if seed is not None:
        np.random.seed(seed)

    flux = np.asarray(flux)
    sigma = float(sigma)
    if np.any(flux < 0):
        raise Exception('Flux values must be >= 0!')
    lamb = flux / (sigma * sigma)       # poisson mean (variance) per pixel

    # Draw over the full array (poisson(0) is always 0) so the random
    # stream - and hence results for a given seed - match earlier
    # versions of this function.
    counts = np.random.poisson(lamb)

    # Only divide where lamb > 0. The previous np.where() form
    # evaluated flnew/lamb for every pixel, emitting divide-by-zero
    # warnings whenever flux contained zeros; masked assignment avoids
    # that while producing identical values.
    good = lamb > 0
    flnew = np.zeros(flux.shape, dtype=float)
    sig = np.zeros(flux.shape, dtype=float)
    flnew[good] = counts[good] / lamb[good] * flux[good]
    sig[good] = flux[good] / np.sqrt(lamb[good])
    return flnew, sig

def addnoise(flux, sigma, minsig=None, seed=None):
    """ Add noise to a normalised flux array.

    Either gaussian, poisson, or a combination of both noise types is
    added to the flux array, depending on the keyword minsig.

    Parameters
    ----------
    flux: array_like
      Array of normalised flux values.

    sigma: float
      Total desired noise at the continuum (flux=1). Note the
      SNR = 1 / sigma.

    minsig: float, optional
      By default minsig is `None`, which means gaussian noise with
      standard deviation `sigma` is added to the flux. If minsig is
      given, a combination of poisson and gaussian noise is added to
      the flux to give an error of `sigma` at the continuum. In this
      case the gaussian noise component has st. dev. of `minsig`,
      which must be less than `sigma`.

    seed: int, optional
      If seed is given, it is used to seed the random number generator.
      By default the seed is not reset.

    Returns
    -------
    flux with noise added, one sigma error array.

    Tests
    -----
    >>> fl = np.linspace(0,1)
    >>> fl0,er0 = addnoise(fl, 0.2, seed=113)
    >>> fl1,er1 = np.loadtxt('testdata/noisegauss.txt.gz', unpack=1)
    >>> print np.allclose(fl0,fl1), np.allclose(er0,er1)
    True True
    >>> fl0,er0 = addnoise(fl, 0.2, minsig=0.05, seed=116)
    >>> fl1,er1 = np.loadtxt('testdata/noiseboth.txt.gz', unpack=1)
    >>> print np.allclose(fl0,fl1), np.allclose(er0,er1)
    True True
    """
    sigma = abs(float(sigma))

    if minsig is None:
        # pure gaussian noise with constant standard deviation sigma
        # (force float so integer input doesn't truncate the errors -
        # np.empty_like previously inherited an int dtype and filled
        # the error array with zeros)
        flux = np.asarray(flux, dtype=float)
        if seed is not None:
            np.random.seed(seed)
        dev = sigma * np.random.randn(len(flux))
        er = np.full(len(flux), sigma)
        return flux + dev, er
    else:
        minsig = abs(float(minsig))
        # minsig == sigma leaves no room for the poisson component and
        # previously slipped past the guard, dividing by zero below,
        # so require strictly less.
        if minsig >= sigma:
            raise Exception('Noise at continuum must be bigger than minsig!')
        # gaussian variance
        var_g = minsig * minsig
        # normalised sigma of poisson noise at the continuum
        sig_p_cont = np.sqrt(sigma * sigma - var_g)
        flnew, sig_p = poisson_noise(flux, sig_p_cont, seed=seed)
        # add the gaussian component
        flnew += minsig * np.random.randn(len(flux))
        # total one sigma error per pixel
        er = np.sqrt(sig_p * sig_p + var_g)

        return flnew, er

def read_specwizardshort(filename, ion='h1', nspec=None,
                         s2n=50, minsig=None, fwhm=7.0, dv=3.0,
                         safe=11.0, sim='gimic', combine=False, seed=None,
                         mult_tau=None, meanfl=None, hdf5name=None):
    """ Reads spectra from a Specwizard short spectrum output file.

    Calculates flux, rebins and adds noise. Returns a Bunch object
    with the information about the spectra and the snapshot they were
    taken from.

    ion   = 'h1'     # Ion for which to read tau. One of [h1 c4 si4 o6 n5 ne8]
    nspec = None     # List giving numbers of spectra to read. e.g. range(1,11)
                     # None means read all spectra.
    s2n   = 50.0     # Flux / sqrt(variance)
    fwhm  = 7.0      # Fhwm of Gaussian instrumental spread in km/s
    dv    = 3.0      # Pixel width of output spectra in km/s
    safe  = 11.0     # Radius of safe GIMIC region in h**-1 Mpc
    sim   = 'gimic'  # one of 'gimic', 'owls'
    combine = False  # if True, combine all the spectra into one long spectrum
    seed  = None     # if an integer, use this to seed the RNG for noise
    mult_tau: float (None)
        Multiply the tau values by this (e.g. 1.2). Ignored if meanfl
        is not None.
    hdf5name : str (None)
        If this is a string, write spectra to an hdf5 file with this
        filename.
    meanfl: float (None)
        If a float, multiply the optical depth of all spectra by a
        single value such that their total mean flux matches meanfl.
    """
    if no_tables:
        raise Exception("PyTables is not installed, so this function "
                        "isn't available")
    sim = sim.lower()
    assert sim in ('gimic','owls')
    if seed is not None:
        np.random.seed(seed)

    wrest = dict(h1  = 1215.6701,       # rest wavelength in Angstroms
                 c4  = 1548.2041,       # wavelengths from vpfit's atom.dat
                 si4 = 1393.76018,      # (strongest transition for doublets)
                 o6  = 1031.9261,
                 n5  = 1238.821,
                 ne8 = 770.409)

    oscdoub = dict(c4 = 0.094750 / 0.189900, # ratio of weaker to stronger
                   si4 = 0.254 / 0.513,      # doublet oscillator strength
                   o6  = 0.06580 / 0.13250)

    wdoub = dict(c4 = 1550.77845,          # wavelength of weaker
                 si4 = 1402.7729,          # doublet transition
                 o6 = 1037.6167)

    # open the specwizard output and pull snapshot info from the header
    print 'Opening file %s' % filename
    fh = tables.openFile(filename)
    redshift = fh.getNodeAttr('/Header','Redshift')

    # Hubble velocity along the sightline and cosmological parameters
    vh = fh.getNode('/VHubble_KMpS').read()
    Om = fh.getNodeAttr('/Header','Omega0')
    Ol = fh.getNodeAttr('/Header','OmegaLambda')
    if nspec is None:
        # read every spectrum in the file
        nsp = fh.getNodeAttr('/Parameters/SpecWizardRuntimeParameters',
                             'NumberOfSpectra')
        nspec = range(1, nsp + 1)
    else:
        try:
            # a sequence of spectrum numbers
            nspec = map(int, nspec)
        except TypeError:
            # a single integer n: read spectra 1..n
            nspec = range(1, int(nspec) + 1)

    if len(nspec) < 20:
        print 'Reading spectrum numbers %s' % nspec
    else:
        print ('Reading %i spectra, first and last numbers %i, %i' %
               (len(nspec),min(nspec),max(nspec)))

    # Hubble parameter at this redshift (units of h * 100 km/s/Mpc)
    Hz = 100.0 * np.sqrt(Om*(1+redshift)**3 + Ol)
    vmid = vh[int(len(vh) / 2.)]
    if sim == 'gimic':
        # velocity half-width of the 'safe' region around the box centre
        safedv = Hz * safe / (1+redshift)
        # keep twice the safe region; j0,j1 bracket the safe region
        # within that shortened array
        i0,i1 = vh.searchsorted([vmid - 2.0*safedv, vmid + 2.0*safedv])
        vhshort = vh[i0:i1]
        j0,j1 = vhshort.searchsorted([vmid - safedv, vmid + safedv])
    else:
        # owls: use the entire sightline
        safedv = max(vh)/2
        i0,i1 = 0, len(vh)
        j0,j1 = 0, len(vh)

    sigma = 1. / s2n
    #minsig = sigma / 5.    # if we want gaussian + poisson noise
    old_vwidth = vh[1] - vh[0]        # assumes constant dv!

    if combine and sim == 'owls':
        # combine all short spectra into one long spectrum (most
        # suitable for owls spectra)
        alltau = []
        for i,n in enumerate(nspec):
            # find the peak HI flux value, then cycle each spectrum
            # such that the peak value is at either end
            tau = fh.getNode('/Spectrum%s/%s/OpticalDepth' % (n,ion)).read()
            # NOTE(review): tauhi is read but never used below.
            tauhi = fh.getNode('/Spectrum%s/h1/OpticalDepth' % (n)).read()
            # NOTE(review): argmin is taken on the compressed array
            # tau[tau!=0], so imax indexes that subset rather than tau
            # itself - verify this is the intended cycling point.
            imax = tau[tau!=0].argmin()
            tau1 = np.concatenate([tau[imax:], tau[:imax]])
            if mult_tau is not None:
                tau1 *= mult_tau
            alltau.extend(tau1)

        alltau = np.array(alltau)
        vshort = np.arange(0, len(alltau), 1) * old_vwidth
        # flux from tau, instrumental smoothing, rebin to dv, add noise
        flux0 = np.exp(-alltau)
        flux1 = convolve_psf(flux0, fwhm/old_vwidth)
        nvel,flux2 = rebin(vshort, flux1, dv)
        flux3,er = addnoise(flux2, sigma)
        spectra = Bunch(fl=flux3,er=er,co=np.ones(len(flux3)))
        if ion in wdoub:
            # add tau from weaker ion
            vdoub   = vshort + (wdoub[ion] - wrest[ion])/wrest[ion]*Ckms
            taudoub = alltau * oscdoub[ion]
            alltau  +=  np.interp(vshort, vdoub, taudoub)
            flux0   = np.exp(-alltau)
            flux1   = convolve_psf(flux0, fwhm/old_vwidth)
            nvel,flux2 = rebin(vshort, flux1, dv)
            flux3,er = addnoise(flux2, sigma)
            spectra_bothtrans = Bunch(fl=flux3,er=er,co=np.ones(len(flux3)))
    else:
        alltau = []
        for n in nspec:
            tau = fh.getNode('/Spectrum%s/%s/OpticalDepth' % (n,ion)).read()
            alltau.append(tau[i0:i1])

        if meanfl is not None:
            # bisection on log10(multiplier) to find tau multiplier
            # that gives the desired mean flux.
            hi = 10
            lo = -10
            while True:
                multhi = 10**hi
                multlo = 10**lo
                # NOTE(review): mfl0 and mfl1 are computed but unused;
                # the bisection decision uses mfl only.
                mfl0 = np.mean([np.exp(-t[j0:j1] * multhi) for t in alltau])
                mfl1 = np.mean([np.exp(-t[j0:j1] * multlo) for t in alltau])
                mid = lo + 0.5 * (hi - lo)
                multmid = 10**mid
                mfl = np.mean([np.exp(-t[j0:j1] * multmid) for t in alltau])
                #print mfl
                if abs(mfl - meanfl) < 1e-5:  break
                elif mfl > meanfl:  lo = mid
                else:  hi = mid
            mult_tau = multmid

        spectra = []
        for i,n in enumerate(nspec):
            # progress indicator, every 10 spectra
            if not (i+1) % 10:  print i+1
            if mult_tau is not None:
                flux0 = np.exp(-alltau[i] * mult_tau)
            else:
                flux0 = np.exp(-alltau[i])
            # instrumental smoothing, rebin to dv, then add noise
            flux1 = convolve_psf(flux0, fwhm/old_vwidth)
            nvel,flux2 = rebin(vhshort, flux1, dv)
            co = flux2.max() * np.ones_like(flux2)
            flux3,er = addnoise(flux2, sigma, minsig=minsig)
            # sightline position and direction attributes
            xpos = fh.getNodeAttr('/Spectrum%s' % n, 'X-position')
            ypos = fh.getNodeAttr('/Spectrum%s' % n, 'Y-position')
            zpos = fh.getNodeAttr('/Spectrum%s' % n, 'Z-position')
            theta = fh.getNodeAttr('/Spectrum%s' % n, 'theta')
            phi = fh.getNodeAttr('/Spectrum%s' % n, 'phi')
            spectra.append(Bunch(fl=flux3, er=er, co=co, number=n, xpos=xpos,
                                  ypos=ypos, zpos=zpos, theta=theta, phi=phi))
    fh.close()
    # this now refers to bin centres - should add an offset to
    # compensate?
    wav = wrest[ion] * (1 + redshift) * (1 + (nvel-vmid)/Ckms)

    if sim == 'gimic':
        # indices bracketing the safe region (j0,j1) and a slightly
        # wider region used for fitting (k0,k1)
        j0,j1 = nvel.searchsorted([vmid-safedv, vmid+safedv])
        k0,k1 = nvel.searchsorted([vmid-safedv*1.5, vmid+safedv*1.5])
    else:
        j0,j1 = 0,-1
        k0,k1 = 0,-1

    if hdf5name is not None:
        # optionally dump the processed spectra to a new hdf5 file
        print 'Writing to %s' % hdf5name
        fh = tables.openFile(hdf5name, mode='w',
                             title='Spectra from %s' % filename)
        # write info about
        gspec = fh.createGroup('/', 'spectra', 'Spectra')
        fh.createArray(gspec, 'fl', np.array([s.fl for s in spectra]),
                       'Array of fluxes')
        fh.createArray(gspec, 'er', np.array([s.er for s in spectra]),
                       'Array of 1 sigma errors')
        fh.createArray(gspec, 'co', np.array([s.co for s in spectra]),
                       'Array of continua')
        fh.root._v_attrs.ion = ion
        fh.root._v_attrs.number_of_spectra = len(nspec)
        fh.root._v_attrs.signal_to_noise = s2n
        fh.root._v_attrs.instrumental_resolution_FWHM_kms = fwhm
        fh.root._v_attrs.pixel_size_kms = dv
        fh.root._v_attrs.min_safe_wavelength_Ang = wav[j0]
        fh.root._v_attrs.max_safe_wavelength_Ang = wav[j1]
        fh.root._v_attrs.min_safe_velocity_kms = vmid - safedv
        fh.root._v_attrs.max_safe_velocity_kms = vmid + safedv
        fh.createArray('/', 'velocity', nvel, 'Velocity in km/s')
        fh.createArray('/', 'wavelength', wav, 'Wavelength in Angstroms')
        fh.close()

    if combine:
        return Bunch(vel=nvel, z=redshift, spec=spectra,
                      spec_bothtrans=spectra_bothtrans)
    else:
        return Bunch(vel=nvel, z=redshift, wa=wav, vmid=vmid, spec=spectra,
                      safedv=safedv,
                      safewa=(wav[j0],wav[j1]), safefit=(wav[k0],wav[k1]))

def wmean(val, sig):
    """ Return the weighted mean and its error.

    The weights used are the inverse variances, 1/sig**2. Entries
    with a non-positive error are excluded.

    val: array with shape (N,)
      Array of values.

    sig: array with shape (N,)
      One sigma errors (sqrt(variance)) of the array values.

    Returns
    -------
    wmean, wsigma: floats
      The weighted mean and error on the weighted mean.

    Tests
    -----
    >>> val = np.concatenate([np.ones(100),np.ones(100)*2])
    >>> sig = range(1,201)
    >>> mean,err = wmean(val,sig)
    >>> np.allclose((mean, err), (1.003026102, 0.78088153553))
    True
    """
    val = np.asarray(val)
    sig = np.asarray(sig)

    # drop entries with non-positive (bad) errors
    good = sig > 0.
    val = val[good]
    sig = sig[good]

    # inverse-variance weights and their normalisation
    weights = 1. / (sig * sig)
    total = np.sum(weights)

    mean = np.sum(weights * val) / total
    error = 1. / np.sqrt(total)
    return mean, error


def calc_ew(wawidth, fl, co, er=None):
    """ Measure the equivalent width of a feature.

    Parameters
    ----------
    wawidth : array
        Array of pixel widths.
    fl : array
        Array of fluxes.
    co : array
        Array of continuum values.
    er : array, optional
        Array of one sigma errors.

    Returns
    -------
    ew : float
        The equivalent width in same units as the pixel width array.
    ewer : float
        If `er` is given, the one sigma error on the equivalent width is
        also returned.
    """
    # TODO: weight fluxes by their inverse variance if errors provided
    absorbed = 1.0 - fl / co
    ew = np.sum(wawidth * absorbed)
    if er is None:
        return ew
    # propagate the per-pixel flux errors through the sum
    ewsig = wawidth * er / co
    return ew, np.sqrt(np.sum(ewsig * ewsig))


def calctau(v, vc, wav0, osc, gam, logN, T=None, btemp=20, bturb=0,
            debug=False):
    """ Returns the optical depth (Voigt profile) for a transition.

    Given an transition with rest wavelength wav0, osc strength,
    natural linewidth gam; b parameter (doppler and turbulent); and
    log10 (column density), returns the optical depth in velocity
    space. v is an array of velocity values in km/s. vc is the
    velocity where the absorption line is centred.

    optional keywords:

    btemp = b parameter from doppler temperature broadening (km/s)

    bturb = b parameter from doppler turbulent broadening (km/s)

    T = temperature of cloud in Kelvin (overrides btemp).

    Warning
    -------
    deltav must be small enough to properly sample the profile,
    otherwise you'll get nonsense!

    Tests
    -----
    >>> wav0,osc,gam = 1215.6701,0.4164,6.265E8   # Ang, unitless, s^-1
    >>> btemp,bturb = 20., 0.                     # km/s
    >>> vc = 0                                    # km/s
    >>> v = np.linspace(-100, 100, 500)           # km/s
    >>> logN = 13.0                               # cm^-2
    >>> tau = calctau(v, vc, wav0, osc, gam, logN, btemp=btemp, bturb=bturb)
    >>> tau13 = np.loadtxt('testdata/tau_n13.txt.gz')
    >>> np.allclose(tau,tau13)
    True
    >>> v = np.linspace(-1000, 1000, 1000)        # km/s
    >>> logN = 21.                                # cm^-2
    >>> tau = calctau(v, vc, wav0, osc, gam, logN, btemp=btemp, bturb=bturb)
    >>> tau21 = np.loadtxt('testdata/tau_n21.txt.gz')
    >>> np.allclose(tau,tau21)
    True

    Can also map the velocity array to some wavelength:

    >>> z = 3.0
    >>> wa = wav0 * (1 + z) * (1 + v/Ckms)
    """
    # project-local module providing the Voigt function
    import voigt

    # physical constants, SI units
    me = 9.10938215e-31         # electron mass in kg from NIST 10/11/2007
    mp = 1.672621637e-27        # proton mass in kg from NIST 5/12/2007
    kboltz = 1.3806504e-23      # Boltzmann constant k in Joules/Kelvin
    C = 299792458.0             # speed of light m/s, exact
    echarge =  1.602176487e-19       # charge on electron, Coulombs
    permittivity = 8.854187817e-12 # Epsilon0, F/m or m^-3 kg^-1 s^4 A^2, exact
    e_const = 1./(4*np.pi*permittivity) # m^3 kg s^-4 A^-2  (exact)
    sqrt_ln2 = 0.832554611158           # sqrt(ln(2))

    # convert inputs to SI
    wav0 = wav0 * 1e-10                            # m
    N = 10**logN * 1.e4                            # absorbers/m^2
    if T is not None:
        # thermal b parameter from the cloud temperature
        btemp = np.sqrt(2. * kboltz * T / mp) * 1.e-3   # km/s
    # thermal and turbulent b parameters add in quadrature
    b = math.hypot(btemp, bturb) * 1.e3                 # m/s
    nu0 = C / wav0                                 # rest frequency, s^-1
    # Now use doppler relation between v and nu assuming gam << nu0
    gam_v = gam / nu0 * C             # m/s
    if debug:
        # NOTE(review): under python 2 this prints the tuple repr
        print ('Widths in km/s (Lorentzian Gamma, Gaussian b):',
               gam_v/1.e3, b/1.e3)


    fwhml = gam_v / (2. * pi)                # m/s
    fwhmg = 2. * sqrt_ln2 * b                # m/s

    ##### sampling check ######
    ic = np.searchsorted(v,vc)
    try:
        vstep = v[ic] - v[ic-1]
    except IndexError:
        # NOTE(review): 4*'%s ' gives 4 format slots but 5 values are
        # supplied, so this re-raise would itself raise TypeError -
        # verify and fix the format string.
        raise IndexError(4*'%s ' % (len(v),ic,ic-1,vc,v))

    fwhm = max(gam_v / 1.e3, fwhmg / 1.e3)

    if vstep > fwhm:
        print 'Warning: tau profile undersampled!'
        print '  Pixel width: %f km/s, transition fwhm: %f km/s' % (vstep,fwhm)
        # best not to correct for this here, because even if we do,
        # we'll get nonsense if we convolve the resulting flux with an
        # instrumental profile.  Need to use smaller dv size
        # throughout tau, exp(-tau) and convolution of exp(-tau)
        # calculations, only re-binning back to original dv size after
        # all these steps.

    # standard dimensionless Voigt-profile variables
    u = (v - vc) * 1.e3 / b                                # dimensionless
    a = gam_v / (4 * pi * b)                               # dimensionless
    vp = voigt.voigt(a, u)                                 # dimensionless
    const = pi * e_const * echarge**2 / (me*C)             # m^2/s
    tau = const * N * osc * wav0 / (np.sqrt(pi) * b) * vp  # dimensionless

    return tau

def calc_iontau(wa, ion, zp1, logN, b, debug=False, ticks=False):
    """ Returns tau values at each wavelength for transitions in ion.

    Input
    -----
    wa:        wavelength array
    ion:       ion entry from readatom output dictionary
    zp1:       redshift+1
    logN:      log10(column density in cm**-2)
    b:         b parameter (km/s).  Assumes thermal broadening.
    ticks:     if True, also return the list of redshifted centre
               wavelengths of the transitions used.

    Returns
    -------
    tau: ndarray of optical depth values.

    Tests
    -----
    >>> wa = np.linspace(2500, 2700, 5000)
    >>> at = readatom('/home/nhmc/installed/vpfit/atom.dat')
    >>> tau = calc_iontau(wa, at['CIV'], 1.7, 14, 50)
    >>> abs(tau.max() - 0.8803955) < 1e-6
    True
    >>> abs(tau.min() - 0.0) < 1e-6
    True
    """
    if debug:
        # report the pixel width at the middle of the wavelength array
        i = int(len(wa)/2)
        psize =  Ckms * (wa[i] - wa[i-1]) / wa[i]
        print 'approx pixel width %.1f km/s at %.1f Ang' % (psize, wa[i])

    #select only ions with redshifted central wavelengths inside wa
    obswavs = ion.wav * zp1
    trans = ion[(min(wa) < obswavs) & (obswavs < max(wa))]
    if debug:
        if len(trans) == 0:
            print 'No transitions found overlapping with wavelength array'

    # sum the optical depth contribution of each transition
    tickmarks = []
    sumtau = np.zeros_like(wa)
    for wav0,osc,gam in trans:
        refwav = wav0 * zp1
        tickmarks.append(refwav)
        # velocity offset of each pixel from the transition centre
        dv = (wa - refwav) / refwav * Ckms
        tau = calctau(dv, 0.0, wav0, osc, gam, logN, btemp=b)
        sumtau += tau

    if ticks:
        return sumtau, tickmarks
    else:
        return sumtau

def calc_abs(wa, ion, trans, zp1, resolution, logN=13.0, b=10.0):
    """ Generate a normalised continuum over some wavelength range
    including absorption from an ion.

    Includes instrumental broadening. Splits the wavelength array up
    into smaller bins if necessary to sample the instrumental profile
    properly.

    Parameters
    ----------
    wa: array of floats
        The array of wavelengths (Angstroms) where the continuum will
        be generated.
    ion: atom.dat entry
        The full atom.dat list of transitions for given ion,
        i.e. at['HI']
    trans: single transition entry from atom.dat
        a transition entry from atom.dat read with readatom(). e.g
        at['HI'][0]
        NOTE(review): `trans` is not referenced anywhere in the body.
    zp1: float
        One plus the redshift of the ion.
    resolution:
        The spectrum resolution.
    logN: float (13.0)
        log10 of column density in absorbers per cm**2.
    b: float (10.)
        b parameter in km/s of the ion.

    Returns
    -------
    co: array of floats
        normalised continuum including absorption
    """
    # show expected lines with sample N, b.

    #use smaller wave divisions so voigt profiles are sampled properly
    # new array
    wa = np.asarray(wa)
    # mean pixel width in km/s
    dv = np.diff(wa).mean() / wa.mean() * Ckms
    # number of sub-divisions needed so sub-pixels are < b/2 wide
    ndiv = int( np.ceil( dv / (b / 2.) ) )
    print 'dividing', ndiv, 'times'
    n = len(wa)
    # finer wavelength grid, interpolated between the original pixels
    wa0 = np.interp(np.linspace(0, n-1, (n-1)*ndiv), range(n), wa)

    tau = calc_iontau(wa0, ion, zp1, logN, b)
    co0 = np.exp(-tau)

    # instrumental broadening
    fwhmwa = wa.mean() / resolution
    # indexnear: defined elsewhere in this module (not visible in this
    # chunk); presumably returns the index of the pixel nearest the
    # given value - TODO confirm.
    i = indexnear(wa, wa.mean())
    fwhmpix = fwhmwa / (wa[i+1] - wa[i])
    print 'fwhmpix', fwhmpix
    co1 = convolve_psf(co0, fwhmpix * ndiv)

    # re-bin back to original wav array
    co2 = np.empty_like(wa)
    for i in range(len(wa)):
        co2[i] = co1[i*ndiv:i*ndiv+ndiv].mean()

    # empty sub-ranges give nan means; replace those with continuum = 1
    co = np.where(np.isnan(co2), 1, co2)
    return co



def convolve_psf(a, fwhm, edge='invert', debug=False):
    """ Given an array of values 'a' and a gaussian full width at half
    maximum 'fwhm' in pixel units, returns the convolution of the
    array with the normalised gaussian.

    Gaussian is calculated for as many pixels required until it drops
    to 1% of peak value. Note that the data will be spoiled at
    distances n/2 (rounding down) from the edges, where n is width of
    the gaussian in pixels.

    The FWHM given should be > 2 pixels to sample the gaussian PSF
    properly.
    """
    const2   = 2.354820046             # 2*sqrt(2*ln(2))
    const100 = 3.034854259             # sqrt(2*ln(100))
    sigma = fwhm / const2
    # gaussian drops to 1/100 of maximum value at x =
    # sqrt(2*ln(100))*sigma, so number of pixels to include from
    # centre of gaussian is (int() because np.linspace requires an
    # integer count - np.ceil returns a float):
    n = int(np.ceil(const100 * sigma))
    if debug:
        print("First and last %s pixels of output array will be invalid" % n)
    x = np.linspace(-n, n, 2*n + 1)        # total no. of pixels = 2n+1
    gauss = np.exp(-0.5 * (x / sigma) ** 2 )

    return convolve_reflect_edges(a, gauss, edge=edge)

def convolve_reflect_edges(a, window, edge='invert'):
    """ Convolve a with a window array. The window array should have
    an odd number of elements.

    The edges of `a` are extended by reflection (and, if requested,
    inversion) to reduce edge effects.

    edge = {'extend', 'reflect', 'invert'} or an integer

    The window is normalised before use; the caller's window array is
    left unchanged.

    Raises ValueError for an even-length window, a window longer than
    `a` allows, or an unrecognised `edge` value.
    """
    npts = len(window)
    if not npts % 2:
        raise ValueError('`window` must have an odd number of elements!')

    n = npts // 2

    # normalise a copy of the window; previously the caller's array
    # was modified in place with /=
    window = np.asarray(window) / np.sum(window)

    # Add the reflected (and inverted) edges to either end of the
    # array to reduce edge effects in the convolution.
    if len(a) < 2*n:
        raise ValueError('Window is too big for the array!')
    if edge == 'invert':
        temp1 = 2*a[0] - a[n:0:-1], a, 2*a[-1] - a[-2:-n-2:-1]
    elif edge == 'reflect':
        temp1 =  a[n:0:-1], a, a[-2:-n-2:-1]
    elif edge == 'extend':
        temp1 =  a[0] * np.ones(n) , a, a[-1] * np.ones(n)
    else:
        try:
            # keep the converted value: a float edge would otherwise
            # be used directly as a slice index below
            edge = int(edge)
        except (TypeError, ValueError):
            # int('foo') raises ValueError, which the old
            # `except TypeError` missed, so the message below was
            # unreachable for bad strings
            raise ValueError('Unknown value for edge keyword: %s' % edge)
        # pad with the medians of the first/last `edge` values
        med1 = np.median(a[:edge])
        med2 = np.median(a[-edge:])
        temp1 =  med1 * np.ones(n) , a, med2 * np.ones(n)

    temp2 = np.convolve(np.concatenate(temp1), window, mode='same')

    # strip the padding so the output matches the input length
    return temp2[n:-n]

def gauss(x, fwhm, height):
    """ Gaussian profile with peak value `height` and full width at
    half maximum `fwhm`, evaluated at `x`."""
    two_sqrt_2ln2 = 2.354820046        # 2*sqrt(2*ln(2))
    sigma = fwhm / two_sqrt_2ln2
    return height * np.exp(-0.5 * (x / sigma) ** 2)

def gauss2d(x, y, xfwhm, yfwhm, height):
    """ 2-d gaussian with peak value `height` and per-axis FWHMs
    `xfwhm`, `yfwhm`. If both x and y are iterable they are expanded
    onto a grid with np.meshgrid."""
    two_sqrt_2ln2 = 2.354820046        # 2*sqrt(2*ln(2))
    if hasattr(x, '__iter__') and hasattr(y, '__iter__'):
        x, y = np.meshgrid(x, y)
    xsig = xfwhm / two_sqrt_2ln2
    ysig = yfwhm / two_sqrt_2ln2
    exponent = x**2 / (2*xsig*xsig) + y**2 / (2*ysig*ysig)
    return height * np.exp(-exponent)

def readatom(filename, debug=False):
    """ Reads in atomic transitions from a vpfit atom.dat file.  Single
    argument is atom.dat filename; gzipped files ('.gz') are handled.

    Returns a dictionary mapping ion name (e.g. 'HI', 'CIV') to a
    record array with fields wav, osc, gam.

    Examples
    --------
    >>> at = readatom('testdata/atom.dat.gz')
    """

    # File format:
    # first 2 chars - element.
    #        Check that only alphabetic characters
    #        are used (if not, discard line).
    # next 4 chars - ionization state (I, II, II*, etc)
    # remove first 6 characters, then:
    # first string - wavelength
    # second string - osc strength
    # third string - lifetime? (intrinsic width constant)
    # ignore anything else on the line

    atom = dict()
    if filename.endswith('.gz'):
        import gzip
        fh = gzip.open(filename)
    else:
        fh = open(filename)
    try:
        for line in fh:
            if debug:
                print(line)
            # skip anything not starting with a capital letter
            # (comments, 'end' markers, continuation lines, ...)
            if not line[:1].isupper():
                continue
            ion = line[:6].replace(' ', '')
            wav, osc, gam = [float(item) for item in line[6:].split()[:3]]
            atom.setdefault(ion, []).append((wav, osc, gam))
    finally:
        # close the file even if a malformed line raises above
        # (previously the handle leaked on a parse error)
        fh.close()

    # turn each ion's list of transitions into a record array
    for ion in atom:
        atom[ion] = np.rec.fromrecords(atom[ion], names='wav,osc,gam')

    return atom

def percentile(a, frac):
    """ Returns the value in an array such that the given fraction of
    all array values are below this value.

    Rounds down. A `frac` of 1.0 (or more) returns the maximum value
    rather than raising an IndexError.

    Examples
    --------
    >>> a = range(12)
    >>> print percentile(a, 0.8)
    9
    """
    a = np.sort(np.asarray(a))
    # clamp so frac == 1.0 maps to the last element instead of
    # indexing one element past the end
    ind = min(int(frac * len(a)), len(a) - 1)
    return a[ind]

def ncorr(func,window):
    """ Given two arrays, returns their cross-correlation function,
    'normalised'. (Removes aliasing, I think? Removes the drop in the
    xcorr function that occurs at the edges because the entire window
    array is not contained within the func array.) """
    # make sure `func` is the longer of the two arrays
    if len(func) < len(window):
        func,window = window,func

    x = np.correlate(func,window,mode='same')
    #pl.clf()
    #pl.plot(x,'sg')

    winsize =  len(window)   # window (smaller array)
    npts = len(func)       # larger array
    # Re-weight the edge values, where only part of the window
    # overlaps func, by the fraction of the window that overlapped.
    for i,item in enumerate(x):
        if i < np.ceil(winsize/2.):
            weight = (i + np.ceil(winsize/2.)) / winsize
        elif npts - i < np.ceil(winsize/2.):
            weight = (npts - i + np.floor(winsize/2.)) / winsize
        else:  continue
        # NOTE(review): if x has an integer dtype this assignment
        # truncates the corrected value - consider casting x to float.
        x[i] = item / weight

    # NOTE(review): `offset` is computed but never used or returned;
    # it appears only in the commented-out plotting line below.
    offset = np.floor(winsize / 2.)
    #pl.plot(np.arange(npts) - offset,x,'s')
    return x

def rebin(vel, flux, dv):
    """ Rebin flux values to a linear pixel scale.

    Assumes the vel positions given refer to the lower edge of the
    flux bins.

    vel : old pixel values (doesn't have to be a velocity)
    flux : old flux values
    dv : width (in same units as old pixel scale) of pixels in the new
         pixel scale

    Returns the new pixel scale (lower edges, with the final edge
    dropped) and the array of rebinned flux values.
    """
    newvel = np.arange(min(vel), max(vel), dv)
    newflux = []
    df = 0.        # flux accumulated for the current new pixel, weighted
    weight = 0.    # total overlap width accumulated for the current pixel
    j = 0          # index into the new (rebinned) scale
    i = 0          # index into the old scale
    lo0 = vel[i]   # lower edge of contributing (sub-)pixel in old scale
    while True:
        hi0 = vel[i+1]     # upper edge of contr. (sub-)pixel in old scale
        hi1 = newvel[j+1]  # upper edge of jth pixel in rebinned scale
        if hi0 < hi1:
            # The old pixel ends before the new pixel does: it
            # contributes all of its remaining width, then we move to
            # the next old pixel.
            dweight = hi0 - lo0
            df += flux[i] * dweight
            weight += dweight
            lo0 = hi0
            i += 1
            if i > len(vel) - 2:  break
        else:
            # We have all old pixel flux values that contribute to the
            # new pixel; append the new flux value and move to the
            # next new pixel.
            dweight = hi1 - lo0
            df += flux[i] * dweight
            weight += dweight
            newflux.append(df/weight)
            df = 0.
            weight = 0.
            lo0 = hi1
            j += 1
            if j > len(newvel) - 2:  break

    return newvel[:-1], np.array(newflux)

def make_rdgen_input(specfilename, filename, wmin=None, wmax=None):
    """ Write an rdgen input file to `filename` containing the
    spectrum name `specfilename` and the wavelength range wmin to
    wmax."""
    commands = ['rd %s' % specfilename,
                'ab',
                '',
                '',
                '',
                '%s %s' % (wmin, wmax),
                'qu',
                '']
    out = open(filename, 'w')
    out.write('\n'.join(commands))
    out.close()

def make_autovpin_input(specfilename, filename):
    """ Write an autovpin input file to `filename`: the spectrum name
    followed by five empty lines."""
    out = open(filename, 'w')
    out.write(specfilename + '\n' * 6)
    out.close()

def findnoise(fl, sig, bins=(0.2, 0.4, 0.6, 0.8)):
    """ Read noise properties of a normalised spectrum.

    Returns the median noise value for pixels with fluxes in the ranges:
    fl <= 0.2
    0.2 < fl <= 0.4
    0.4 < fl <= 0.6
    0.6 < fl <= 0.8
    fl > 0.8

    Different flux bin edges can be given with the bins keyword.

    Note we assume that the noise properties do not change with
    wavelength!

    Tests
    -----
    >>> wa,fl,er,co = np.loadtxt('testdata/HE0940m1050m.txt.gz', unpack=1)
    >>> bins = (0.05, 0.2, 0.4, 0.6, 0.8, 0.95)
    >>> i,j = wa.searchsorted([5800,6000])
    >>> nfl, ner = fl[i:j]/co[i:j], er[i:j]/co[i:j]
    >>> noise = findnoise(nfl, ner, bins=bins) # doctest: +ELLIPSIS
    bin 0: 3 pixels
    ...
    >>> comparison = [0.00337750, 0.00525233, 0.00875719, \
    0.01069438, 0.0129873, 0.01471107, 0.01493616]
    >>> np.allclose(noise, comparison)
    True
    """
    fl, sig = (np.asarray(a) for a in (fl, sig))
    # build a boolean selection for each flux bin
    cond = [fl <= bins[0]]
    print('bin 0: %i pixels' % len(fl[cond[-1]]))
    for i, (lower, upper) in enumerate(zip(bins[:-1], bins[1:])):
        cond.append((fl > lower) & (fl <= upper))
        print('bin %i: %i pixels' % (i + 1, len(fl[cond[-1]])))
    # strict inequality: the previous `fl >= bins[-1]` counted pixels
    # exactly at the last edge in two bins (the docstring's own ranges
    # were inconsistent here too)
    cond.append(fl > bins[-1])
    print('last bin: %i pixels' % len(fl[cond[-1]]))
    noise = [np.median(sig[condition]) for condition in cond]
    return noise


def write_ds9reg(xvals, yvals, filename=None, coord='IMAGE', ptype='x',
                 colour='cyan', tag='all',text=None):
    """Write a region file for ds9.

    colour is one of: cyan blue magenta red green yellow white black

    coord specifies the units of xvals, yvals and is one of:

    IMAGE                   # pixel coords of current file
    FK5, J2000              # sky coordinate systems

    ptype (point) values (can also be followed by an integer size):
    circle
    box
    diamond
    cross
    x
    arrow
    boxcircle

    Returns the list of region-file lines; also writes them to
    `filename` if it is given.
    """
    regions = ['global font="helvetica 10 normal" select=1 highlite=1 '
               'edit=0 move=1 delete=1 include=1 fixed=0 source\n']
    regions.append(coord + '\n')

    # A single tag string (or a sequence of the wrong length) is applied
    # to every point.  The explicit isinstance check fixes the case where
    # a tag string happens to have the same length as xvals: previously
    # it was indexed per-character (e.g. 'all' with exactly 3 points).
    if isinstance(tag, str) or len(tag) != len(xvals):
        tag = [tag] * len(xvals)

    # 'template' rather than 'format' to avoid shadowing the builtin
    template = 'point(%12.8f,%12.8f) # point=%s text={%s} color=%s tag={%s}\n'
    for i, (x, y) in enumerate(zip(xvals, yvals)):
        # default label is the point's index
        textval = (i if text is None else text[i])
        regions.append(template % (x, y, ptype, textval, colour, tag[i]))

    if filename is not None:
        fh = open(filename, 'w')
        try:
            fh.writelines(regions)
        finally:
            fh.close()
    return regions

def ds9reg_gmosslits(slitinfo, filename=None, coord='IMAGE', pixscale=1,
                     colour='cyan', tag='all', text=None, proj=None):
    """Write a ds9 region file outlining GMOS slits as polygons.

    slitinfo is a record array (e.g. read with pyfits) or any sequence
    of records indexable by field name.  The fields actually used are
    x_ccd, y_ccd, slitsize_x and slitsize_y.

    colour is one of: cyan blue magenta red green yellow white black

    coord is one of:
    IMAGE                   # pixel coords of current file
    FK5, J2000              # sky coordinate systems. proj must be given
                            # (an object with a toworld((x, y)) method).

    pixscale converts the slit sizes to pixels.

    Returns the list of region-file lines; also writes them to
    `filename` if it is given.
    """
    regions = ['global font="helvetica 10 normal" select=1 highlite=1 '
               'edit=0 move=1 delete=1 include=1 fixed=0 source\n']
    regions.append(coord + '\n')

    # A single tag string (or a sequence of the wrong length) is applied
    # to every slit; the isinstance check avoids indexing a string
    # per-character when its length matches len(slitinfo).
    if isinstance(tag, str) or len(tag) != len(slitinfo):
        tag = [tag] * len(slitinfo)

    # 'template' rather than 'format' to avoid shadowing the builtin
    template = 'polygon('+'%12.8f,'*7+'%12.8f) # text={%s} color=%s tag={%s}\n'
    for i, info in enumerate(slitinfo):
        # default label is the slit's index
        textval = (i if text is None else text[i])
        x, y = info['x_ccd'], info['y_ccd']
        w, h = info['slitsize_x']/pixscale, info['slitsize_y']/pixscale
        # corners of the slit rectangle, centred on (x, y)
        x0, x1 = x - w/2., x + w/2.
        y0, y1 = y - h/2., y + h/2.
        if coord in ('FK5', 'J2000'):
            # convert each corner from pixel to sky coordinates
            r0, d0 = proj.toworld((x0, y0))
            r1, d1 = proj.toworld((x0, y1))
            r2, d2 = proj.toworld((x1, y1))
            r3, d3 = proj.toworld((x1, y0))
            vals = (r0, d0, r1, d1, r2, d2, r3, d3, textval, colour, tag[i])
        else:
            vals = (x0, y0, x0, y1, x1, y1, x1, y0, textval, colour, tag[i])
        regions.append(template % vals)

    if filename is not None:
        fh = open(filename, 'w')
        try:
            fh.writelines(regions)
        finally:
            fh.close()
    return regions


def indexnear(ar, val):
    """ Find the element in an array closest to a given value.

    The input array must be sorted lowest to highest.  Returns the
    index of the element with a value closest to the given value.

    Parameters
    ----------
    ar: array_like
      Input array. It must be sorted smallest to largest.
    val: float
      Find the element of `ar` that is closest to `val`.

    Returns
    -------
    index: int
      Index of the `ar` element with the closest value to `val`.

    Examples
    --------
    >>> wa = np.arange(4000, 4500, 0.051)
    >>> i = indexnear(wa, 4302.5)
    >>> print i, wa[i]
    5931 4302.481
    >>> i = indexnear(wa, 4600.0)
    >>> print i, wa[i]
    9803 4499.953
    >>> i = indexnear(wa, 3000.0)
    >>> print i, wa[i]
    0 4000.0
    """
    arr = np.asarray(ar)
    pos = arr.searchsorted(val)
    # searchsorted gives the insertion point, which rounds up; pick
    # whichever neighbour of that point is actually closer.
    if pos == 0:
        # val is at or below the first element
        return pos
    if pos == len(arr):
        # val is above the last element; arr[pos] would be out of range
        return pos - 1
    return pos - 1 if (val - arr[pos - 1]) < (arr[pos] - val) else pos

def ismember(a1, a2):
    """ Test whether items from a2 are in a1.

    Returns a boolean mask with the same length as a1, True where the
    a1 element occurs in a2.

    This does the same thing as np.setmember1d, but works on
    non-unique arrays.

    Only a few (2-4) times slower than np.setmember1d, and a lot
    faster than [i in a2 for i in a1].

    np.setmember1d gets the following example wrong:

    >>> test = np.array([5,4,5,3,4,4,3,4,3,5,2,1,5,5])
    >>> state = [2,3,4]
    >>> mask = ismember(test,state)
    >>> test[mask]
    array([4, 3, 4, 4, 3, 4, 3, 2])
    """
    a1 = np.asarray(a1)
    # guard: the original indexed a1[0] and crashed on empty input
    if a1.size == 0:
        return np.zeros(0, dtype=bool)
    remaining = set(a2)
    ind = a1.argsort()
    srt = a1[ind]
    # walk the sorted values; runs of equal values reuse the first answer
    mask = [srt[0] in remaining]
    if mask[0]:
        remaining.remove(srt[0])
    prev = srt[0]
    for item in srt[1:]:
        if item == prev:
            # duplicate of the previous sorted value: same membership
            mask.append(mask[-1])
        else:
            found = item in remaining
            mask.append(found)
            if found:
                # drop matched values so each is looked up only once
                remaining.remove(item)
            prev = item
    # restore mask to the original ordering of a1
    return np.array(mask)[ind.argsort()]

def findtrans(name, atomdat=None):
    """ Look up the atom.dat entry best matching an ion name plus an
    approximate rest wavelength (e.g. 'CIV 1550').

    Returns a short identifier string and the matching transition
    record.

    >>> ion, tr = findtrans('CIV 1550')
    >>> np.allclose(tr['wav'], 1550.7812)
    True
    """
    if atomdat is None:
        atomdat = readatom('/home/nhmc/installed/vpfit/atom.dat')
    name = name.strip()
    # split into the leading alphabetic ion name and trailing wavelength
    i = 0
    while name[i].isalpha():
        i += 1
    ion = name[:i]
    wa = float(name[i:])
    # indexnear requires wavelengths sorted lowest to highest
    order = np.argsort(atomdat[ion].wav)
    sorted_wavs = atomdat[ion].wav[order]
    best = order[indexnear(sorted_wavs, wa)]
    tr = atomdat[ion][best]
    # short label like 'CIV 1550' (integer part of the wavelength)
    label = '%s %s' % (ion, ('%.1f' % tr['wav']).split('.')[0])
    return label, tr

def calc_Mstar_b(z):
    """ Find the Schechter parameter M* in the b band at redshift z.

    Linearly interpolates over the Faber et al. 2007 DEEP2 averaged
    values, assuming M*_b = -20.0 at z=0 (rough average of the
    z=0.07-0.1 points in Faber 2007).
    """
    redshifts = (0.0, 0.3, 0.5, 0.7, 0.9, 1.1)
    mstar_b = (-20.00, -21.07, -21.15, -21.51, -21.36, -21.54)
    return np.interp(z, redshifts, mstar_b)

def combinations(items, n):
    """ A generator for the ways you can take n items (order
    unimportant) from a sequence of items.

    Yields each combination as a list, preserving the original order
    of `items`.

    >>> list(combinations([1, 2, 3], 2))
    [[1, 2], [1, 3], [2, 3]]
    """
    if n == 0:
        yield []
    else:
        # range (not xrange) so this also runs under Python 3;
        # behaviour is identical under Python 2
        for i in range(len(items)):
            for tail in combinations(items[i+1:], n-1):
                yield [items[i]] + tail

def permutations(items):
    """ A generator for all orderings of `items`, each yielded as a
    list.

    Fixes the previous implementation, which returned
    combinations(items, len(items)); since combinations only considers
    later elements at each step, that yielded a single result (the
    original ordering) instead of all n! permutations.

    >>> sorted(permutations([1, 2]))
    [[1, 2], [2, 1]]
    """
    items = list(items)
    if len(items) <= 1:
        # zero or one item: only the identity ordering
        yield list(items)
    else:
        for i in range(len(items)):
            rest = items[:i] + items[i+1:]
            for perm in permutations(rest):
                yield [items[i]] + perm

def nan2num(a, replace=0):
    """ Return a copy of `a` with NaN entries replaced.

    `replace` may be a number (used directly), 'mean' (use the mean of
    the non-NaN values), or 'interp' (linearly interpolate each NaN
    from the nearest non-NaN values).
    """
    arr = np.asanyarray(a)
    out = arr.copy()
    nanmask = np.isnan(out)
    good = ~nanmask
    if replace == 'mean':
        fillvals = out[good].mean()
    elif replace == 'interp':
        # interpolate over the array index positions
        idx = np.arange(len(arr))
        fillvals = np.interp(idx[nanmask], idx[good], out[good])
    else:
        fillvals = replace
    out[nanmask] = fillvals
    return out

def _test():
    """ Run all of this module's doctests. """
    from doctest import testmod
    testmod()

if __name__ == "__main__":
    _test()
