import pdb
import copy
import time
import numpy as np
from astro.utilities import ismember, wmean, combinations
from astro.coord import ang_sep
import matplotlib.pyplot as pl
import xcorr
from xcorr import \
     wlya, wlyb,  Ckms, cosmo, PC, A4LANDSCAPE, A4PORTRAIT, CRADECS,\
     num_vimos, num_hires, num_will, hires_filenames, MASKS

import astro.spec
from matplotlib.mlab import rec_append_fields
from matplotlib.ticker import NullFormatter, NullLocator, AutoMinorLocator

# matplotlib colours cycled through when plotting many qsos together
COLOURS = 'k b g r m c y 0.7 purple orange'.split()

# maximum separation allowed between QSO sight-lines
# (degrees).
MAXSEP = 5.0

# distance bins, in comoving h^-1 Mpc
# maxdist = 40.0
# binwidth = 1.0
# binedges = np.arange(0, maxdist + binwidth, binwidth)
# CBINS = binedges[:-1] + 0.5*binwidth
# BINEDGES = list(binedges)
# NBINS = len(binedges) - 1

# resolution is say 2 Angstroms, which is 120 km/s at z=3?, which is about 2
# (or more?) Mpc?

# LINEAR BINS

#twid = 20.0             # bin widths, Mpc
rwid = 50.0             # radial bin width, Mpc (only used for linear bins)
#tnum = 5                # number of bins
rnum = 10               # number of radial bins (only used for linear bins)

# tvwid = 1600.0          # bin widths, km/s
# rvwid = 1600.0
# tvnum = 10             # number of bins
# rvnum = 10

#RBIN_EDGES = np.arange(0, rwid*(rnum+1), rwid)        # radial bins, Mpc 
# radial (line-of-sight) separation bin edges, comoving Mpc, log-spaced
RBIN_EDGES = np.logspace(-2, 2.7, 10)
# bin centres: midpoints between consecutive edges
CBINSR = RBIN_EDGES[:-1] + 0.5*(RBIN_EDGES[1:] - RBIN_EDGES[:-1])

#TBIN_EDGES = np.arange(0, twid*(tnum+1), twid)        # transverse bins, Mpc 
#CBINST = TBIN_EDGES[:-1] + 0.5*(TBIN_EDGES[1:] - TBIN_EDGES[:-1])
#RVBIN_EDGES = np.arange(0, rvwid*(rvnum+1), rvwid)     # radial bins, km/s
#TVBIN_EDGES = np.arange(0, tvwid*(tvnum+1), tvwid)     # radial bins, km/s

# LOG BINS
# transverse separation bin edges and centres, comoving Mpc, log-spaced
TBIN_EDGES = np.logspace(-2, 2.5, 10)
CBINST = TBIN_EDGES[:-1] + 0.5*(TBIN_EDGES[1:] - TBIN_EDGES[:-1])

# 3-d separation bin edges and centres, comoving Mpc, log-spaced
SBIN_EDGES = np.logspace(-2, 2.7, 10)
CBINSS = SBIN_EDGES[:-1] + 0.5*(SBIN_EDGES[1:] - SBIN_EDGES[:-1])

def filter_qsos(qsos, maxsep=3.):
    """ Return the subset of qsos within maxsep degrees of one of the
    first five field centres, excluding VIMOS qsos (those are too faint
    for a reasonable spectrum).

    maxsep is the maximum separation from the central QSO in degrees.
    """
    near_field = np.zeros(len(qsos), dtype=bool)
    for field in ('HE0940', 'J0124', 'J1201', 'Q0042', 'PKS2126'):
        ra0, dec0 = CRADECS[field]
        near_field |= ang_sep(qsos.ra, qsos.dec, ra0, dec0) < maxsep

    not_vimos = ~ismember(qsos.num, num_vimos)

    return qsos[near_field & not_vimos]

def select_lya_norm(qsos, debug=False, vlya=-4000., vlyb=0.):
    """ Take only lya regions, normalise and convert to comoving los
    distance. Remove any entries that do not have enough of the forest
    covered by the spectrum.

    vlya: move this far in km/s away from lya emission (-ve moves
        bluewards)
    vlyb: move this far in km/s away from lyb emission (-ve moves
        bluewards)

    Returns
    -------
    nlyas: list of record arrays, one per kept qso, with fields
        wa, nfl, ner, dlos, hz, z, good
    qsos_new: recarray of the kept qsos (skipped entries removed).
    """
    print 'Selecting Lya regions and normalising'

    qsos_new = []
    nlyas = []
    # converts metres to Mpc (assumes PC is one parsec in metres --
    # TODO confirm against the xcorr module)
    const = 1 / (PC * 1.e6)
    if debug:
        fig = pl.figure()
        ax = fig.add_subplot(111)
    for qso in qsos:
        if debug:
            print qso['num'], qso['name'],
        # observed-wavelength window: offset vlyb km/s from the lyb
        # emission wavelength up to vlya km/s from lya emission
        wmax = wlya * (1 + qso['z']) * (1 + vlya / Ckms)
        wmin = wlyb * (1 + qso['z']) * (1 + vlyb / Ckms)
        i0,i1 = qso['spec'].wa.searchsorted([wmin, wmax])
        # NOTE(review): this tests the index of the window's upper edge,
        # not the pixel count in the window (i1 - i0) -- confirm intent
        if i1 < 10:
            print '\nToo few forest pixels, skipping'
            continue
        sp = qso['spec'][i0:i1]
        # normalise flux and error by the continuum
        nfl = sp.fl / sp.co
        ner = sp.er / sp.co
        # good pixels: finite flux, positive error and continuum ...
        c0 = ~np.isnan(sp.fl) & (sp.er > 0) & (sp.co > 0)
        # ... and continuum-to-noise ratio above 0.5
        c1 = sp.co / sp.er > 0.5
        good = c0 & c1
        # bad pixels are kept in the arrays but flagged with nan
        nfl[~good] = np.nan
        ner[~good] = np.nan
        redshifts = sp.wa / wlya - 1.
        # los comoving distance in metres
        dlos = [cosmo.Dc(z)  for z in redshifts]
        # hubble parameter per pixel
        hz = [cosmo.Hz(z) for z in redshifts]  # km/s/Mpc
        nlya = np.rec.fromarrays([sp.wa, nfl, ner, dlos, hz, redshifts, good],
                                 names='wa,nfl,ner,dlos,hz,z,good')
        nlya.dlos = nlya.dlos * const   # convert to Mpc
        nlyas.append(nlya)
        qsos_new.append(tuple(qso))
        if debug:
            ax.cla()
            wrest = sp.wa / (1 + qso['z'])
            print wrest.min(), wrest.max(), qso['z']
            ax.plot(wrest, nfl, wrest, ner)
            ax.set_ylim(-1, 2)
            # roughly lyb to lya rest wavelengths
            ax.set_xlim(1025, 1215.6701)
            pl.show()
            raw_input(' ... Enter')

    qsos_new = np.array(qsos_new, dtype=qsos.dtype).view(np.recarray)

    return nlyas, qsos_new

def add_meanfl(qsos, nlya, mean='FG', plot=True, split=False, usetau=False):
    """ Add an estimate of the mean flux and calculate the quantity
    df = (f/f_mean - 1) for each spectrum.

    mean: {'FG', 'scaled FG', 'qso'}
        How should we determine the expected mean flux at each pixel?
        'FG' means use Faucher-Giguere flux, 'scaled FG' will use the
        Faucher-Giguere flux scaled to match the mean flux of the qso,
        'qso' means use the mean flux of each qso.

    plot: if True, plot and pause after each qso.

    split: if True, split the lya forest region into 3 when
           calculating mean flux.

    usetau: if True, plot tau rather than mean flux

    Returns
    -------
    A list of scale factors

    """

    # option to scale mean flux relation to each QSO. Need to
    # calculate tau(z) for a single QSO, then scale the mean flux
    # level to match that tau(z).

#     fluxes = []; errs = [];  zs = []
#     for i,(n,qso) in enumerate(zip(nlya,qsos)):
#         #if i == 5: continue
#         npts = len(n)
#         if split:
#             i0 = int(npts/3)
#             i1 = int(2*npts/3)
#             print i0,i1

#             mfl0,mer0 = wmean(n['nfl'][:i0], n['ner'][:i0])
#             mfl1,mer1 = wmean(n['nfl'][i0:i1], n['ner'][i0:i1])
#             mfl2,mer2 = wmean(n['nfl'][i1:], n['ner'][i1:])
#             fluxes.append((mfl0, mfl1, mfl2))
#             errs.append((mer0, mer1, mer2))
#             z = n['z'][:i0].mean(), n['z'][i0:i1].mean(), n['z'][i1:].mean()
#         else:
#             mfl,mer = wmean(n['nfl'], n['ner'])
#             fluxes.append([mfl])
#             errs.append([mer])
#             z = [n['z'].mean()]
#         zs.append(z)
#         #print zs[-1],fluxes[-1],errs[-1]
#         #raw_input('enter to continue')

    #print len(zs)
#     fig = pl.figure()
#     ax = fig.add_subplot(111)
#     #zlya = [n['z'].mean() for n in nlya]   # mean forest z vals
#     for i,(fl,er,z) in enumerate(zip(fluxes, errs, zs)):
#         fl = np.array(fl); er = np.array(er)
#         col = COLOURS[i % len(COLOURS)]
#         if usetau:
#             yvals = -np.log(fl)
#             ylo,yhi = -np.log(fl+er), -np.log(fl-er)
#         else:
#             yvals = fl
#             ylo,yhi = fl+er, fl-er

#         ax.plot(z, yvals,'o',color=col, label=None)
#         erhi,erlo = yhi - yvals, yvals - ylo
#         ax.errorbar(z, yvals, yerr=[erlo,erhi], fmt=',', color=col)
#         ax.text(z[0], yvals[0], str(qsos[i]['num']))

#     zrange = np.linspace(2, 3.5, 100)
#     if usetau:
#         ax.plot(zrange,teff(zrange),'r',lw=8,alpha=0.1,label='FG fit')
#         ax.set_ylabel('effective tau')
#     else:
#         ax.plot(zrange,meanfl_FG(zrange),'r',lw=8,alpha=0.1,label='FG fit')
#         ax.set_ylabel('Mean flux')
#     ax.legend()
#     ax.set_xlabel('Redshift')
#     pl.show()

    if plot:
        fig = pl.figure()
        ax = fig.add_subplot(111)

    multipliers = []
    for i,(n,qso) in enumerate(zip(nlya, qsos)):
        meanfl_qso, meanfl_qso_er = wmean(n['nfl'], n['ner'])
        zmean = n.z.mean()

        multfac = meanfl_FG(zmean) / meanfl_qso
        multipliers.append(multfac)
        print 'Flux multiplier to give expected mean flux: %.3f' % multfac

        if mean == 'qso':
            meanfl = [qso_mean] * len(n)
        elif mean == 'FG':
            meanfl = meanfl_FG(n.z)
        elif mean == 'scaled FG':
            meanfl = meanfl_FG(n.z) / multfac
        else:
            raise ValueError('Unknown mean keyword value: %s' % mean)
        
        if 'meanfl' in n.dtype.names:
            nlya[i].meanfl = meanfl
        else:
            nlya[i] = rec_append_fields(nlya[i], 'meanfl', meanfl)

        df = nlya[i].nfl / meanfl - 1.
        if 'df' in n.dtype.names:
            nlya[i].df = df
        else:
            nlya[i] = rec_append_fields(nlya[i], 'df', df)

        if plot:
            ax.cla()
            ax.axhline(0, color= '0.7')
            ax.axhline(1, color= '0.7')
            ax.plot(n.z, n.nfl * multfac, 'b')
            ax.plot(n.z, n.ner * multfac, color='orange')
            ax.plot(n.z, meanfl_FG(n.z), 'r')
            ax.set_ylim(-1, 2)
            ax.set_xlim(n.z.min(), n.z.max())
            pl.show()
            raw_input('Enter... ')

    return multipliers


def tau_eff_FG(z):
    """ Effective Lya optical depth at redshift z, from the power-law
    fit of Faucher-Giguere et al. (2008). Valid for 2 < z < 4."""
    zp1 = 1 + z
    return 0.0018 * zp1 ** 3.92

def meanfl_FG(z):
    """ Mean transmitted Lya flux exp(-tau_eff) at redshift z, using the
    Faucher-Giguere et al. (2008) fitted model. Valid for 2 < z < 4."""
    tau = 0.0018 * (1 + z) ** 3.92
    return np.exp(-tau)

def calc_xi_r(pix, binedges, usevel=False):
    """ Calculate the correlation function as a function of radial
    separation for a single QSO.

    Parameters
    ----------
    pix: record array of pixels, needing the fields
        pix.df:   (nfl / mean_flux - 1) at each pixel
        pix.dlos: comoving los distance at each pixel (Mpc)
        pix.good: True for pixels usable in the correlation
    binedges: radial bin edges (comoving Mpc)
    usevel: unused; kept for backwards compatibility (velocity
        separations are not implemented).

    Returns
    -------
    xitot: mean of df_i * df_j over the pixel pairs in each bin (nan
        for bins with no pairs)
    pctot: number of pixel pairs in each bin
    """
    nbins = len(binedges) - 1
    # running sum of df_i * df_j per bin
    xisum = np.zeros(nbins)
    # number of pairs per bin
    pctot = np.zeros(nbins, dtype=int)

    # only correlate the good pixels
    pix1 = pix[pix.good]

    for i, p in enumerate(pix1[:-1]):
        # [i+1:] only counts pixels to the right of this pixel - pairs
        # to the left were counted on earlier iterations.
        pix2 = pix1[i+1:]
        # radial comoving separation (Mpc); non-negative because dlos
        # increases along the spectrum
        rsep = pix2.dlos - p['dlos']
        term1 = p['df']
        # TODO: weight each xi term by the sqrt(sum of errors
        # squared) of the pair of pixels that contribute to that
        # term.
        for j in range(nbins):
            inside_bin = (binedges[j] <= rsep) & (rsep < binedges[j+1])
            terms2 = pix2.df[inside_bin]
            xisum[j] += (term1 * terms2).sum()
            pctot[j] += len(terms2)

    # will be nan where there are no pairs in a bin
    xitot = xisum / pctot

    return xitot, pctot


def calc_xi_t(pix1, ra1, dec1, pix2, ra2, dec2, mflkey=None):
    """ Calculate the correlation function as function of transverse
    separation between two QSOs.

    Parameters
    ----------
    pix1, pix2: pixels in the two spectra
    ra1, dec1, ra2, dec2: ras and decs for each spectrum.
    mflkey:   says which mean flux to use (FG or the mean for this QSO)

    Each pixel array needs the fields:

    pix.z:    redshift for each pixel
    pix.wa:   wavelength pf each pixel  (needed for km/s)
    pix.nfl:  normalised flux at each pixel
    pix.ner:  normalised error at each pixel
    pix.df:   (nfl / mean_flux - 1)
    pix.dlos: comoving los distance at each pixel
    pix.hz:   Hubble parameter at each pixel (needed for km/s)
    """
    rad_per_deg = np.pi / 180.

    # Only use overlapping regions
    z1, z2 = pix1.z, pix2.z
    zmin = max(z1[0], z2[0])
    zmax = min(z1[-1], z2[-1])
    if zmin >= zmax:
        print 'Spectra do not overlap in redshift space'
        return None, None, None
    i,j = z1.searchsorted([zmin, zmax])
    pix1a = pix1[i:j]
    i,j = z2.searchsorted([zmin, zmax])
    pix2a = pix2[i:j]

    pix1b = pix1a[pix1a.good]
    pix2b = pix2a[pix2a.good]

    # angular separation in degrees between the sightlines
    sepdeg = ang_sep(ra1, dec1, ra2, dec2)
    seprad = rad_per_deg * sepdeg

    # find minimum, maximum tranverse separations
    tsepmin = seprad * max(pix1b.dlos[0],  pix2b.dlos[0])
    tsepmax = seprad * min(pix1b.dlos[-1], pix2b.dlos[-1])

    Npix2b = len(pix2b)
    
    xitot = 0
    pc = 0
    for df1 in pix1b.df:
        xitot += (df1 * pix2b.df).sum()
        pc += Npix2b

    # will be nan where there are no pixels in a bin
    xi = (xitot / pc) if pc > 0 else np.nan

    return xi, pc, (tsepmin, tsepmax)

def calc_xi_s(pix1, ra1, dec1, pix2, ra2, dec2, binedges, mflkey=None):
    """ Calculate the correlation function as function of 3-d
    separation between the lya forest of two QSOs.

    Parameters
    ----------
    pix1, pix2: pixels in the two spectra
    ra1, dec1, ra2, dec2: ras and decs for each spectrum.
    mflkey:   says which mean flux to use (FG or the mean for this QSO)

    Each pixel array needs the fields:

    pix.z:    redshift for each pixel
    pix.nfl:  normalised flux at each pixel
    pix.ner:  normalised error at each pixel
    pix.mfl:  the expected mean flux at each pixel
    pix.dlos: comoving los distance at each pixel
    pix.df:   (flux / meanflux - 1)
    """
    deg2rad = np.pi / 180.

    nbins = len(binedges) - 1
    # running sum of df1 * df2 per bin, and pair count per bin
    xisum = np.zeros(nbins)
    npair = np.zeros(nbins, dtype=int)

    # angular separation in degrees between the sightlines
    sepdeg = ang_sep(ra1, dec1, ra2, dec2)
    seprad = deg2rad * sepdeg

    good1 = pix1[pix1.good]
    good2 = pix2[pix2.good]

    for p in good1:
        # transverse comoving separation (Mpc) at the mean los distance
        mean_dlos = 0.5 * (p['dlos'] + good2['dlos'])
        tsep = seprad * mean_dlos

        # 3-d comoving distance
        sep = np.hypot(good2.dlos - p['dlos'], tsep)

        df1 = p['df']
        # TODO: weight each xi term by the sqrt(sum of errors
        # squared) of the pair of pixels that contribute to that
        # term.
        for j in range(nbins):
            in_bin = (binedges[j] <= sep) & (sep < binedges[j+1])
            df2 = good2.df[in_bin]
            xisum[j] += (df1 * df2).sum()
            npair[j] += len(df2)

    # will be nan where there are no pixels in a bin
    xitot = xisum / npair

    return xitot, npair, sepdeg

def plot_zranges(qsos, spectra, title=None, sortby='z'):
    """ Plot the lya ranges in each qso spectrum.

    Each spectrum is drawn with a vertical offset, sorted by the
    sortby field, with the lyb-to-lya forest region shaded red. The
    figure is saved to lya_range_<title>.pdf.

    NOTE(review): the input spectra are modified in place below
    (outlier fluxes set to nan and fluxes rescaled).
    """
    ind = qsos.argsort(order=sortby)
    spectra = np.array(spectra)

    fig = pl.figure(figsize=A4PORTRAIT)
    ax = fig.add_subplot(111)
    ax.set_xlabel('Redshift')
    lowest = 99
    highest = 0
    for i, (qso, sp) in enumerate(zip(qsos[ind], spectra[ind])):
        print '\n', i+1, 'of %i\n' % len(qsos)
        offset = 1.5*i
        ax.axhline(offset, color='0.7')
        good = ~np.isnan(sp.fl) & ~np.isnan(sp.co) & (sp.er > 0)
        if not np.any(good):
            print 'skipping; no good pixels'
            continue
        # find, maximum, minimum y limits
        ymin = -2 * np.median(sp.er[good])
        # index of the 90th percentile flux; note this rebinding
        # shadows the sort index above, but the zip iterator was
        # already built so the iteration is unaffected
        ind = int(0.9 * len(sp.fl[good]))
        ymax = 3 * np.sort(sp.fl[good])[ind]
        # scale such that ymax is 1
        sp.fl[~good | (sp.fl > 2*ymax) | (sp.fl < 10*ymin)] = np.nan
        sp.multiply(1. / ymax)
        ymin = ymin / ymax + offset
        ymax = 1 + offset
        ax.plot(sp.wa / wlya - 1, sp.fl + offset, 'b', alpha=0.7, lw=1)
        # forest region: from the lyb emission redshift to the qso
        zmin = (qso['z'] + 1) * wlyb / wlya - 1
        ax.fill_between([zmin, qso['z']], ymin, ymax + 0.3,
                        alpha=0.3, facecolor='r', edgecolors='None')
        ax.text(4.3, offset+0.5, qso['num'],fontsize=6)
        lowest = min(lowest, (sp.wa / wlya - 1)[0])
        highest = max(highest, qso['z'])

        #raw_input()
    ax.yaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_locator(NullLocator())
    ax.xaxis.set_minor_locator(AutoMinorLocator())
    ax.set_title(title)
    ax.set_xlim(2.0, 4.5)
    ax.set_ylim(-1, len(qsos)*1.5 + 1)
    pl.show()
    fig.savefig('lya_range_%s.pdf' % title)


def plot_xi_r_one_by_one(xi_r, pc_r, nlya, qsos, prefix='a', show=False, log=False):
    """ For each qso, plot the radial correlation function, the pair
    counts per bin and the normalised spectrum.

    One three-panel figure per qso is saved to
    lya_xiplots/<prefix>_<num>.png. The median xi over all qsos is
    drawn behind each individual xi for reference.

    NOTE(review): this reads 'mfl' and 'mfls' fields from the nlya
    records, whereas add_meanfl adds a field named 'meanfl' -- confirm
    which routine is expected to create mfl/mfls.
    """
    fig = pl.figure(figsize=A4PORTRAIT)
    ax1 = fig.add_subplot(311)
    ax2 = fig.add_subplot(312, sharex=ax1)
    ax3 = fig.add_subplot(313)
    # median xi across all qsos (nans treated as zero)
    xi_med = np.median(np.nan_to_num(xi_r), axis=0)
    for i,(xi,pc) in enumerate(zip(xi_r,pc_r)):
        ax1.cla()
        ax1.set_title('100 km/s ~ 1 comoving Mpc ~ 0.375 $\AA$ rest')
        ax1.axhline(0, color='0.7')
        if log:
            ax1.semilogx(CBINSR, xi_med, 'k',alpha=0.7, ls='steps-mid')
        else:
            ax1.plot(CBINSR, xi_med, 'k',alpha=0.7, ls='steps-mid')
        ax1.plot(CBINSR, xi,'o')
        ax1.set_ylabel(r'$<(f_1/f_{mean} - 1)(f_2/f_{mean} - 1)>$')
        ax2.cla()
        #ax2.semilogy(CBINSR, pc+1e-6, ls='steps-mid')
        #ax2.set_ylim(1e0, 1e6)
        if log:
            # small offset keeps zero counts finite on the log axis
            ax2.loglog(CBINSR, pc + 1e-6, ls='steps-mid')
            ax2.set_ylim(1e-1, 1e6)
        else:
            ax2.plot(CBINSR, pc, ls='steps-mid')
            ax2.set_ylim(0, 7.e4)
        ax2.grid()
        ax2.set_ylabel('# pixel pairs')
        ax2.set_xlabel(r'Separation / ($h^{-1}$ comoving Mpc)')
        ax3.cla()
        s = nlya[i]
        qso = qsos[i]
        ax3.axhline(0, color='0.7')
        ax3.axhline(1, color='0.7')
        wrest = s.wa / (1 + qso['z'])
        ax3.plot(wrest, s.nfl, label=None)
        ax3.plot(wrest, s.mfl, 'r', label='FG_mean')
        ax3.plot(wrest, s.mfls, ':r', label='FG scaled to match QSO mean')
        ax3.plot(wrest, s.ner, color='orange', label=None)
        leg = ax3.legend(loc='lower left', borderpad=0.1, labelspacing=0.04)
        for t in leg.get_texts():
            t.set_fontsize('small')    # the legend text fontsize
        ax3.set_xlim(1020, 1216)
        ax3.set_ylim(-1, 2)
        ax3.text(1170, -0.5, '#%(num)s, z=%(z).2f' % qso)
        print qso['num']
        ax3.set_xlabel(r'Rest wavelength / $\AA$')
        ax3.set_ylabel('$f$')
        if show: pl.show()
        fig.savefig('lya_xiplots/%s_%i.png' % (prefix,qso['num']))

        if show: raw_input()

def bin_data(x, y, binedges):
    """ Bin the data y by x into bins with the given edge values,
    using the median, and estimate errors with the rms.

    nan y values are ignored. Bins containing no points give nan for
    both the median and rms (previously this raised a RuntimeWarning
    from np.median/np.std on an empty array).

    Returns
    -------
    med, rms: lists of the per-bin median and standard deviation.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    good = ~np.isnan(y)
    med = []
    rms = []
    for b0, b1 in zip(binedges[:-1], binedges[1:]):
        # half-open bin [b0, b1)
        insidebin = (b0 <= x) & (x < b1)
        y0 = y[insidebin & good]
        if len(y0):
            med.append(np.median(y0))
            rms.append(np.std(y0))
        else:
            med.append(np.nan)
            rms.append(np.nan)
    return med, rms

if 0:
    # Step 1 (disabled): read the qso catalogue and spectra.
    qsos = xcorr.readqsos()
    # only first 5 fields and no VIMOS qsos
    qsos1 = filter_qsos(qsos)
    spectra = xcorr.read_qso_spec(qsos1)
    # rebin hires spectra to bigger pixel size
    for i,n in enumerate(qsos1.num):
        if n in num_hires:
            spectra[i] = spectra[i].rebin_simple(30)

    # attach each spectrum to its qso record
    qsos2 = rec_append_fields(qsos1, 'spec', spectra)
if 0:
    # Step 2 (disabled): Take only lya regions, normalise and convert
    # to comoving los distance.
    nlya, qsos3 = select_lya_norm(qsos2, debug=0)

    # have a look at mean flux of each spectrum, c.f. Faucher Giguere
    # results. Scale mean flux for each spectrum.
    mult = add_meanfl(qsos3, nlya, plot=0)

    # maybe remove some bad spectra at this point?

    # want to calculate the radial correlation function and the
    # transverse correlation function.

if 0:
    # calculate xi_radial spectrum by spectrum
    nlya_subset, qsos_subset = (a for a in (nlya, qsos3))
    # find xi
    xi_r = []
    pc_r = []
    t1 =  time.time()
    for i,spec in enumerate(nlya_subset):
        print i
        xi,pc = calc_xi_r(spec, RBIN_EDGES, usevel=False)
        xi_r.append(xi)
        pc_r.append(pc)
    print 'elapsed time in min: ', (time.time() - t1) / 60.

    xi_r = np.array(xi_r)
    pc_r = np.array(pc_r)

if 0:
    # Step 4 (disabled): diagnostic plots of the per-qso radial
    # correlation functions computed above.
    plot_xi_r_one_by_one(xi_r, pc_r, nlya_subset, qsos_subset, prefix='a',
                         show=0,log=1)

if 0:
    # do transverse correlation function in a similar way, for each
    # possible pair of spectra. Note there are nC2 pairs, i.e. ~20,000
    # for n=200, ~80000 for n=400, # pairs scales with the square if
    # number of sightlines.

    tinfo = []
    t1 = time.time()
    for field in 'J0124 Q0042 HE0940 J1201 PKS2126'.split():
    #for field in 'HE0940'.split():
        # find all pair separations
        cond =  qsos3.fieldqso == field
        qsos4 = qsos3[cond]
        nlya1 = [n for n, c in zip(nlya, cond) if c]
        Nqso = len(qsos4)
        npairs = 0.5 * Nqso * (Nqso-1)
        print '%f pairs for %s field' % (npairs, field)
        for (q1, sp1), (q2, sp2) in combinations(zip(qsos4, nlya1), 2):
            pairids = q1['num'], q2['num']
            print pairids, q1['z'], q2['z']
            xi_t, pc_t, tsep = calc_xi_t(
                sp1, q1['ra'], q2['dec'], sp2, q2['ra'], q2['dec'],
                mflkey='mfls')
            if xi_t is None:
                continue
            tinfo.append( (xi_t, pc_t, tsep, pairids, field) )
    dtype = [('xi', float), ('pc', int), ('sepMpc', float, (2,)),
             ('pairids', float, [2]), ('qfield', 'S12')]
    tdat = np.array(tinfo, dtype=dtype).view(np.recarray)
    print 'elapsed time in min: ', (time.time() - t1) / 60.

if 0:
    # plot the per-pair transverse xi against mean pair separation,
    # with binned medians overplotted.
    fig = pl.figure()
    ax = fig.add_subplot(111)
    ax.semilogx(tdat.sepMpc.mean(axis=1), tdat.xi, '.b', alpha=0.2)
    #ax.plot(tdat.sepMpc.mean(axis=1), tdat.xi, '.b', alpha=0.2)
    ax.axhline(0, color='0.7')
    ax.grid()

    temp = tdat[~np.isnan(tdat.xi)]
    ximed, rms = bin_data(temp.sepMpc.mean(axis=1), temp.xi, TBIN_EDGES)
    ax.plot(CBINST, ximed,ls='steps-mid')
    ax.errorbar(CBINST, ximed, fmt=None, yerr=rms)
    # bug fix: show() was previously called unqualified (a NameError);
    # pyplot is imported as pl in this module.
    pl.show()

    #ax.set_title(field)
#         meanxi = []
#         for i in range(len(CBINST)):
#             goodxi = ~np.isnan(xi[:,i])
#             Ngood = len(xi[:,i][goodxi])
#             print Ngood
#             if Ngood > 0:
#                 meanxi.append(np.median(xi[:,i][goodxi]))
#             else:
#                 meanxi.append(np.nan)

#         ax.plot(CBINST, meanxi, 'or')
#         ax.set_title(field)
