#!/usr/bin/env python
import pprint
import BinnedAnalysis as BAn
from SED import SED

def fermiSed(srcMaps, expCube, binnedExpMap, srcModel, irfs, centerSrc,
             ibin_min=None, ibin_max=None, TSLimit=2., outfile='mysed.dat'):
    print 'Initiating ...'
    obs = BAn.BinnedObs(srcMaps=srcMaps, expCube=expCube,
                        binnedExpMap=binnedExpMap, irfs=irfs)

    like = BAn.BinnedAnalysis(obs, srcModel, optimizer="MINUIT")

    sedEnergy = like.energies
    if isinstance(ibin_min, int):
        minBin = max(0, ibin_min)
    else:
        minBin = 0

    if isinstance(ibin_max, int):
        maxBin = min(len(sedEnergy)-2, ibin_max)
    else:
        maxBin = len(sedEnergy)-2

    bin_edges = sedEnergy[minBin:maxBin+2]
    print '[INFO] Calculate from bin %s to bin %s ...' % (minBin, maxBin-1)

    sed = SED(like, centerSrc, min_ts=TSLimit, bin_edges=bin_edges)
    sed.save(outfile)

  # ff = open('.'.join(outfile.split('.')[:-1])+'_dict.dat', 'w')
  # dicttot = sed.todict()
  # pprint.pprint(dicttot, stream=ff)
  # ff.close()

def cli():
    """Command-line entry point: parse arguments and run the SED
    calculation, optionally splitting the energy bins over a pool of
    worker processes (one output file ``mysed_<i>.dat`` per worker)."""
    import argparse
    import multiprocessing
    import numpy as np
    import pyfits as pf

    parser = argparse.ArgumentParser(description='PyLikelihood Script!')
    parser.add_argument("srcMaps", type=str, help='Source Maps')
    parser.add_argument("expCube", type=str, help='Live Time Cube')
    parser.add_argument("binnedExpMap", type=str, help='Exposure Cube')
    parser.add_argument("srcModel", type=str, help='model.xml')
    parser.add_argument("irfs", type=str, help='IRFs')
    parser.add_argument("centerSrc", type=str, default='', help='center source')
    parser.add_argument("-TSLimit", type=float, default=2., help='Below which the upper limit of center source will be calculated.')
    parser.add_argument("-minBin", type=int, default=0, help='minBin_c=max(minBin, 0), [minBin_c, maxBin_c) will be calculated')
    parser.add_argument("-maxBin", type=int, default=1000, help='maxBin_c=min(maxBin, nEbins), [minBin_c, maxBin_c) will be calculated')
    parser.add_argument("-njobs", type=int, default=1, help='The number of jobs you wish to spawn')

    args = parser.parse_args()

    if args.njobs > 1:
        # Read the energy grid only to learn how many bins exist; close the
        # FITS handle even if the ENERGIES extension is missing/corrupt.
        hdulist = pf.open(args.binnedExpMap)
        try:
            energies = hdulist['ENERGIES'].data.field(0)
        finally:
            hdulist.close()
        minBin = max(0, args.minBin)
        maxBin = min(args.maxBin, len(energies)-2)

        # Descending bin order, then contiguous chunks per worker; each chunk's
        # min()/max() recover the inclusive [ibin_min, ibin_max] for fermiSed.
        ebins = np.arange(maxBin, minBin-1, -1)
        njobs = min(args.njobs, len(ebins))
        split_ebins = np.array_split(ebins, int(njobs))
        pool = multiprocessing.Pool(processes=njobs)
        # BUGFIX: keep every AsyncResult. Previously only the last handle was
        # retained, so exceptions from all other workers were silently lost.
        results = []
        for ijob in xrange(njobs):
            ebinsToCalc = split_ebins[ijob]
            results.append(pool.apply_async(
                fermiSed,
                (args.srcMaps, args.expCube, args.binnedExpMap, args.srcModel,
                 args.irfs, args.centerSrc, ebinsToCalc.min(), ebinsToCalc.max(),
                 args.TSLimit, 'mysed_%s.dat' % ijob)))
        pool.close()
        pool.join()
        for res in results:
            res.get()  # re-raise any exception a worker hit
    else:
        fermiSed(args.srcMaps, args.expCube, args.binnedExpMap, args.srcModel,
                 args.irfs, args.centerSrc, args.minBin, args.maxBin,
                 args.TSLimit, 'mysed.dat')


# Run the command-line interface only when executed as a script.
if __name__ == '__main__':
    cli()
