#!/usr/bin/env python
from __future__ import print_function
import numpy as np
try:
    import pyfits as pf
except ImportError:
    from astropy.io import fits as pf

import pyLikelihood as pyLike
import BinnedAnalysis as BAn
from LikelihoodState import LikelihoodState
from UpperLimits import UpperLimits

from COLSed import SimpleEBoundObj, collect as collectSED

new_fermitools = hasattr(BAn, 'BinnedConfig')
#new_fermitools = False # keep back due to the bug in FermiST 1.2.1

def get_results(like, minidx, maxidx, emin, emax, centerSrc, approxTs=True, TSLimit=None, outfile='results_pyLike.dat'):
    """Dump a summary of a fitted likelihood object to ``outfile``.

    Adapted from code by Y-F. Liang.

    Parameters
    ----------
    like : BinnedAnalysis
        Fitted likelihood object (energy bounds already selected).
    minidx, maxidx : int
        Energy-bin index range [minidx, maxidx) used to sum observed counts.
    emin, emax : float
        Energy range (MeV) for the flux/energy-flux integrals.
    centerSrc : sequence of str
        Sources of interest; their TS may be reoptimized and, when weak,
        an upper limit is computed.
    approxTs : bool
        If True, skip reoptimization when computing TS of center sources.
    TSLimit : float or None
        When a float, center sources with TS below it get an upper limit.
    outfile : str
        File to which the result dictionary is pprint-ed.

    Returns
    -------
    float
        Total model-predicted counts summed over all sources.
    """
    import pprint

    # Keys prefixed with 'z' sort to the end of the pprint-ed dictionary.
    dicttot = {"logLikelihood": like.logLike.value()}
    # The attributes probed below are optimizer- and fermitools-version
    # dependent, hence the defensive try/except AttributeError blocks.
    try:
        dicttot['zEDM'] = like.optObject.getDistance()
    except AttributeError:
        pass

    try:
        dicttot['zReturnCode'] = like.optObject.getRetCode()
    except AttributeError:
        pass

    try:
        dicttot['zFitQuality'] = like.optObject.getQuality()
    except AttributeError:
        pass

    # Energy-dispersion flag: the API name differs between releases.
    try:
        dicttot['zEdispON'] = like.logLike.use_edisp()
    except AttributeError:
        try:
            dicttot['zEdispON'] = like.logLike.edisp_val() >= 0
        except AttributeError:
            pass

    try:
        dicttot['zERange'] = (like.emin, like.emax)
    except AttributeError:
        pass

    # Observed counts in the selected bins; predicted counts accumulated
    # for all sources (cts_all) and free sources only (cts_free).
    nobs = sum(like.nobs[minidx:maxidx])
    cts_all, cts_free = 0., 0.
    for source in like.sourceNames():
        npred = like.NpredValue(source)
        cts_all += npred

        if like[source].src.fixedSpectrum():
            # Fixed sources are stored under a '+'-prefixed key; TS is only
            # computed when their predicted contribution is non-negligible.
            srckey = '+'+source
            dicttot[srckey] = {'NPred': npred}
            if npred >= 1e-4*nobs and npred >= 2:
                ts = like.Ts(source)
                dicttot[srckey]['TS value'] = ts
                if npred >= 1e-2*nobs or ts >= 400:
                    print('  [%s] TS=%.3f, npred=%.1f [FIXED]' % (source, ts, npred))
            continue

        dict_ = {}
        dict_['NPred'] = npred
        cts_free += npred

        # Center sources may get the (slower) reoptimized TS.
        if source in centerSrc:
            ts = like.Ts(source, reoptimize=not approxTs)
        else:
            ts = like.Ts(source)
        dict_['TS value'] = ts

        if source in centerSrc:
            print('  [%s] TS=%.3f, npred=%.1f [CENTERSRC]' % (source, ts, npred))
        else:
            print('  [%s] TS=%.3f, npred=%.1f' % (source, ts, npred))

        # Photon flux; flux()/fluxError() can raise RuntimeError, in which
        # case the 'Flux' entry is simply omitted.
        try:
            flux_ = like.flux(source, emin=emin, emax=emax, energyFlux=False)
            fluxErr_ = like.fluxError(source, emin=emin, emax=emax, energyFlux=False)
        except RuntimeError:
            pass
        else:
            dict_['Flux'] = "%.5e +/- %.5e" %(flux_, fluxErr_)

        # Energy flux, same caveat as above.
        try:
            Eflux_ = like.flux(source, emin=emin, emax=emax, energyFlux=True)
            EfluxErr_ = like.fluxError(source, emin=emin, emax=emax, energyFlux=True)
        except RuntimeError:
            pass
        else:
            dict_['EFlux'] = "%.5e +/- %.5e" %(Eflux_, EfluxErr_)

        # Record every spectral parameter; free ones carry their fit error.
        for pname in like[source].funcs['Spectrum'].paramNames:
            value_ = like[source].funcs['Spectrum'].params[pname].value()
            if like[source].funcs['Spectrum'].params[pname].isFree():
                error_ = like[source].funcs['Spectrum'].params[pname].error()
                dict_[pname] = "%f +/- %f" %(value_, error_)
            else:
                dict_[pname] = "%f" %value_
        dicttot[source] = dict_

    dicttot['zCounts'] = (cts_all, cts_free, nobs)

    # Upper-limit computation for weak center sources.  Any one of three
    # conditions triggers it: low TS (flag1), flux consistent with zero at
    # the 2-sigma level (flag2), or negligible predicted counts (flag3).
    # NOTE(review): a center source with a fixed spectrum was stored above
    # under '+'+cs, so dicttot[cs] would raise KeyError here -- center
    # sources are assumed to be free.
    for cs in centerSrc:
        if (cs in like.sourceNames()):
            ul_flag1 = (isinstance(TSLimit, float) and float(dicttot[cs]['TS value'])<TSLimit)
            if like.covar_is_current:
                try:
                    flux_split = dicttot[cs]['Flux'].split('+/-')
                except KeyError:
                    ul_flag2 = False
                else:
                    flux_val, flux_err = float(flux_split[0]), float(flux_split[1])
                    ul_flag2 = flux_val<2.*flux_err
            else:
                ul_flag2 = False
            ul_flag3 = float(dicttot[cs]['NPred']) <= max(1., cts_all*1e-4)
            if (ul_flag1 or ul_flag2 or ul_flag3):
                print('calculating the upper limit of {} ...'.format(cs))
                ul = UpperLimits(like)
                ul[cs].compute(emin=emin, emax=emax)
                print('The upper limit of %s is %s' % (cs, ul[cs].results))
                dicttot[cs]['Upper Limit'] = ul[cs].results[0].value
                # Bitmask recording which flag(s) triggered the computation.
                dicttot[cs]['ULFLAG'] = ul_flag1 + 2*ul_flag2 + 4*ul_flag3

    f = open(outfile, 'w')
    pprint.pprint(dicttot, stream=f)
    f.close()
    return cts_all

def get_covar(like, outfile='covariance_pyLike.dat'):
    """Write free parameters, covariance and correlation matrices to a file.

    The eigenvalues of the Hessian (inverse of the covariance eigenvalues)
    are written as a sanity check, and strongly correlated parameter pairs
    (|corr| >= 0.6, upper triangle) are listed explicitly.

    Parameters
    ----------
    like : BinnedAnalysis
        Fitted likelihood object; must hold a current covariance matrix
        (i.e. the last fit used ``covar=True``), otherwise only a warning
        is printed and nothing is written.
    outfile : str
        Destination text file.
    """
    if not like.covar_is_current:
        print('[WARN] No covariance is available!')
        return

    # Use a context manager so the file is closed even if numpy raises.
    with open(outfile, 'w') as fh:
        fh.write('-'*20+ 'free parameters'+ '-'*20+ '\n')
        prm_lst = []
        for prm in like.params():
            if prm.parameter.isFree():
                freePrm = prm.srcName+'_'+prm.parameter.getName()
                fh.write('%s\t%s\n' % (len(prm_lst)+1, freePrm))
                prm_lst.append(freePrm)

        covar = np.array(like.covariance)
        fh.write('-'*20+ 'eigen values of Hessian mattrix'+ '-'*20+ '\n')
        try:
            # Hessian eigenvalues = 1 / covariance eigenvalues.
            eigvals_of_hessian = 1./np.linalg.eigvals(covar)
            np.savetxt(fh, eigvals_of_hessian, delimiter='  ', fmt='% .5e')
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed by this best-effort step.
            fh.write('Fail!\n')

        fh.write('-'*20+ 'covariance matrix'+ '-'*20+ '\n')
        np.savetxt(fh, covar, delimiter='  ', fmt='% .3e')

        fh.write('-'*20+ 'correlation matrix'+ '-'*20+ '\n')
        cov_diag = covar.diagonal()
        if (cov_diag < 0.).any():
            # Negative variances indicate a broken fit; corr is undefined.
            fh.write('[ERROR] covar.diagonal < 0\n')
            return

        sigma = np.sqrt(cov_diag)
        corr = covar / (sigma * sigma.reshape(-1, 1))
        np.savetxt(fh, corr, delimiter='  ', fmt='% .3f')

        fh.write('='*20+ 'correlation check'+ '='*20+ '\n')
        # Only the strict upper triangle, so each pair is reported once.
        triu = np.abs(np.triu(corr, 1))

        fh.write('.'*10+ 'Very strong correlation (corr >= 0.8)'+ '.'*10+ '\n')
        indics = (triu >= 0.8).nonzero()
        for i, j in zip(indics[0], indics[1]):
            fh.write('%s <-> %s : %s\n' % (prm_lst[i], prm_lst[j], corr[i, j]))

        fh.write('.'*12+ 'Strong correlation (0.6 <= corr < 0.8)'+ '.'*12+ '\n')
        indics = ((triu < 0.8)*(triu >= 0.6)).nonzero()
        for i, j in zip(indics[0], indics[1]):
            fh.write('%s <-> %s : %s\n' % (prm_lst[i], prm_lst[j], corr[i, j]))

def loglikeProfile(like, srcName, outfile, maxdlike=100., max_delta_sqdlike=0.5, dofit=True):
    """Scan the log-likelihood versus the normalization of ``srcName``.

    All remaining free parameters are refit at every trial value (when
    ``dofit`` is True), producing a profile likelihood.  Trial values step
    outwards from the best fit in units of the parameter error, and the
    scan recurses wherever sqrt(|dlogL|) jumps by more than
    ``max_delta_sqdlike`` between neighbouring points.  The profile is
    written to ``outfile`` as two columns: physical prefactor (value times
    scale) and logL - logL_best, sorted by prefactor.

    [WARN] ONLY USE THE FUNCTION WHEN THE SOURCE IS EXTREMELY WEAK!

    Parameters
    ----------
    like : BinnedAnalysis
        Fitted likelihood object; the norm parameter of ``srcName`` is
        frozen here and left frozen afterwards.
    srcName : str
        Source whose normalization is scanned; must be in the model and
        its norm parameter must be free on entry.
    outfile : str
        Output text file for the profile.
    maxdlike : float
        Stop stepping once logL has dropped by this amount.
    max_delta_sqdlike : float
        Maximum allowed step in sqrt(|dlogL|) before refining.
    dofit : bool
        If True, refit the other free parameters at each trial value.
    """
    if not srcName in like.sourceNames():
        raise IOError('Source %s is not in the model!' % srcName)

    if maxdlike <= 0:
        raise IOError('maxdlike must be larger than 0.')

    if max_delta_sqdlike <= 0:
        raise IOError('max_delta_sqdlike must be larger than 0.')

    # get norm parameter of srcName
    for par in like[srcName].funcs['Spectrum'].paramNames:
        if (par == 'Prefactor' or par == 'norm' or
            par == 'Integral' or par == 'Value' or
            par == 'Normalization'):
            normPar = par
    # NOTE(review): if no norm-like parameter is found above, normPar stays
    # unbound and the next statement raises NameError rather than a clear
    # error message.

    if not like[srcName].funcs['Spectrum'].params[normPar].isFree():
        raise RuntimeError('%s[%s] should be free!' % (srcName, normPar))

    parobj = like[srcName].funcs['Spectrum'].params[normPar].parameter
    pval0 = parobj.getValue()
    # Error floor of 10% of the value keeps the step size sane when the
    # fitted error is tiny or unset.
    perr0 = max(parobj.error(), 0.1*pval0)
    pscale = parobj.getScale()
    pmin, pmax = parobj.getBounds()

    ## freeze the norm of the source
    pidx = like.par_index(srcName, normPar)
    like.freeze(pidx)
    fitobj = pyLike.Minuit(like.logLike)
    tol0 = like.tol

    # first fit: baseline log-likelihood with the norm frozen at best fit
    like.fit(covar=False, verbosity=False, optObject=fitobj)
    llike0 = like.logLike.value()
    _prefactor, _loglike = [pval0], [llike0]

    def getsplitval(pval_t_min, pval_t_max, sqrt_llike_t_min, sqrt_llike_t_max):
        # Recursively subdivide (geometric mean, i.e. midpoint in log
        # space) until sqrt(|dlogL|) changes by <= max_delta_sqdlike
        # between neighbours or the interval shrinks below 1% of the value.
        #pval_t = (pval_t_min + pval_t_max)/2.
        pval_t = np.sqrt(pval_t_min * pval_t_max)
        parobj.setValue(pval_t)
        if dofit:
            try:
                like.fit(covar=False, verbosity=False, optObject=fitobj)
            except RuntimeError:
                # retry once with a looser tolerance if the fit fails
                like.fit(covar=False, verbosity=False, optObject=fitobj, tol=tol0*5.)
        llike_t = like.logLike.value()
        _sqrt_llike_t = np.sqrt(abs(llike_t-llike0))
        print(pval_t, llike0-llike_t, abs(_sqrt_llike_t-sqrt_llike_t_min), abs(_sqrt_llike_t-sqrt_llike_t_max))

        _pval_t_list, _llike_t_list = [pval_t], [llike_t]
        if abs((pval_t_max-pval_t_min)/pval_t)<0.01:
            return _pval_t_list, _llike_t_list

        if abs(_sqrt_llike_t-sqrt_llike_t_min) > max_delta_sqdlike:
            _pval_t_ext, _llike_t_ext = getsplitval(pval_t_min, pval_t, sqrt_llike_t_min, _sqrt_llike_t)
            _pval_t_list.extend(_pval_t_ext)
            _llike_t_list.extend(_llike_t_ext)

        if abs(_sqrt_llike_t-sqrt_llike_t_max) > max_delta_sqdlike:
            _pval_t_ext, _llike_t_ext = getsplitval(pval_t, pval_t_max, _sqrt_llike_t, sqrt_llike_t_max)
            _pval_t_list.extend(_pval_t_ext)
            _llike_t_list.extend(_llike_t_ext)

        return _pval_t_list, _llike_t_list

    # negative side: step the normalization downwards from the best fit
    atlimit = False
    itime, fac = 1., 1.
    _prefactor_list0, _loglike_list0 = [pval0], [llike0]
    _prefactor_list1, _loglike_list1 = [], []

    while True:
        pval_trial = pval0 - np.sqrt(2.) * itime * perr0 * max_delta_sqdlike * fac

        if pval_trial <= pmin:
            # clamp just inside the lower bound and stop after this trial
            pval_trial = pmin*(1.+1.e-5)
            atlimit = True

        parobj.setValue(pval_trial)
        if dofit:
            try:
                like.fit(covar=False, verbosity=False, optObject=fitobj)
            except RuntimeError:
                like.fit(covar=False, verbosity=False, optObject=fitobj, tol=tol0*5.)
        llike = like.logLike.value()
        print(pval_trial, llike0-llike)

        _prefactor_list0.append(pval_trial)
        _loglike_list0.append(llike)

        # Change of sqrt(|dlogL|) between the two most recent trials
        # decides whether to refine, coarsen or keep the step size.
        sqrt_llike_t_min = np.sqrt(abs(_loglike_list0[-2]-llike0))
        sqrt_llike_t_max = np.sqrt(abs(_loglike_list0[-1]-llike0))
        dsqllike = abs(sqrt_llike_t_min - sqrt_llike_t_max)
        if dsqllike > 1e2 * maxdlike:
            raise RuntimeError('Too large dsqllike (%s)! There may be some problems in the fit!'%dsqllike)
        elif dsqllike > max_delta_sqdlike:
            # too coarse: refine between the last two trial values
            pval_trial_min, pval_trial_max = _prefactor_list0[-2], _prefactor_list0[-1]
            pval_t_list, llike_t_list = getsplitval(pval_trial_min, pval_trial_max, \
                                                    sqrt_llike_t_min, sqrt_llike_t_max)
            _prefactor_list1.extend(pval_t_list)
            _loglike_list1.extend(llike_t_list)
        elif dsqllike < 0.1 * max_delta_sqdlike:
            # too fine: enlarge the step for the next trial
            fac *= 1.9
            itime /= 2.

        if atlimit or (llike0-llike>=maxdlike):
            break
        itime += 1.

    # skip index 0: the baseline point is already in _prefactor/_loglike
    _prefactor.extend(_prefactor_list0[1:])
    _loglike.extend(_loglike_list0[1:])
    _prefactor.extend(_prefactor_list1)
    _loglike.extend(_loglike_list1)

    # positive side: same scan, stepping upwards
    atlimit = False
    itime, fac = 1., 1.
    _prefactor_list0, _loglike_list0 = [pval0], [llike0]
    _prefactor_list1, _loglike_list1 = [], []

    while True:
        pval_trial = pval0 + np.sqrt(2.) * itime * perr0 * max_delta_sqdlike * fac

        if pval_trial >= pmax:
            # clamp just inside the upper bound and stop after this trial
            pval_trial = pmax * (1.-1.e-5)
            atlimit = True

        parobj.setValue(pval_trial)
        if dofit:
            try:
                like.fit(covar=False, verbosity=False, optObject=fitobj)
            except RuntimeError:
                like.fit(covar=False, verbosity=False, optObject=fitobj, tol=tol0*5.)
        llike = like.logLike.value()
        print(pval_trial, llike0-llike)

        _prefactor_list0.append(pval_trial)
        _loglike_list0.append(llike)

        sqrt_llike_t_min = np.sqrt(abs(_loglike_list0[-2]-llike0))
        sqrt_llike_t_max = np.sqrt(abs(_loglike_list0[-1]-llike0))
        dsqllike = abs(sqrt_llike_t_min - sqrt_llike_t_max)
        if dsqllike > 1e2 * maxdlike:
            raise RuntimeError('Too large dsqllike (%s)! There may be some problems in the fit!'%dsqllike)
        elif dsqllike > max_delta_sqdlike:
            pval_trial_min, pval_trial_max = _prefactor_list0[-2], _prefactor_list0[-1]
            pval_t_list, llike_t_list = getsplitval(pval_trial_min, pval_trial_max, \
                                                    sqrt_llike_t_min, sqrt_llike_t_max)
            _prefactor_list1.extend(pval_t_list)
            _loglike_list1.extend(llike_t_list)
        elif dsqllike < 0.1 * max_delta_sqdlike:
            fac *= 1.9
            itime /= 2.

        if atlimit or (llike0-llike>=maxdlike):
            break
        itime += 1.

    _prefactor.extend(_prefactor_list0[1:])
    _loglike.extend(_loglike_list0[1:])
    _prefactor.extend(_prefactor_list1)
    _loglike.extend(_loglike_list1)

    # convert to physical units, sort by prefactor and write the profile
    prefactor = np.atleast_1d(_prefactor) * pscale
    loglike = np.atleast_1d(_loglike)
    dloglike = loglike - llike0

    argsort = np.argsort(prefactor)
    outdata = np.c_[prefactor, dloglike][argsort, :]
    np.savetxt(outfile, outdata, delimiter='\t', header='Prefactor\tloglike%+.6f'%llike0)


def simpleSed(srcMaps, expCube, binnedExpMap, srcModel, irfs, centerSrc,
              ibin_min=None, ibin_max=None, delta_bin=1, wtsmap=None,
              edispon=False, edisp_bins=-2, approxTs=True, TSLimit=None,
              freezeWeak=False, freezeTS=1.,
              get_loglikeprofile=False, maxdlike=20., max_delta_sqdlike=0.1,
              saveFolder=None):
    """Run a band-by-band (SED) binned likelihood analysis.

    For each energy band a DRMNFB fit is tried first (falling back to
    MINUIT on RuntimeError), the model is written to
    ``model_pass1_band*.xml``, and a second MINUIT fit with covariance is
    run from that model.  The fit summary (get_results), covariance data
    (get_covar) and optionally a log-likelihood profile of each center
    source (loglikeProfile) are written per band.

    Parameters
    ----------
    srcMaps, expCube, binnedExpMap, srcModel, irfs : str
        Standard binned-analysis inputs (file paths and IRF name).
    centerSrc : sequence of str
        Sources of interest; at least one must be present in the model.
    ibin_min, ibin_max : int or None
        First/last energy-bin index to compute (inclusive); None means
        the full available range.
    delta_bin : int
        Number of energy bins merged into one SED band (values < 2 are
        treated as 1).
    wtsmap : str or None
        Likelihood-weights map; only supported with new fermitools.
    edispon : bool
        Enable energy dispersion.
    edisp_bins : int
        Extra edisp bins (new fermitools only).
    approxTs, TSLimit
        Passed through to get_results.
    freezeWeak : bool
        If True, freeze the normalization of weak (TS < freezeTS)
        non-center point sources before the second fit.
    freezeTS : float
        TS threshold used when ``freezeWeak`` is enabled.
    get_loglikeprofile : bool
        If True, compute a log-likelihood profile per center source.
    maxdlike, max_delta_sqdlike : float
        Passed through to loglikeProfile.
    saveFolder : str or None
        Output folder; None/'' means the current directory.
    """
    print('Initiating ...')
    if not saveFolder:
        _saveFolder = ''
    elif saveFolder.endswith('/'):
        _saveFolder = saveFolder
    else:
        _saveFolder = saveFolder + '/'

    obs = BAn.BinnedObs(srcMaps=srcMaps, expCube=expCube,
                        binnedExpMap=binnedExpMap, irfs=irfs)

    # Energy dispersion / likelihood weights need the BinnedConfig API of
    # the newer fermitools releases.
    if new_fermitools and edispon:
        myconfig = BAn.BinnedConfig(edisp_bins=edisp_bins)
        print('edisp_bins=%s'%edisp_bins)
        DRM = BAn.BinnedAnalysis(obs, srcModel, optimizer='DRMNFB', config=myconfig, wmap=wtsmap)
    else:
        if wtsmap is not None:
            raise RuntimeError('weighted likelihood is not implemented in the old fermitools')
        myconfig = None
        DRM = BAn.BinnedAnalysis(obs, srcModel, optimizer="DRMNFB")
    DRM.logLike.set_edisp_flag(edispon)

    # BUGFIX: the counter used to be reset to 0 INSIDE this loop, so the
    # "every center source missing" check could only ever trigger when
    # centerSrc held a single entry.  Count across the whole loop instead.
    n_not = 0
    for cs in centerSrc:
        if not cs in DRM.sourceNames():
            n_not += 1
            print('the source {} is not in the model'.format(cs))
    if n_not == len(centerSrc):
        raise RuntimeError('the centersrc file is empty')

    # Snapshot of the initial parameters, restored before every band.
    pristine = LikelihoodState(DRM)

    nsedEnergy = len(DRM.energies) # the number of energy boundary

    if isinstance(delta_bin, int) and delta_bin>1:
        suffix = '_wid%02i'%delta_bin
    else:
        delta_bin = 1
        suffix = ''

    minBin = max(0, ibin_min) if isinstance(ibin_min, int) else 0
    maxBin = min(nsedEnergy-1, ibin_max+1) if isinstance(ibin_max, int) else nsedEnergy-1

    assert maxBin >= minBin
    print('[INFO] Calculate from energy bin [%02i, %02i]!' % \
        (minBin, min(maxBin, nsedEnergy-1)-1))
    if delta_bin > 1:
        print('[INFO] Bin width: %i' % delta_bin)

    for iband in range(minBin, maxBin, delta_bin): # fit from band closed set [minB, maxB]
        print('='*40)
        minidx = iband # included
        maxidx = min(iband+delta_bin, maxBin) # not included

        DRM.selectEbounds(minidx, maxidx)
        Emin, Emax = DRM.emin, DRM.emax
        # selectEbounds may reset the edisp flag, so re-apply it.
        DRM.logLike.set_edisp_flag(edispon)

        if delta_bin == 1:
            print('[INFO] Start calculating bin %s, [%.3f, %.3f] MeV ...' % (minidx, Emin, Emax))
        else:
            print('[INFO] Start calculating bin %s-%s, [%.3f, %.3f] MeV ...' % (minidx, maxidx-1, Emin, Emax))

        # use_edisp() only exists in newer fermitools; fall back to edisp_val().
        try:
            print('[INFO] current edispflag=%s!' % DRM.logLike.use_edisp())
        except AttributeError:
            print('[INFO] current edispflag=%s (%s)!' % (DRM.logLike.edisp_val()>=0, DRM.logLike.edisp_val()))

        # Pass 1: fast DRMNFB fit; on failure redo the band with MINUIT.
        try:
            DRM.fit()
            print('[INFO] Total lnLike: ', DRM.logLike.value())
            print('[INFO] Return Code:  ', DRM.optObject.getRetCode())
            DRM.logLike.writeXml(_saveFolder + 'model_pass1_band%02i%s.xml' % (minidx, suffix))
        except RuntimeError as e:
            print('[FAIL] '+str(e))
            print('-'*40)
            if new_fermitools:
                ALTFIT = BAn.BinnedAnalysis(obs, srcModel, optimizer="MINUIT", config=myconfig)
            else:
                ALTFIT = BAn.BinnedAnalysis(obs, srcModel, optimizer="MINUIT")
            ALTFITobj = pyLike.Minuit(ALTFIT.logLike)
            ALTFITobj.tol = 0.05
            ALTFIT.selectEbounds(minidx, maxidx)
            ALTFIT.logLike.set_edisp_flag(edispon)

            print('[INFO] Energy range: [%.3f, %.3f] MeV'%(ALTFIT.emin, ALTFIT.emax))
            try:
                print('[INFO] current edispflag=%s!' % ALTFIT.logLike.use_edisp())
            except AttributeError:
                print('[INFO] current edispflag=%s (%s)!' % (ALTFIT.logLike.edisp_val()>=0, ALTFIT.logLike.edisp_val()))

            ALTFIT.fit(covar=False, optObject=ALTFITobj)
            print('[INFO] Total lnLike: ', ALTFIT.logLike.value())
            print('[INFO] Fit Quality:  ', ALTFITobj.getQuality())
            print('[INFO] Return Code:  ', ALTFITobj.getRetCode())
            print('[INFO] Fit Distance: ', ALTFITobj.getDistance())
            ALTFIT.logLike.writeXml(_saveFolder + 'model_pass1_band%02i%s.xml' % (minidx, suffix))
            del ALTFIT, ALTFITobj

        # restore the parameters of DRM to the initial value in order to fit
        # the next band; MIN will load its model from the pass1 xml instead
        pristine.restore()

        # Pass 2: MINUIT fit with covariance, started from the pass1 model.
        print('-'*40)
        if new_fermitools:
            MIN = BAn.BinnedAnalysis(obs, _saveFolder + 'model_pass1_band%02i%s.xml' % (minidx, suffix),
                                     optimizer="MINUIT", config=myconfig)
        else:
            MIN = BAn.BinnedAnalysis(obs, _saveFolder + 'model_pass1_band%02i%s.xml' % (minidx, suffix),
                                     optimizer="MINUIT")
        MINobj = pyLike.Minuit(MIN.logLike)

        MIN.selectEbounds(minidx, maxidx)
        MIN.logLike.set_edisp_flag(edispon)
        print('[INFO] Energy range: [%.3f, %.3f] MeV'%(MIN.emin, MIN.emax))
        try:
            print('[INFO] current edispflag=%s!' % MIN.logLike.use_edisp())
        except AttributeError:
            print('[INFO] current edispflag=%s (%s)!' % (MIN.logLike.edisp_val()>=0, MIN.logLike.edisp_val()))

        if freezeWeak:
            # Freeze the normalization of weak non-center point sources so
            # they do not destabilize the covariance fit.
            print('[INFO] Starting to fix the weak sources ...')
            for source in MIN.sourceNames():
                if MIN[source].src.fixedSpectrum():
                    continue
                ts = MIN.Ts(source)

                extra_infos = []
                if source in centerSrc:
                    extra_infos.append('CENTERSRC')

                stype = MIN[source].type.upper()
                if isinstance(stype, bytes):
                    # some fermitools versions return bytes here
                    stype = stype.decode()
                if stype != 'POINTSOURCE':
                    extra_infos.append(stype)

                if source not in centerSrc and ts < freezeTS and stype == 'POINTSOURCE':
                    for par in MIN[source].funcs['Spectrum'].paramNames:
                        if (par == 'Prefactor' or par == 'norm' or
                                par == 'Integral' or par == 'Value' or
                                par == 'Normalization'):
                            normpar = par

                    index = MIN.par_index(source, normpar)
                    MIN.freeze(index)
                    extra_infos.append('FREEZE')

                if extra_infos:
                    print('  [%s] TS=%.4f [%s]'%(source, ts, ', '.join(extra_infos)))
                else:
                    print('  [%s] TS=%.4f'%(source, ts))

        print('[INFO] Start the second fitting ...')
        MIN.fit(covar=True, optObject=MINobj)
        prst = LikelihoodState(MIN)
        print('[INFO] Total lnLike: ', MIN.logLike.value())
        print('[INFO] Fit Quality:  ', MINobj.getQuality())
        print('[INFO] Return Code:  ', MINobj.getRetCode())
        print('[INFO] Fit Distance: ', MINobj.getDistance())
        MIN.logLike.writeXml(_saveFolder + 'model_pass2_band%02i%s.xml' % (minidx, suffix))

        modelcnts = get_results(
            MIN, minidx, maxidx, Emin, Emax, centerSrc,
            approxTs=approxTs, TSLimit=TSLimit,
            outfile=_saveFolder + 'results_pyLike_band%02i%s.dat' % (minidx, suffix)
        )

        nobs = sum(MIN.nobs[minidx:maxidx])
        if nobs:
            print("Model Cnts / Obs Cnts: %s / %s = 1.%+.5f" % (modelcnts, nobs, modelcnts/nobs-1.))
        else:
            print("Model Cnts / Obs Cnts: %s / %s" % (modelcnts, nobs))
        get_covar(MIN, _saveFolder + 'covariance_pyLike_band%02i%s.dat' % (minidx, suffix))

        if get_loglikeprofile:
            for cs in centerSrc:
                print('[profile] calculating the profile of %s...'%cs)
                # restore the pass2 best fit before each profile scan
                prst.restore()
                cs1 = ''.join(cs.split())
                loglikeProfile(MIN, cs, outfile=_saveFolder + 'loglikeprofile_band%02i_%s%s.dat' % (minidx, cs1, suffix),
                               maxdlike=maxdlike, max_delta_sqdlike=max_delta_sqdlike)
        del MIN, MINobj

def cli():
    """Command-line entry point.

    Parses the arguments, resolves the center-source list, runs the
    band-by-band SED fits (optionally split over ``-njobs`` worker
    processes) and finally collects the per-band results into SED files.
    """
    import argparse
    import multiprocessing

    # prefix_chars='-+' lets boolean switches be spelled with '+'.
    parser = argparse.ArgumentParser(description='PyLikelihood Script!', prefix_chars='-+')
    parser.add_argument("srcMaps", type=str, help='Source Maps')
    parser.add_argument("expCube", type=str, help='Live Time Cube')
    parser.add_argument("binnedExpMap", type=str, help='Exposure Cube')
    parser.add_argument("srcModel", type=str, help='model.xml')
    parser.add_argument("irfs", type=str, help='IRFs')
    parser.add_argument("centerSrc", type=str, help='center source. if starting with "@", a file containing the centerSrcs is assumed')

    parser.add_argument("-njobs", type=int, default=1, help='The number of jobs you wish to spawn')

    parser.add_argument("+edispon", action='store_true', default=False, help='Whether to enable edisp (default: False)')
    parser.add_argument("-edispbins", type=int, default=-2, help='the extra ebins added (default=-2)')

    parser.add_argument("-minBin", type=int, default=0, help='minBin_c=max(minBin, 0), [minBin_c, maxBin_c] will be calculated')
    parser.add_argument("-maxBin", type=int, default=1000, help='maxBin_c=min(maxBin, nEbins), [minBin_c, maxBin_c] will be calculated')
    parser.add_argument("-deltaBin", type=int, default=1, help='the number of bins used in one sed fitting. Default: 1')

    parser.add_argument("-wtsmap", type=str, default=None, help='the filename of the likelihood weights')

    parser.add_argument("+freezeWeak", action='store_true', default=False, help="the flag to enable automatical weak source freeze (default: False)")
    parser.add_argument('-freezeTs', type=float, default=1., help='the sources below the given ts value will be freezed')

    parser.add_argument("+accurateTS", action='store_true', default=False, help='Whether to calculate accurate TS value for center source (default: False).')
    parser.add_argument("-TSLimit", type=float, default=10., help='Below which the upper limit of center source will be calculated.')

    parser.add_argument("-saveFolder", type=str, default='./', help='The folder to save output files')
    parser.add_argument("-sedout", type=str, default='myseddata_{sname:s}.dat', help='The output sed filename, use {sname} to replace the src name')

    parser.add_argument("+get_loglike", action='store_true', default=False, help='Whether to get loglike profile (default: False).')
    parser.add_argument("-maxdlike", type=float, default=50., help='Max dlike in loglike profile')
    parser.add_argument("-max_delta_sqdlike", type=float, default=.25, help='when dlike smaller than max_delta_sqdlike/nebins, program will refine it')

    args = parser.parse_args()

    if args.deltaBin>1:
        delta_bin_ = args.deltaBin
    else:
        delta_bin_ = 1

    eb = SimpleEBoundObj(args.binnedExpMap)
    _my_max_delta_sqdlike = args.max_delta_sqdlike  # (optionally /eb.nEBound)

    # Center sources: a single name, or '@file' with one name per line
    # ('#' starts a comment).
    if args.centerSrc.startswith('@'):
        csrclst = []
        with open(args.centerSrc[1:]) as fh:
            for l in fh:
                if l.startswith('#'):
                    continue
                sname = l.split('#')[0].strip()
                if sname:
                    csrclst.append(sname)
        if not csrclst:
            raise IOError('the centersrc list is empty')
    else:
        csrclst = [args.centerSrc]

    if args.njobs > 1:
        minbin0 = max(0, args.minBin)
        maxbin0 = min(eb.nEBound-1, args.maxBin)
        nband = int(np.ceil((maxbin0-minbin0+1.)/delta_bin_)) # the number of energy bands

        # Distribute the bands over the workers, highest bands first.
        ebins = np.arange(nband-1, -1, -1)
        njobs = min(args.njobs, len(ebins))
        split_ebins = np.array_split(ebins, njobs)
        pool = multiprocessing.Pool(processes=njobs)
        async_results = []
        for ijob in range(njobs):
            ebinsToCalc = split_ebins[ijob]
            myminbin = int(minbin0+delta_bin_*ebinsToCalc.min())
            mymaxbin = int(min(minbin0-1+delta_bin_*(1+ebinsToCalc.max()), maxbin0))
            async_results.append(pool.apply_async(simpleSed, (
                args.srcMaps, args.expCube, args.binnedExpMap, args.srcModel, args.irfs, csrclst,
                myminbin, mymaxbin, args.deltaBin, args.wtsmap,
                args.edispon, args.edispbins, not args.accurateTS, args.TSLimit, args.freezeWeak, args.freezeTs,
                args.get_loglike, args.maxdlike, _my_max_delta_sqdlike,
                args.saveFolder)
            ))
        pool.close()
        pool.join()
        # BUGFIX: the original only called .get() on the LAST job, so any
        # exception raised in the other workers was silently dropped.
        # Re-raise failures from every worker here.
        for res in async_results:
            res.get()
    else:
        simpleSed(args.srcMaps, args.expCube, args.binnedExpMap, args.srcModel, args.irfs, csrclst,
                  args.minBin, args.maxBin, args.deltaBin, args.wtsmap,
                  args.edispon, args.edispbins, not args.accurateTS, args.TSLimit, args.freezeWeak, args.freezeTs,
                  args.get_loglike, args.maxdlike, _my_max_delta_sqdlike,
                  args.saveFolder)

    # Collect the per-band result files into SED data; best-effort only.
    try:
        collectSED(csrclst, ccube=args.binnedExpMap, ULTSLimit=args.TSLimit,
                   output=args.sedout, formatter='results_pyLike_band{index:02d}.dat')
    except Exception as e:
        print('collectSED fails because: '+str(e))

if __name__ == '__main__': cli()
