#!/usr/bin/env python

import os
import os.path
import re
import time
import random
import tempfile

from threading import Lock

from dq2.clientapi.DQ2 import DQ2
from dq2.common.DQException import *
from dq2.repository.DQRepositoryException import DQClosedDatasetException, DQFrozenDatasetException

from Ganga.Lib.LCG.Utility import get_uuid
from GangaAtlas.Lib.Credentials.ProxyHelper import getNickname

from AutoD3PDMakerPlugin import AutoD3PDMakerPlugin
from AutoD3PDMakerTask import AutoD3PDMakerTask
from AutoD3PDMakerUtility import *

from AMAAthena.AMAUtilsTool import AMAUtilsTool

class AMAEventMakerPlugin(AutoD3PDMakerPlugin):
    '''
    AutoD3PDMaker Plugin for AMAEvent generation.
    '''

    def __init__(self, dq2setup='', basedir='', codedir='', reportdir=''):
        """
        initializes the plugin.

        Any empty directory argument falls back to the current working
        directory; an empty dq2setup falls back to the DQ2 client setup
        script on CERN AFS.
        """

        AutoD3PDMakerPlugin.__init__(self)

        ## this plugin will generate new GangaTasks to run the post-processes
        self.ppBatchMode = True

        ## if no dq2setup specified, set it to the one on AFS@CERN
        self.dq2setup = dq2setup or '/afs/cern.ch/atlas/offline/external/GRID/ddm/DQ2Clients/setup.sh'

        cwd = os.getcwd()

        ## basedir: where downloaded ntuples are stored
        self.basedir = basedir or cwd

        ## codedir: base dir holding the per-release AMA code area
        self.codedir = codedir or cwd

        ## reportdir: base dir for reports (not referenced in this chunk -- TODO confirm usage)
        self.reportdir = reportdir or cwd

        ## lock for serializing concurrent plugin operations
        self.sync_lock = Lock()

        ## name of the AMA summary tarball produced by each job
        self.mt_output      = 'ama_summary.tgz'
        ## output dataset type tag used in dataset names
        self.output_ds_type = 'AMAEvent'

    def verifyTaskObj(self, taskObj):
        """
        checking if key attributes of AMAEvent task def. are provided.
        """

        sampleTaskDict = {'ama_userarea': 'amaathena_userarea.tar.gz',
                          'amaconfig'   : 'AMAEvents_7TeV_Common_McAtNlo.py',
                          'input'       : 'mc09_7TeV.105200.T1_McAtNlo_Jimmy.merge.AOD.e510_s624_s633_r1085_r1113_tid115772_00',
                          'metadata'    : {'version'       : 1,
                                           'sample'        : 'mc09_7TeV',
                                           'run'           : '105200',
                                           'algorithm'     : 'T1_McAtNlo_Jimmy',
                                           'tag'           : 'e510_s624_s633_r1085_r1113',
                                           'atlas_release' : '15.6.3.11.1',
                                           'atlas_project' : 'TopPhys',
                                           'ama_flags'     : ['MC09','TOPINPUTS','TRIG']}}

        ## checking main attributes
        for k in sampleTaskDict.keys():
            if not taskObj.has_key(k):
                self.logger.error('missing task attribute: %s' % k)
                return False

        ## checking metadata attributes
        for k in sampleTaskDict['metadata'].keys():
            if not taskObj['metadata'].has_key(k):
                self.logger.error('missing task metadata attribute: %s' % k)
                return False

        ## everything is fine
        return True

    def __amaconfig_to_athena_joption__(self, amaconfig, amaconfig_flags):
        """
        converting amaconfig into athena job option given the extra amaconfig_flags
        """

        re_py = re.compile('.*\.py$')

        if not os.path.exists( amaconfig ):
            self.logger.error('amaconfig not found: %s' % amaconfig)
            return None

        if re_py.match( amaconfig ):
            self.logger.warning('amaconfig is already a job option file, ignore job option convertion')
            return amaconfig
        else:
            
            (fid, ama_joption) = tempfile.mkstemp(suffix='.py',prefix=os.path.basename(amaconfig))

            amatool = AMAUtilsTool()
            amatool.SetConfigFile(amaconfig)
            amatool.SetFlags(amaconfig_flags)

            jobOptions = amatool.ComposeJO(amaconfig)

            f = os.fdopen(fid,'w')
            f.write(jobOptions)
            f.close()

            return ama_joption

    def makeGangaTask(self, taskObj):
        """
        generates the GangaTask script based on the given taskObj.

        Steps as implemented below:
          1. read task attributes (config, release, optional cloud/site/
             backend/fpj); missing optional attributes keep the defaults
          2. convert the AMA config into an athena job option and compose
             a 'rundef' job option file on the fly (incl. optional GRL
             retrieval and embedding)
          3. build an AnaTask with one AutoD3PDMakerTransform per input
             dataset (containers are expanded via DQ2)
          4. resolve good locations per dataset and assign the Panda or
             LCG backend accordingly

        Returns the configured task object, or None on failure (missing
        input files, GRL retrieval failure, unresolvable container, empty
        dataset list, no good grid location).

        NOTE(review): AnaTask, AutoD3PDMakerTransform, File,
        DQ2OutputDataset, Panda and LCG are not imported in this file's
        header -- presumably provided by the Ganga GPI at runtime; confirm.
        """

        ama_userarea    = taskObj.ama_userarea
        amaconfig       = taskObj.amaconfig
        amaconfig_flags = []
        atlas_release   = taskObj.metadata['atlas_release']

        ## optional GRL (good-run-list) information from the task metadata
        atlas_grlinfo  = {}
        if taskObj.metadata.has_key('atlas_grlinfo'):
            atlas_grlinfo  = taskObj.metadata['atlas_grlinfo']

        try:
            amaconfig_flags = taskObj.amaconfig_flags
        except AttributeError:
            pass

        ## defaults for the optional submission attributes
        target_cloud   = None
        target_site    = None
        target_backend = 'LCG'

        files_per_job  = -1

        try:
            target_cloud = taskObj.cloud
        except AttributeError:
            pass

        try:
            target_site = taskObj.site
        except AttributeError:
            pass

        try:
            target_backend = taskObj.backend
        except AttributeError:
            pass

        ## determine how many files should be processed by a single job
        try:
            files_per_job = taskObj.fpj
        except AttributeError:
            pass

        ## fpj not given by task definition, try to resolve it heuristically:
        ## configs whose name contains 'data' get 4 files per job, else 1
        if files_per_job == -1:
            if amaconfig.find('data') >= 0:
                files_per_job = 4
            else:
                files_per_job = 1

        revs = atlas_release.split('.')

        ## resolve the per-release code area and the input files living in it
        codebase       = os.path.join( self.codedir, atlas_release )
        ama_userarea   = os.path.join( codebase, taskObj.ama_userarea )
        amaconfig      = self.__amaconfig_to_athena_joption__( os.path.join( codebase, taskObj.amaconfig ), amaconfig_flags)
        amajoption     = os.path.join( codebase, 'AMAAthena_jobOptions_new.py')

        extra_files    = []
        extra_files.append(os.path.join(codebase, 'JESUncertainty.root'))

        if not os.path.exists( ama_userarea ):
            self.logger.error('AMA UserArea tarball not found: %s' % ama_userarea)
            return None

        if not os.path.exists( amaconfig ):
            self.logger.error('AMA Config. job option not found: %s' % amaconfig)
            return None

        if not os.path.exists( amajoption ):
            self.logger.error('AMA top-level job option not found: %s' % amajoption)
            return None

        for f in extra_files:
            if not os.path.exists( f ):
                self.logger.error('extra input file not found: %s' % f)
                return None

        ## making rundef file on the fly
        (fid, rundef) = tempfile.mkstemp(suffix='.py',prefix='rundef')

        f = os.fdopen(fid,'w')
        f.write('SampleName=\'%s\'\n' % taskObj.metadata['sample'].rstrip("/"))
        f.write('ConfigFile=\'%s\'\n' % os.path.basename(amaconfig))
        f.write('FlagList=\'\'\n')
        f.write('EvtMax=-1\n')
        f.write('AMAAthenaFlags=%s\n' % repr(taskObj.metadata['ama_flags']) )

        ## if GRL info is given, fetch the GRL xml now and embed its content
        ## into the rundef so each job re-creates the xml file locally
        if atlas_grlinfo:

            if not atlas_grlinfo['url']:
                self.logger.error( 'No URL to GRL xml file' )
                taskObj.error = 'No URL to GRL xml file'
                f.close()
                return None

            self.logger.info('retrieving GRL XML file: %s' % atlas_grlinfo['url'])
            grl_xml = get_remote_txt_content( atlas_grlinfo['url'] )

            if not grl_xml:
                self.logger.error( 'Fail to retrieve GRL xml: %s' % atlas_grlinfo['url'] )
                taskObj.error = 'Fail to retrieve GRL xml: %s' % atlas_grlinfo['url']
                f.close()
                return None

            f.write('grlxml_cnt="""%s"""\n' % grl_xml)
            f.write('f = open(\'%s.xml\',\'w\')\n' % atlas_grlinfo['name'])
            f.write('f.write(grlxml_cnt)\n')
            f.write('f.close()\n')

            f.write('import os\n')
            f.write('os.environ[\'GRList_name\']=\'%s\'\n' % atlas_grlinfo['name'])
            f.write('os.environ[\'GRList_filename\']=\'./%s.xml\'\n' % atlas_grlinfo['name'])

        f.close()

        ## generate GangaTask
        t = AnaTask()

        ## control the task to launch 100 jobs at the same time
        t.float = 100
        t.name  = 'AutoD3PDMaker_%s' % taskObj.id

        t.transforms[0] = AutoD3PDMakerTransform()

        t.transforms[0].name = 'D3PDMaker'

        t.transforms[0].files_per_job = files_per_job
        t.transforms[0].application.max_events = -1
        t.transforms[0].application.option_file += [ File(rundef), File(amajoption), File(amaconfig) ]
        t.transforms[0].application.athena_compile = False

        ## for Panda the input files have to be packed into the user area
        ## tarball instead of being shipped via the input sandbox
        if target_backend.upper() in ['PANDA']:
            
            self.logger.info('Updating user area tarball ... ')

            athena_run_dir = 'PhysicsAnalysis/AnalysisCommon/AMA/AMAAthena/run/'

            new_ama_userarea = renew_userarea_tarball( ama_userarea, athena_run_dir, map(lambda x:x.name, t.transforms[0].application.option_file) + extra_files )

            self.logger.info('New user area tarball: %s' % new_ama_userarea)

            t.transforms[0].application.user_area = File(new_ama_userarea)
        else:
            t.transforms[0].application.user_area = File(ama_userarea)
            
        t.transforms[0].application.atlas_cmtconfig = 'i686-slc5-gcc43-opt'
        ## base release is the first 3 digits (e.g. '15.6.3')
        t.transforms[0].application.atlas_release = '.'.join(revs[0:3])


        atlas_project = ''
        try:
            atlas_project = taskObj.metadata['atlas_project']
        except KeyError:
            pass

        ## a 4+ digit release implies a production cache on top of the base release
        if len(revs) > 3:
            t.transforms[0].application.atlas_project    = atlas_project
            t.transforms[0].application.atlas_production = '.'.join(revs)

        atlas_rtag = resolve_atlas_rtag(atlas_project=atlas_project, atlas_release=atlas_release, platform=t.transforms[0].application.atlas_cmtconfig)


        self.logger.debug('Atlas release Tag: %s' % atlas_rtag)

        t.transforms[0].application.atlas_run_config = {'input': {'noInput': True}, 'other': {}, 'output': {'outHist': False, 'alloutputs': []}}

        ## avoid crash in Ganga 5.5.10 and on, one needs to set up atlas_dbrelease to something meaningful or empty
        t.transforms[0].application.atlas_dbrelease = 'LATEST'

        #t.transforms[0].application.atlas_dbrelease   = 'ddo.000001.Atlas.Ideal.DBRelease.v070801:DBRelease-7.8.1.tar.gz'
        #t.transforms[0].application.atlas_environment = ['DBRELEASE_OVERRIDE=7.8.1']

        ## send back the AMA output files when job is finished
        #t.transforms[0].outputsandbox = ['summary/*']
        #t.transforms[0].outputdata = None
        ## or store the output on a grid SE, this can save your local disk
        t.transforms[0].outputdata = DQ2OutputDataset()
        t.transforms[0].outputdata.outputdata += [self.mt_output]

        dsList = []
        if taskObj.input.endswith('/'):
            ## expand the dataset container into individual datasets
            try:
                dq2 = DQ2()
                dsList = dq2.listDatasetsInContainer(taskObj.input)
            except DQException:
                self.logger.error('cannot resolve dataset container: %s' % taskObj.input)
                t.remove(remove_jobs=False)
                t = None
                return t
        else:
            dsList.append( taskObj.input )

        ## fail the task creation if no datasets given
        if len( dsList ) == 0:
            self.logger.error('dataset doesn\'t exist or container is empty: %s' % taskObj.input )
            t.remove(False)
            t = None
            return t

        ## making more transformations, one for each dataset
        ## NOTE(review): 'id' shadows the builtin here and below
        for id in range(1, len(dsList) ):
            t.transforms.append(t.transforms[0].copy())

        ## finding good locations to run this task
        good_locs_map = {}
        use_blacklist = True
        if target_site:
            use_blacklist = False
        good_locs_map = findClouds(atlas_rtag, dsList, use_blacklist=use_blacklist, bl_locs=self.getUserBlackListSite(), completeDSOnly=False)

        ## setting backend requirements for each transformation
        ## NOTE(review): target_cloud/target_site are mutated inside this
        ## loop, so the selection made for one dataset carries over to the
        ## next -- presumably intentional, but confirm
        for id in range( len(dsList) ):

            ds = dsList[id]

            t.transforms[id].use_blacklist = use_blacklist

            t.transforms[id].inputdata = DQ2Dataset()
            t.transforms[id].inputdata.type = 'FILE_STAGER'
            t.transforms[id].inputdata.dataset = [ ds ]

            good_locs = good_locs_map[ ds ]
            self.logger.debug( '%s %s %s' % (atlas_rtag, ds, repr(good_locs)) )

            self.logger.debug('good sites: %s' % repr(good_locs))

            clist = good_locs.keys()

            ## 1. if target_cloud is specified by user; but not found to be a good location, set it back to None
            ## NOTE(review): this branch also fires when target_cloud is
            ## already None, emitting a spurious 'None cloud' warning
            if target_cloud not in clist:

                self.logger.warning( 'resource unavailable in %s cloud, re-assign to other cloud' % target_cloud )

                target_cloud = None

            ## 2. if target_site is specified by user; try to resolve it's cloud; if not available, the target_cloud will be reset to None
            ## TODO: convert the DDM site name into Panda site name, before that the site selection should be always set to default (i.e. AUTO) in PANDA
            if target_backend.upper() in ['PANDA']:
                target_site = None
            
            if target_site:
                target_cloud = None
                for c,slist in good_locs.items():
                    if target_site in slist:
                        target_cloud = c
                        break
                        
            ## 3. pick up a cloud for user if target_cloud is None; the target_site is then set to None in anycase
            if not target_cloud:

                target_site = None

                ## temporarily remove FR cloud as we usually encounter memory problem there
                ## TODO: better algorithm to be applied for FR sites

                if 'FR' in clist:
                    self.logger.debug('ignore FR cloud')
                    clist.remove('FR')

                ## remove NG cloud and Tier1 from UK cloud
                #if 'TO' in clist:
                #    self.logger.debug('ignore T0 cloud')
                #    clist.remove('T0')

                if 'NG' in clist:
                    self.logger.debug('ignore NG cloud')
                    clist.remove('NG')

                ## NOTE(review): removing items from good_locs['UK'] while
                ## iterating it may skip entries when two RAL-LCG2 sites
                ## are adjacent -- confirm
                if 'UK' in clist:
                    for uk_loc in good_locs['UK']:
                        if re.match('^RAL-LCG2', uk_loc):
                            self.logger.debug('remove %s from UK cloud' % uk_loc)
                            good_locs['UK'].remove(uk_loc)

                    if len(good_locs['UK']) == 0:
                        self.logger.debug('0 UK site available, ignore UK cloud')
                        clist.remove('UK')

                if len(clist) == 0:

                    self.logger.warning('no good locations found, please check release version vs. dataset locations')
                    taskObj.error = 'no good location found on the Grid'
                    t.remove(remove_jobs=False)
                    t = None

                    break

                elif len(clist) == 1 and clist[0] == 'US':
                    self.logger.debug('only US sites available, try Panda:US cloud')
                    target_cloud = 'US'

                else:
                    if 'US' in clist:
                        self.logger.debug('ignore US cloud')
                        clist.remove('US')

                    ## randomly pick one of the remaining good clouds
                    target_cloud = clist[ random.randint(0, len(clist)-1) ]

                    self.logger.debug('available clouds: %s, try LCG:%s cloud' % (','.join(clist), target_cloud))

            ## 4. now we have proper setting for target_cloud/site, compose the backend requirement accordingly.
            ## the US cloud always goes through Panda
            if target_cloud == 'US':
                t.transforms[id].backend = Panda()
                t.transforms[id].outputdata.outputdata = []
                t.transforms[id].backend.extOutFile += [self.mt_output]
                t.transforms[id].backend.requirements.cloud = 'US'

                t.transforms[id].application.atlas_run_dir = 'PhysicsAnalysis/AnalysisCommon/AMA/AMAAthena/run/'

                if target_site:
                    t.transforms[id].backend.sites = target_site
            else:
                if target_backend.upper() in ['PANDA']:
                    t.transforms[id].backend = Panda()
                    t.transforms[id].outputdata.outputdata = []
                    t.transforms[id].backend.extOutFile += [self.mt_output]
                    t.transforms[id].backend.requirements.cloud = target_cloud

                    t.transforms[id].application.atlas_run_dir = 'PhysicsAnalysis/AnalysisCommon/AMA/AMAAthena/run/'

                    if target_site:
                        t.transforms[id].backend.sites = target_site
                else:
                    t.transforms[id].backend = LCG()
                    t.transforms[id].backend.requirements.cloud  = target_cloud
                    t.transforms[id].inputsandbox += map(lambda x:File(x), extra_files)

                    if target_site:
                        t.transforms[id].backend.requirements.sites = [ target_site ]

                    ## TODO: need a better way to determine how much memory is needed
                    #t.transforms[id].backend.requirements.memory = 4000

            ## 5. update the taskObject with the resolved cloud/site/backend
            taskObj.cloud = ''
            taskObj.site  = ''
            taskObj.backend = target_backend

            if target_cloud:
                taskObj.cloud = target_cloud

            if target_site:
                taskObj.site = target_site

            ## Tests:
            ## oops, make a mistake to prepare the task ... let's see how this framework handles it.
            ##  - the task generation failed so that the task status still in 'new' but not recorded in the framework
            ##t.transforms[id].backend.abcdefg = 'OoO'

        return t

    def __makePPTask__(self, dsfileList, atlas_release, amaevent_dataset_prefix, dq2c):
        """
        makes the GangaTask running the post-processing (PP) jobs.

        dsfileList maps a source dataset name to the list of ama_summary
        tarball filenames in it. One ArgTransform is created per source
        dataset; each job (see the embedded shell script) downloads one
        tarball with dq2-get, renames the contained ROOT files with fresh
        uuids and uploads them via dq2-put into a per-file AMAEvent
        dataset registered here. All produced datasets are finally put
        into the '<amaevent_dataset_prefix>/' container and the task is
        started.

        Returns the task object, or None when a dataset has no usable
        location or the container registration / task start fails.

        NOTE(review): 't.float = fcnt' near the end is indented inside the
        per-dataset loop although the comment above it suggests it belongs
        outside; the net effect is the same since the last iteration wins
        -- confirm intent.
        NOTE(review): the final 'except Exception' swallows the error and
        returns None without logging, so the failure reason is lost.
        """

        ## shell script executed on the worker node; arguments:
        ##   $1 source dataset, $2 source filename,
        ##   $3 destination dataset, $4 destination location
        script_template = '''
#!/bin/sh -x

SRC_DATASET=$1
SRC_FILENAME=$2
DEST_DATASET=$3
DEST_LOCATION=$4

## function for setting up pre-installed dq2client tools on WN
dq2client_setup () {

    if [ ! -z $DQ2_CLIENT_VERSION ]; then

        ## client side version request
        source $VO_ATLAS_SW_DIR/ddm/$DQ2_CLIENT_VERSION/setup.sh

    else

        ## latest version: general case
        source $VO_ATLAS_SW_DIR/ddm/latest/setup.sh

    fi

    # check configuration
    echo "DQ2CLIENT setup:"
    env | grep 'DQ2_'
    which dq2-ls

    ## return 0 if dq2-ls is found in PATH; otherwise, return 1
    return $?
}

## function for setting up CMT environment
cmt_setup () {

    # setup ATLAS software
    unset CMTPATH

    export LCG_CATALOG_TYPE=lfc

    #  LFC Client Timeouts
    export LFC_CONNTIMEOUT=180
    export LFC_CONRETRY=2
    export LFC_CONRETRYINT=60
    # improve dcap reading speed
    export DCACHE_RAHEAD=TRUE
    #export DCACHE_RA_BUFFER=262144
    # Switch on private libdcap patch with improved read-ahead buffer algorithm
    export DC_LOCAL_CACHE_BUFFER=1
    if [ n$DQ2_LOCAL_SITE_ID == n'LRZ-LMU_DATADISK' ] && [ n$DATASETTYPE == n'DQ2_LOCAL' ]; then
        export DCACHE_CLIENT_ACTIVE=1
    fi

    ATLAS_RELEASE_DIR=$VO_ATLAS_SW_DIR/software/$ATLAS_RELEASE

    if [ ! -z `echo $ATLAS_RELEASE | grep 11.` ]; then
        source $ATLAS_RELEASE_DIR/setup.sh
    elif [ ! -z `echo $ATLAS_RELEASE | grep 12.` ] || [ ! -z `echo $ATLAS_RELEASE | grep 13.` ] || [ ! -z `echo $ATLAS_RELEASE | grep 14.` ] || [ ! -z `echo $ATLAS_RELEASE | grep 15.` ]; then
        #if [ n$ATLAS_PROJECT = n'AtlasPoint1' ]; then
        if [ ! -z $ATLAS_PROJECT ] && [ ! -z $ATLAS_PRODUCTION ]; then
            source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_PRODUCTION,$ATLAS_PROJECT
        elif [ ! -z $ATLAS_PROJECT ]; then
            source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_RELEASE,$ATLAS_PROJECT
        else
            source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=AtlasOffline,$ATLAS_RELEASE
        fi

        # check if 64 bit was made and correct it
        if [ n$CMTCONFIG == n'x86_64-slc5-gcc43-opt'  ]; then

            if [ ! -z $ATLAS_PROJECT ] && [ ! -z $ATLAS_PRODUCTION ]; then
                source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_PRODUCTION,$ATLAS_PROJECT,32,setup
            elif [ ! -z $ATLAS_PROJECT ]; then
                source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_RELEASE,$ATLAS_PROJECT,32,setup
            else
                source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=AtlasOffline,$ATLAS_RELEASE,32,setup
            fi
        fi
    fi

    # print relevant env. variables for debug
    echo "CMT setup:"
    env | grep 'CMT'
    echo "SITE setup:"
    env | grep 'SITE'
}

## function for setting up athena runtime environment, no compilation
runtime_setup () {
    source $SITEROOT/AtlasOffline/$ATLAS_RELEASE/AtlasOfflineRunTime/cmt/setup.sh
}

## function for downloading file from a dq2 dataset
dq2_getfile() {
    DATASET=$1
    FILENAME=$2

    echo "Downloading $FILENAME from $DATASET"

    dq2-get --client-id=ganga -t 1800 -L ROAMING -a -d -D -H $PWD -f $FILENAME $DATASET

    return $?
}


## function for getting number of files in a dataset
dq2_numfiles() {

    DATASET=$1

    numfile=0

    echo "Getting number of files of $DATASET"

    numfile=`dq2-list-files $DATASET | grep 'total files' | awk '{print $NF}'`

    return $numfile
}

## remove files from a dataset (needed when there is a glitch in registering some files)
dq2_delfiles() {

    DATASET=$1

    retcode=0
    
    guid_list=""

    for guid in `dq2-list-files $DATASET | grep -v 'total' | grep -v 'date' | awk '{print $2}'`; do
        guid_list="$guid_list $guid"
    done

    echo "GUID to be deleted: $guid_list"

    if [ ! -z "$guid_list" ]; then
        dq2-delete-files $DATASET $guid_list
        retcode=$?
    fi

    return $retcode
}

## function for checking if dest. dataset is existing and frozen
dq2_is_dataset_frozen() {
    DATASET=$1

    is_frozen=0

    ds_status=`dq2-get-metadata $DATASET | grep -e '^state' | awk '{print $NF}'`

    if [ n$ds_status = n'frozen' ]; then
        is_frozen=1
    fi

    return $is_frozen
}

MYHOME=$PWD

## detect ATLAS software
if [ -z $VO_ATLAS_SW_DIR ]; then
   echo "No ATLAS Software found." 1>&2
   exit 1
fi

## pick up good python according to the lfc module import
## 1. try system default
## 2. try the one with Athena release
echo "==== PYTHONPATH: path, version, architecture ==="
which python
python -V
python -c 'import struct; print struct.calcsize("P") * 8.; import lfc'

if [ $? -ne 0 ]; then
    cmt_setup
    runtime_setup
    which python
    python -V
    python -c 'import struct; print struct.calcsize("P") * 8.; import lfc'
fi

## setup dq2 client
dq2client_setup
if [ $? -ne 0 ]; then
    echo "Cannot setup dq2 client" 1>&2
    exit 1
fi

## check if the dataset is frozen, return right-away a successful job if it is
dq2_is_dataset_frozen $DEST_DATASET
if [ $? -eq 1 ]; then
    echo "dataset $DEST_DATASET exists and is frozen. Presuming datasets has been produced."
    exit 0
fi

## run dq2-get
dq2_getfile $SRC_DATASET $SRC_FILENAME
if [ $? -ne 0 ] || [ ! -e $PWD/$SRC_FILENAME ]; then
    echo "Fail to download $SRC_FILENAME from $SRC_DATASET" 1>&2
    exit 2
fi

## untar the summary.tar.gz
if [ ! -e $PWD/$SRC_FILENAME ]; then
    echo "$SRC_FILENAME not available" 1>&2
    exit 2
fi

tar xzf $PWD/$SRC_FILENAME

if [ $? -ne 0 ]; then
    echo "Fail to untar $PWD/$SRC_FILENAME" 1>&2
    exit 3
fi

## as the tarball can be big, remove it immediately after extraction
rm -rf $PWD/$SRC_FILENAME

cd summary

ls -l

now=`date -u +%s`

let fcnt=0

for f in `ls`; do
    myid=`uuidgen`
    head=`echo $f | awk -F '_' '{print $1}'`

    if [ n$head = n'AMAEventStreamer' ]; then
        tail=`echo $f | awk -F '_nevs' '{print $2}'`
        if [ ! -z $tail ]; then
            new_name="AMAEvents_${myid}_nevs${tail}"
        else
            new_name="AMAEvents_${myid}.root"
        fi
    else
        new_name="${head}_${myid}.root"
    fi

    let fcnt=fcnt+1
    
    mv $f $new_name
done
ls -l

## run dq2-put and let it open
dq2-put -a -d -C -s $PWD -L $DEST_LOCATION $DEST_DATASET

if [ $? -ne 0 ]; then
    echo "Fail to dq2-put to $DEST_DATASET" 1>&2
    exit 3
fi

## comparing the number of files in dataset to make sure everything is registered properly
dq2_numfiles $DEST_DATASET

reg_fcnt=$?

if [ $reg_fcnt -ne $fcnt ]; then
    echo "Number of registered file doesn't match ($reg_fcnt != $fcnt)" 1>&2
    ## delete previously registered files in the dataset
    dq2_delfiles $DEST_DATASET

    retcode=$?

    if [ $retcode -ne 0 ]; then
        echo "Deleting registered file failed, $DEST_DATASET exit code: $retcode"
    fi

    exit 4
fi

## close and freeze the dataset if everything is fine
dq2-close-dataset $DEST_DATASET

dq2-freeze-dataset $DEST_DATASET

cd $MYHOME

exit 0
    '''

        ## dump the embedded PP shell script into a temporary file on the fly
        (fid, myexec) = tempfile.mkstemp(suffix='.sh',prefix='amavent_maker_pp_')

        f = os.fdopen(fid,'w')
        f.write('%s' % script_template)
        f.close()

        t  = Task()
        t.name  = 'AutoD3PDMakerPP'

        ## global counter of PP jobs, also used as suffix of the output dataset name
        fcnt = 0

        amaevent_dslist = []

        for ds in dsfileList.keys():

            self.logger.debug('checking location: %s' % ds)

            dsinfo = dq2c.listDatasetReplicas(ds)

            ## only the first (vuid, location info) entry is considered
            (vuid,locinfo) = dsinfo.popitem()

            dsloc   = ''
            loclist = []

            ## NOTE(review): locinfo[0]/locinfo[1] are presumably the
            ## incomplete/complete replica location lists -- confirm
            ## against the DQ2 listDatasetReplicas API
            if locinfo[0]:
                loclist = locinfo[0]
            elif locinfo[1]:
                loclist = locinfo[1]
            else:
                self.logger.error('cannot find dataset location: %s' % ds)
                t.remove(False)
                t = None
                return t

            ## dealing with the case that RAL-LCG doesn't take user analysis jobs
            if (len(loclist) == 1) and (loclist[0].find('RAL-LCG2') >= 0):
                self.logger.error('dataset %s only at %s, please DaTRI it.' % (ds, loclist[0]))
                t.remove(False)
                t = None
                return t

            ## pick the first non-RAL location as the PP job destination
            for loc in loclist:
                if not loc.find('RAL-LCG2') >= 0:
                    dsloc = loc
                    break
                else:
                    self.logger.warning('exclude location for PP job: %s' % loc)

            if not dsloc:
                self.logger.error('no good location for running dataset: %s' % ds)
                t.remove(False)
                t = None
                return t

            ## resolving post-processing script arguments of each job
            myargs = []
            for file in dsfileList[ds]:

                fcnt += 1

#                amaevent_dataset = '%s.%d.%s' % (ds_prefix, fcnt, dsloc)
                amaevent_dataset = '%s.%d' % (amaevent_dataset_prefix, fcnt)

                dsinfo = dq2c.listDatasets(amaevent_dataset)

                ## create new dataset
                if len(dsinfo.keys()) == 0:
                    new_dsinfo = dq2c.registerNewDataset('%s' % amaevent_dataset)

                amaevent_dslist.append(amaevent_dataset)

                myargs.append( [ds, file, amaevent_dataset, dsloc] )

            ## create one transform for post-processing multiple ama_summary.tgz files in one dataset
            tf = ArgTransform()
            tf.args = myargs
            tf.name = 'AMAEventMakingPP'
            tf.application = ExecutableTask()
            tf.application.exe = File(myexec)
            tf.application.env = {'ATLAS_RELEASE': atlas_release}

            tf.backend = LCG()
            tf.backend.requirements.sites += [dsloc]

            ## here we just take atlas_release, which is the first 3 digits representing the AtlasOffline version
            atlas_rtag = resolve_atlas_rtag(atlas_project='', atlas_release=atlas_release, platform='i686-slc5-gcc43-opt')
            tf.backend.requirements.software.append( atlas_rtag )

            t.transforms.append(tf)

        ## run everything immediately
        ## NOTE(review): the assignment below is inside the loop (see docstring)
            t.float = fcnt
#        if fcnt > 200:
#            t.float = 200
#        else:
#            t.float = fcnt

        ## register datasets in the container
        try:
            dq2c.registerDatasetsInContainer('%s/' % amaevent_dataset_prefix, amaevent_dslist)
            t.run()
        except Exception,e:
            t.remove(False)
            t = None

        return t

    def __make_container_dsname__(self, taskObj):
        """
        make the output container dataset name based on the information provided in taskObj.
        """

        dsname = ''

        if 'template' in taskObj.output.keys():
            ds_tmp = taskObj.output['template']

            ## TODO: pattern replacement
            re_attrs = re.compile(r'%([^%]*)%')

            for attr_str in re_attrs.findall(ds_tmp):

                val = taskObj

                for attr in attr_str.split('.'):

                    ## convert list into dictionary with id as key
                    if type(val) is type([]):
                        myval = {}
                        i = 0
                        for v in val:
                            myval[ '%s' % repr(i) ] = v
                            i += 1
                        val = myval

                    ## searching the next level value by key
                    if attr in val.keys():
                        val = val[attr]
                    else:
                        val = None
                        break

                if not val:
                    val = 'unknown'
                    self.logger.warning('attribute "%s" not found in making output container dataset name' % attr_str)

                ds_tmp = ds_tmp.replace( '%' + attr_str + '%', val)

            dsname = ds_tmp

        else:
            dsname = '%s' % get_uuid()

        return dsname

    def __create_container_dataset__(self, taskObj):
        """
        create a new container dataset for the completed task
        """

        ick = False

        ds_prefix    = None
        ds_container = None

        ## making AMAEvent dataset name
        ##  - new DDM policy, user dataset has to have user nickname as the 2nd field
        userNickname = getNickname(allowMissingNickname=True)

        if not userNickname:
            ick = False

        ds_prefix  = 'user10.%s' % userNickname
        ds_prefix += '.%s' % self.__make_container_dsname__(taskObj)

        ds_prefix += '.%s.%d' % (self.output_ds_type, int(time.time()) )

        ds_container = '%s/' % ds_prefix

        ## creat dataset contain using ds_prefix
        dq2c = DQ2()

        dsinfo = dq2c.listDatasets(ds_container)

        try:
            
            if len(dsinfo.keys()) == 0:
                ## create new container named by ds_prefix
                self.logger.debug('creating new container dataset: %s' % ds_container)
                dq2c.registerContainer(ds_container)
                ick = True
                
        except DQException, e:
            
            self.logger.warning(e)
            ick = False

        return ick, ds_prefix, ds_container

    def __get_output_files__(self, taskObj):
        """
        resolves the output dataset and files of the main task.

        Returns a tuple (ick, dsfileList):
          - ick: True when enough jobs completed to start post-processing
          - dsfileList: dict mapping output dataset name -> list of non-log
            file names registered in it
        """

        ick = False
        dsfileList = {}

        ## resolve the GangaTask associated with this task object
        gt = None
        try:
            gt = tasks(taskObj.gtId)
        except Exception:
            return ick, dsfileList

        ## number of completed jobs from GangaTask's point of view
        no_completed_jobs = gt.n_status('completed')

        ## get real completed jobs associated with this task
        ## 1. iterate through different transforms
        ## 2. if # partitions in the transforms is the same as the number of
        ##    total jobs associated to this transform, iterate all jobs and keep only "completed" jobs
        ## 3. if # partitions in the transforms is not the same as the number of
        ##    total jobs associated to this transform (usually because jobs are submitted twice),
        ##    check partition by partition and take at most 1 "completed" job from the partition
        gt_jobs     = []
        miss_parts  = []

        for trf in gt.transforms:
            n_parts = trf.n_all()

            for id_part in range(1, n_parts+1):
                find_part_job = False
                for j in trf.getPartitionJobs(id_part):
                    ## take at most one completed job (with output) per partition
                    if j.status == 'completed' and j.outputdata.output:
                        gt_jobs.append(j)
                        find_part_job = True
                        break

                if not find_part_job:
                    miss_parts.append(id_part)

        ## false the post-process if number of really completed jobs != number of expected jobs
        if len(gt_jobs) != no_completed_jobs:
            miss_parts_str = ','.join( map(repr, miss_parts) )
            self.logger.warning('task %d: no. really completed jobs (%d) != %d, missing parts: %s' % (gt.id, len(gt_jobs), no_completed_jobs, miss_parts_str))

        req_completed_jobs = no_completed_jobs

        ## for MC samples, 90% of the completed jobs is considered good enough
        if re.match('^mc',taskObj.input):
            req_completed_jobs = int(no_completed_jobs * 0.9)

        if len( gt_jobs ) < req_completed_jobs:
            self.logger.warning('post-processing task will not start')
            ick = False
        else:
            for sj in gt_jobs:

                for finfo in sj.outputdata.output:

                    ## finfo format: dataset,filename,guid,size,cksum,location
                    ## only the first two fields are needed here
                    fattrs   = finfo.split(',')
                    dataset  = fattrs[0]
                    filename = fattrs[1]

                    ## ignore the log file
                    if filename.find('.log.tgz') >= 0:
                        continue

                    dsfileList.setdefault(dataset, []).append(filename)

            ick = True

        return ick, dsfileList

    def postprocessTask(self, taskObj):
        """
        postprocesses the given task.

        - AMAEvent dataset is created right away
        - generating a new GangaTask to run post-processing jobs at NIKHEF
        - ama_summary.tgz is downloaded to the WN and then move again to NIKHEF's grid storage.
        """

        ick = True

        gtId_pp = None

        ## arrange the base dir for storing the Ntuple files
        md = taskObj.metadata

        ick, ds_prefix, ds_container = self.__create_container_dataset__(taskObj)

        if not ick:
            return ick, gtId_pp

        gt = None
        try:
            gt = tasks(taskObj.gtId)
        except Exception, e:
            self.logger.warning(e)
            ick = False
            return ick, gtId_pp

        ## make sure we take only the first 3 digits from the main task's athena_release setting
        atlas_release = '.'.join( gt.transforms[0].application.atlas_release.split('.')[:3] )

        ## resolve the dataset and file information of the output from the main task
        ick, dsfileList = self.__get_output_files__(taskObj)

        if not ick:
            return ick, gtId_pp

        ## create the post-processing task
        dq2c = DQ2()

        t = None
        self.sync_lock.acquire()
        try:
            t = self.__makePPTask__(dsfileList, atlas_release, ds_prefix, dq2c)
        except Exception,e:
            self.logger.error(e)
            t = None
        self.sync_lock.release()

        if t:
            gtId_pp = t.id
            taskObj.output['ds'] = ds_container
            ick = True
        else:
            ick = False

        return ick, gtId_pp

    def makeTaskReport(self, taskObjList):
        '''
        make task report in JSON format.

        Groups the given task objects by status and writes five JSON files
        into self.reportdir: finished-data, finished-mc, running, failed
        and unfinished. Always returns True.
        '''

        ## json.dumps produces valid JSON even when string values contain
        ## quotes, and serializes None/True/False correctly; the repr-based
        ## fallback is kept only for very old interpreters without json
        try:
            import json
            _dumps = json.dumps
        except ImportError:
            _dumps = lambda d: repr(d).replace('\'', '"')

        now = time.strftime('%d %b %Y %H:%M:%S UTC', time.gmtime())

        ## finished tasks are split into two groups: real/group data (index 0) and MC (index 1)
        tasksFinish     = [{'rpt_status':'finished', 'group':'DATA', 'rpt_time':now, 'tasks':[]},
                           {'rpt_status':'finished', 'group':'MC'  , 'rpt_time':now, 'tasks':[]}]

        tasksRun        = [{'rpt_status':'running'   , 'group':'RUNNING',    'rpt_time':now, 'tasks':[]}]
        tasksFail       = [{'rpt_status':'failed'    , 'group':'FAILED',     'rpt_time':now, 'tasks':[]}]
        tasksUnfinish   = [{'rpt_status':'unfinished', 'group':'UNFINISHED', 'rpt_time':now, 'tasks':[]}]

        for t in taskObjList:

            ## slimmed-down copy of the task with only report-relevant attributes
            t_slim = {}

            t_slim['seq_id']        = 0
            t_slim['status']        = t['status']
            t_slim['lastUpdate']    = t['lastUpdate']
            t_slim['input']         = t['input']
            t_slim['output_ds']     = ''
            t_slim['sample']        = t['metadata']['sample']
            t_slim['sample_tag']    = t['metadata']['tag']
            t_slim['sample_run']    = t['metadata']['run']
            t_slim['version']       = t['metadata']['version']
            t_slim['amaconfig']     = t['amaconfig']
            t_slim['amaconfig_flag_str'] = ''
            t_slim['ama_userarea']  = t['ama_userarea']
            t_slim['ama_flag_str']  = ','.join( t['metadata']['ama_flags'] )
            t_slim['atlas_project'] = t['metadata']['atlas_project']
            t_slim['atlas_release'] = t['metadata']['atlas_release']
            t_slim['nickname']      = ''
            t_slim['location']      = ''
            t_slim['backend']       = 'LCG'
            t_slim['error']         = ''
            t_slim['progress']      = {}

            ## optional attributes: fill them only when present on the task
            if t['metadata'].has_key('nickname'):
                t_slim['nickname'] = t['metadata']['nickname']

            if t.has_key('error'):
                t_slim['error'] = t['error']

            if t['output'].has_key('ds'):
                t_slim['output_ds'] = t['output']['ds']

            if t.has_key('amaconfig_flags'):
                t_slim['amaconfig_flag_str']= ','.join( t['amaconfig_flags'] )

            if t.has_key('cloud') and t['cloud']:
                t_slim['location'] = t['cloud']

            if t.has_key('site') and t['site']:
                t_slim['location'] += ':' + t['site']

            if t.has_key('backend') and t['backend']:
                t_slim['backend'] = t['backend']

            if t.status == AutoD3PDMakerTask.status_map['finished']:

                ## group 0: group-production and real-data samples; group 1: MC
                if re.match('^group.*', t['metadata']['sample']):
                    groupId = 0
                elif re.match('^data[0-9]?.*', t['metadata']['sample']):
                    groupId = 0
                else:
                    groupId = 1

                t_slim['seq_id'] = len(tasksFinish[groupId]['tasks']) + 1
                tasksFinish[groupId]['tasks'].append(t_slim)
                continue

            ## the non-finished report lists each have a single group (index 0)
            if t.status in [AutoD3PDMakerTask.status_map['running'], AutoD3PDMakerTask.status_map['pause']]:

                try:
                    t_slim['progress'].update( {'main': t['progress']['main'] } )
                except KeyError:
                    pass

                t_slim['seq_id'] = len(tasksRun[0]['tasks']) + 1
                tasksRun[0]['tasks'].append(t_slim)
                continue

            if t.status == AutoD3PDMakerTask.status_map['finishing']:

                try:
                    t_slim['progress'].update( {'pp': t['progress']['pp']} )
                except KeyError:
                    pass

                t_slim['seq_id'] = len(tasksRun[0]['tasks']) + 1
                tasksRun[0]['tasks'].append(t_slim)
                continue

            if t.status == AutoD3PDMakerTask.status_map['failed']:
                t_slim['seq_id'] = len(tasksFail[0]['tasks']) + 1
                tasksFail[0]['tasks'].append(t_slim)
                continue

            if t.status == AutoD3PDMakerTask.status_map['unfinished']:
                t_slim['seq_id'] = len(tasksUnfinish[0]['tasks']) + 1
                tasksUnfinish[0]['tasks'].append(t_slim)
                continue

        json_fpath_finish_data = os.path.join(self.reportdir, 'd3pdmaker_tlist_finish_data.json')
        json_fpath_finish_mc   = os.path.join(self.reportdir, 'd3pdmaker_tlist_finish_mc.json')
        json_fpath_run         = os.path.join(self.reportdir, 'd3pdmaker_tlist_run.json')
        json_fpath_fail        = os.path.join(self.reportdir, 'd3pdmaker_tlist_fail.json')
        json_fpath_unfinish    = os.path.join(self.reportdir, 'd3pdmaker_tlist_unfinish.json')

        def __make_report_json__(taskDict, json_fpath):
            ## dump one report group into its JSON file
            f = open(json_fpath, 'w')
            f.write('%s\n' % _dumps(taskDict))
            f.close()

        __make_report_json__( tasksFinish[0]   , json_fpath_finish_data )
        __make_report_json__( tasksFinish[1]   , json_fpath_finish_mc   )
        __make_report_json__( tasksRun[0]      , json_fpath_run )
        __make_report_json__( tasksFail[0]     , json_fpath_fail )
        __make_report_json__( tasksUnfinish[0] , json_fpath_unfinish )

        return True

class FastStructNtupleMakerPlugin(AMAEventMakerPlugin):
    '''
    AutoD3PDMaker Plugin for StructNtuple generation.
    '''

    def __init__(self, dq2setup='', basedir='', codedir='', reportdir=''):
        AMAEventMakerPlugin.__init__(self, dq2setup=dq2setup, basedir=basedir, codedir=codedir, reportdir=reportdir)

        self.mt_output        = 'StructNTuple.root'
        self.output_ds_type   = 'STRUCT'
        self.backend          = 'PANDA'
        self.ppBatchMode      = False

    def postprocessTask(self, taskObj):
        """
        postprocesses the given task.
        """

        ick = True
        gtId_pp = None

        ick, ds_prefix, ds_container = self.__create_container_dataset__(taskObj)

        if not ick:
            return ick, gtId_pp

        ## resolve the dataset and file information of the output from the main task
        ick, dsfileList = self.__get_output_files__(taskObj)

        if not ick:
            return ick, gtId_pp

        self.logger.info( 'output summary: %s' % repr(dsfileList) )

        dq2c = DQ2()
        ## 1. for every keys in dsfileList, close and freeze the dataset
        ## 2. add the frozen datasets in step 1 into the ds_container
        ## 3. update taskObj with dataset container
        for ds in dsfileList.keys():
            try:
                dq2c.closeDataset(ds)
                dq2c.freezeDataset(ds)
            except DQClosedDatasetException:
                pass
            except DQFrozenDatasetException:
                pass
            except DQ2Exception, e:
                self.logger.error(e)
                taskObj.error = 'fail to close/freeze output dataset'
                ick = False
                return ick, gtId_pp
        try:
            dq2c.registerDatasetsInContainer(name=ds_container, datasets=dsfileList.keys())
        except DQ2Exception, e:
            self.logger.error(e)
            taskObj.error = 'fail to add datasets in container'
            ick = False
            return ick, gtId_pp

        taskObj.output['ds'] = ds_container

        return ick, gtId_pp

class StructNtupleMakerPlugin(AMAEventMakerPlugin):
    '''
    AutoD3PDMaker Plugin for StructNtuple generation.
    '''

    def __init__(self, dq2setup='', basedir='', codedir='', reportdir=''):
        AMAEventMakerPlugin.__init__(self, dq2setup=dq2setup, basedir=basedir, codedir=codedir, reportdir=reportdir)

        self.mt_output        = 'STRUCTNTUP.root'
        self.output_ds_type   = 'StructNtuple'
        self.backend          = 'LCG'

    def __makePPTask__(self, dsfileList, atlas_release, structs_dataset_prefix, dq2c):

        script_template = '''
#!/bin/sh -x

SRC_DATASET=$1
SRC_FLISTCARD=$2
DEST_DATASET=$3
DEST_LOCATION=$4
BACKEND=$5

## function for setting up pre-installed dq2client tools on WN
dq2client_setup () {

    BACKEND=$1

    if [ n$BACKEND = n'Local' ]; then
        source /project/atlas/nikhef/dq2/dq2_setup.sh.NIKHEF
    fi

    if [ n$BACKEND = n'LCG' ]; then
        if [ ! -z $DQ2_CLIENT_VERSION ]; then

            ## client side version request
            source $VO_ATLAS_SW_DIR/ddm/$DQ2_CLIENT_VERSION/setup.sh

        else

            ## latest version: general case
            source $VO_ATLAS_SW_DIR/ddm/latest/setup.sh

        fi
    fi

    # check configuration
    echo "DQ2CLIENT setup:"
    env | grep 'DQ2_'
    which dq2-ls

    ## return 0 if dq2-ls is found in PATH; otherwise, return 1
    return $?
}

## function for setting up CMT environment
cmt_setup () {

    # setup ATLAS software
    unset CMTPATH

    export LCG_CATALOG_TYPE=lfc

    #  LFC Client Timeouts
    export LFC_CONNTIMEOUT=180
    export LFC_CONRETRY=2
    export LFC_CONRETRYINT=60
    # improve dcap reading speed
    export DCACHE_RAHEAD=TRUE
    #export DCACHE_RA_BUFFER=262144
    # Switch on private libdcap patch with improved read-ahead buffer algorithm
    export DC_LOCAL_CACHE_BUFFER=1
    if [ n$DQ2_LOCAL_SITE_ID == n'LRZ-LMU_DATADISK' ] && [ n$DATASETTYPE == n'DQ2_LOCAL' ]; then
        export DCACHE_CLIENT_ACTIVE=1
    fi

    ATLAS_RELEASE_DIR=$VO_ATLAS_SW_DIR/software/$ATLAS_RELEASE

    if [ ! -z `echo $ATLAS_RELEASE | grep 11.` ]; then
        source $ATLAS_RELEASE_DIR/setup.sh
    elif [ ! -z `echo $ATLAS_RELEASE | grep 12.` ] || [ ! -z `echo $ATLAS_RELEASE | grep 13.` ] || [ ! -z `echo $ATLAS_RELEASE | grep 14.` ] || [ ! -z `echo $ATLAS_RELEASE | grep 15.` ]; then
        #if [ n$ATLAS_PROJECT = n'AtlasPoint1' ]; then
        if [ ! -z $ATLAS_PROJECT ] && [ ! -z $ATLAS_PRODUCTION ]; then
            source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_PRODUCTION,$ATLAS_PROJECT
        elif [ ! -z $ATLAS_PROJECT ]; then
            source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_RELEASE,$ATLAS_PROJECT
        else
            source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=AtlasOffline,$ATLAS_RELEASE
        fi

        # check if 64 bit was made and correct it
        if [ n$CMTCONFIG == n'x86_64-slc5-gcc43-opt'  ]; then

            if [ ! -z $ATLAS_PROJECT ] && [ ! -z $ATLAS_PRODUCTION ]; then
                source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_PRODUCTION,$ATLAS_PROJECT,32,setup
            elif [ ! -z $ATLAS_PROJECT ]; then
                source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=$ATLAS_RELEASE,$ATLAS_PROJECT,32,setup
            else
                source $ATLAS_RELEASE_DIR/cmtsite/setup.sh -tag=AtlasOffline,$ATLAS_RELEASE,32,setup
            fi
        fi
    fi

    # print relevant env. variables for debug
    echo "CMT setup:"
    env | grep 'CMT'
    echo "SITE setup:"
    env | grep 'SITE'
}

## function for setting up athena runtime environment, no compilation
runtime_setup () {
    source $SITEROOT/AtlasOffline/$ATLAS_RELEASE/AtlasOfflineRunTime/cmt/setup.sh
}

## function for downloading file from a dq2 dataset
dq2_getfile() {
    DATASET=$1
    FILENAME=$2

    echo "Downloading $FILENAME from $DATASET"

    dq2-get --client-id=ganga -t 1800 -L ROAMING -a -d -D -H $PWD -f $FILENAME $DATASET

    return $?
}

## function for checking if dest. dataset is existing and frozen
dq2_is_dataset_frozen() {
    DATASET=$1

    is_frozen=0

    ds_status=`dq2-get-metadata $DATASET | grep -e '^state' | awk '{print $NF}'`

    if [ n$ds_status = n'frozen' ]; then
        is_frozen=1
    fi

    return $is_frozen
}

MYHOME=$PWD

ls -l

# check if the $SRC_FLISTCARD is presented as a local file
if  [ ! -e $PWD/$SRC_FLISTCARD ]; then
    echo "source flist card not available: $PWD/SRC_FLISTCARD" 1>&2
    exit 2
fi

# detect ATLAS software
if [ n$BACKEND = n'LCG' ] && [ -z $VO_ATLAS_SW_DIR ]; then
   echo "No ATLAS Software found." 1>&2
   exit 1
fi

## pick up good python according to the lfc module import
## 1. try system default
## 2. try the one with Athena release
echo "==== PYTHONPATH: path, version, architecture ==="
which python
python -V
python -c 'import struct; print struct.calcsize("P") * 8.; import lfc'

if [ $? -ne 0 ]; then
    cmt_setup
    runtime_setup
    which python
    python -V
    python -c 'import struct; print struct.calcsize("P") * 8.; import lfc'
fi

## setup dq2 client
dq2client_setup $BACKEND
if [ $? -ne 0 ]; then
    echo "Cannot setup dq2 client" 1>&2
    exit 1
fi

## check if the dataset is frozen, return right-away a successful job if it is
dq2_is_dataset_frozen $DEST_DATASET
if [ $? -eq 1 ]; then
    echo "dataset $DEST_DATASET exists and is frozen. Presuming datasets has been produced."
    exit 0
fi

#### creating new dataset from the existing dataset
for f in `grep "$SRC_DATASET" $SRC_FLISTCARD | awk '{for (i=2; i<=NF; i++) printf "%s ",$i; printf "\\n";}'`; do

    dq2-put -a -d -C -D $SRC_DATASET -f $f -L $DEST_LOCATION $DEST_DATASET

    if [ $? -ne 0 ]; then
        echo "Fail to dq2-put to $DEST_DATASET" 1>&2
        exit 3
    fi
done


## close the dataset
dq2-close-dataset $DEST_DATASET
if [ $? -ne 0 ]; then
    echo "Fail to close dataset $DEST_DATASET" 1>&2
    exit 3
fi

## freeze the dataset
dq2-freeze-dataset $DEST_DATASET
if [ -$? -ne 0 ]; then
    echo "Fail to freeze dataset $DEST_DATASET" 1>&2
    exit 3
fi

exit 0
    '''

        ## making rundef file on the fly
        (fid, myexec) = tempfile.mkstemp(suffix='.sh',prefix='structs_maker_pp_')

        f = os.fdopen(fid,'w')
        f.write('%s' % script_template)
        f.close()

        t  = Task()
        t.name  = 'AutoD3PDMakerPP'

        dscnt = 0

        structs_dslist = []

        myargs = []

        ## making a "ds" "flist" card
        (fid, my_flist_card) = tempfile.mkstemp(suffix='.txt', prefix='structs_maker_dsfile_')

        f = os.fdopen(fid,'w')

        for ds in dsfileList.keys():

            self.logger.debug('checking location: %s' % ds)

            dsinfo = dq2c.listDatasetReplicas(ds)

            (vuid,locinfo) = dsinfo.popitem()

            dsloc   = ''
            loclist = []
            if locinfo[0]:
                loclist = locinfo[0]
            elif locinfo[1]:
                loclist = locinfo[1]
            else:
                self.logger.error('cannot find dataset location: %s' % ds)
                t.remove(False)
                t = None
                return t

            ## dealing with the case that RAL-LCG doesn't take user analysis jobs
            if (len(loclist) == 1) and (loclist[0].find('RAL-LCG2') >= 0):
                self.logger.error('dataset %s only at %s, please DaTRI it.' % (ds, loclist[0]))
                t.remove(False)
                t = None
                return t

            for loc in loclist:
                if not loc.find('RAL-LCG2') >= 0:
                    dsloc = loc
                    break
                else:
                    self.logger.warning('exclude location for PP job: %s' % loc)

            if not dsloc:
                self.logger.error('no good location for running dataset: %s' % ds)
                t.remove(False)
                t = None
                return t
#
#
#
#            self.logger.debug('checking location: %s' % ds)
#
#            dsinfo = dq2c.listDatasetReplicas(ds)
#
#            (vuid,locinfo) = dsinfo.popitem()
#
#            dsloc = ''
#            if locinfo[0]:
#                dsloc = locinfo[0][0]
#            elif locinfo[1]:
#                dsloc = locinfo[1][0]
#            else:
#                self.logger.error('cannot find dataset location: %s' % ds)
#                t.remove(False)
#                t = None
#                return t

            dscnt += 1

            structs_dataset = '%s.%d' % (structs_dataset_prefix, dscnt)

            dsinfo = dq2c.listDatasets(structs_dataset)

            ## create new dataset
            if len(dsinfo.keys()) == 0:
                new_dsinfo = dq2c.registerNewDataset('%s' % structs_dataset)

            structs_dslist.append(structs_dataset)

            f.write('%s %s\n' % (ds, ' '.join(dsfileList[ds])))

            myargs.append( [ds, os.path.basename(my_flist_card), structs_dataset, dsloc, self.backend] )

        f.close()

        ## create one transform for post-processing multiple ama_summary.tgz files in one dataset
        tf = ArgTransform()
        tf.args = myargs
        tf.name = 'StructNtupleMakingPP'
        tf.application = ExecutableTask()
        tf.application.exe = File(myexec)
        tf.application.env = {'ATLAS_RELEASE': atlas_release}
        tf.inputsandbox += [ File(my_flist_card) ]

        tf.backend = LCG()
        #tf.backend.CE = 'gazon.nikhef.nl:2119/jobmanager-pbs-short'
        tf.backend.requirements = LCGRequirements()

        ## here we just take atlas_release, which is the first 3 digits representing the AtlasOffline version
        atlas_rtag = resolve_atlas_rtag(atlas_project='', atlas_release=atlas_release, platform='i686-slc5-gcc43-opt')
        tf.backend.requirements.software.append( atlas_rtag )

        t.transforms.append(tf)

        t.float = len(dsfileList.keys())

        ## register datasets in the container
        try:
            dq2c.registerDatasetsInContainer('%s/' % structs_dataset_prefix, structs_dslist)
            t.run()
        except Exception,e:
            t.remove(False)
            t = None

        return t
