# ama_util.py
# 
# Copyright (C) 2010 Hurng-Chun Lee <hurngchunlee@gmail.com>
# 
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import common_util
import ddm_util
import lfc_util
import os
import re

def make_fs_sample(dsn, site=None, dsloc_pref_regex='', sampleName='', chunkSize=-1, completeOnly=True, debug=False):
    """
    Builds file-sample definition files for the datasets matching dsn.

    If *site* is given, only datasets replicated at that site are used.
    Otherwise each dataset (or each dataset in the container, when dsn
    ends with '/') is mapped to one replica location first, optionally
    preferring locations matching *dsloc_pref_regex*.

    Returns a dictionary keyed by site name whose values are the lists of
    generated sample-definition file names.
    """

    ## sample definition list (organized by site)
    sampleDefList = {}

    ## site is given: resolve datasets at that site only
    if site:
        datasets = ddm_util.get_datasets(dsn, site=site, completeOnly=completeOnly, debug=debug)
        datasets.sort()

        sampleDefList[site] = __impl_make_fs_sample__(datasets, site, sampleName=sampleName, chunkSize=chunkSize, completeOnly=completeOnly, debug=debug)
    else:

        ## maps a replica location to the datasets found there
        ds_rep = {}
        if dsn.endswith('/'):

            ## dsn is a container: inspect replicas of each contained dataset
            rep_info = ddm_util.get_container_ds_replicas( dsn )

            ## loop over each dataset in container
            for ds in rep_info.keys():

                rep = None

                ## loop over each version in dataset
                for vuid, info in rep_info[ds].items():

                    ## firstly check complete replicas (info[1])
                    if info[1]:
                        ## preferred location of a complete replica
                        rep = __pickup_replica_location__(info[1], preference_regex=dsloc_pref_regex)

                    ## secondly check incomplete replicas (info[0]) if allowed
                    elif (not completeOnly) and info[0]:
                        ## preferred location of an incomplete replica
                        rep = __pickup_replica_location__(info[0], preference_regex=dsloc_pref_regex)

                    ## if replica found, break the loop over versions
                    if rep:
                        break

                ## update ds_rep dictionary
                ## (setdefault replaces the py2-only has_key idiom)
                if rep is not None:
                    ds_rep.setdefault(rep, []).append(ds)
                else:
                    common_util.log('ERROR: no replica found: %s' % ds)
        else:
            rep_info = ddm_util.get_dataset_replicas(dsn)

            ## note: 'locations' avoids shadowing rep_info inside the loop
            for ds, locations in rep_info.items():

                if locations:
                    ## take the first listed location for this dataset
                    ds_rep.setdefault(locations[0], []).append(ds)
                else:
                    common_util.log('ERROR: no replica found: %s' % ds)

        if ds_rep:
            ## generate one sample definition per replica site
            for rep_site, datasets in ds_rep.items():
                sampleDefList[rep_site] = __impl_make_fs_sample__(datasets, rep_site, sampleName=sampleName, chunkSize=chunkSize, completeOnly=completeOnly, debug=debug)

    return sampleDefList

def __pickup_replica_location__(locations, preference_regex=''):
    """
    selects the replica location based on the preference
    """

    re_pref = None

    if preference_regex:
        re_pref = re.compile(preference_regex)


    # default is the first location
    loc = locations[0]

    # look for preferred location if specified
    if re_pref:
        for l in locations:
            if re_pref.match( l ):
                loc = l
                break

    return loc

def __impl_make_fs_sample__(datasets, site, sampleName='', chunkSize=-1, completeOnly=True, debug=False):
    """
    Writes sample-definition files for *datasets* replicated at *site*.

    Each output file is named '<sampleName>_<site>.<id>.def' and contains
    a TITLE/FLAGS header followed by one 'gridcopy://<pfn>' line per
    selected file replica.  When chunkSize > 0, a new output file is
    started every chunkSize entries.  An empty trailing file is removed.

    Returns the list of generated file names.
    """

    sampleDefList = []

    if not sampleName:
        sampleName = 'MySample'

    samplefile_prefix = '%s_%s' % (sampleName, site)
    samplefile_suffix = '.def'

    common_util.log('Generating sample file ...')

    ## resolve the site's LFC and SRM access points
    lfc_host = ddm_util.get_lfc_host(site)
    srm_host = ddm_util.get_srm_host(site)

    srm_endpt_info   = ddm_util.get_srm_endpoint(site)
    srm_endpt_prefix = ddm_util.get_simple_srmpath(srm_endpt_info['endpt'])

    if debug:
        common_util.log('datasets  : %s' % repr(datasets))
        common_util.log('DDM site  : %s' % site)
        common_util.log('lfc host  : %s' % lfc_host)
        common_util.log('srm host  : %s' % srm_host)
        common_util.log('srm prefix: %s' % srm_endpt_prefix)

    ## get files of datasets and collect their GUIDs
    files = ddm_util.get_files(datasets)
    guids = []
    for ds in files.keys():
        guids += files[ds][0].keys()

    ## get physical locations (PFNs) of the files from the LFC
    pfns = lfc_util.get_pfns(lfc_host, guids)

    header  = 'TITLE: %s\n' % sampleName
    header += 'FLAGS: GridCopy=1\n'

    def __make_new_file__(id):
        ## opens a new chunk file and writes the common header
        f = open('%s.%d%s' % (samplefile_prefix, id, samplefile_suffix), 'w')
        f.write(header)
        return f

    id  = 0       # chunk file index
    cnt = 0       # entries written to the current chunk

    cnt_sum = 0   # entries written over all chunks

    ## create the first file
    f = __make_new_file__(id)

    sampleDefList.append( f.name )

    ## TODO: this is a quick but dirty solution to ignore summary file of AMA
    ##       need a better way to exclude certain files
    skip_tags = ('summary', 'STRUCTNTUP', 'nevs0.root')

    for ds in files.keys():
        fileInfo = files[ds][0]
        for guid in fileInfo.keys():
            ## a GUID may have no PFN registered in the LFC; skip it quietly
            ## (replaces the py2-only 'except KeyError, e' construct)
            for pfn in pfns.get(guid, []):

                ## remove the full SRM endpoint description
                pfn = ddm_util.get_simple_srmpath(pfn)

                ## stop scanning this GUID's replicas on an exclusion tag
                if any(pfn.find(tag) > -1 for tag in skip_tags):
                    break

                ## only accept replicas served by this site's SRM endpoint
                if pfn.find(srm_endpt_prefix) > -1:
                    cnt += 1
                    cnt_sum += 1
                    f.write('gridcopy://%s\n' % pfn)

                    ## when reaching the chunkSize:
                    ##  - close the current file
                    ##  - reset file counter
                    ##  - increase file id
                    ##  - create a new file indexed by the new id
                    if cnt == chunkSize:
                        f.close()
                        cnt = 0
                        id += 1
                        f = __make_new_file__(id)
                        sampleDefList.append( f.name )

    ## close the file in any case
    if f:

        fpath = f.name
        f.close()

        ## remove the last file if there is no content in it.
        if cnt == 0:
            os.remove( fpath )

            sampleDefList.remove( fpath )

            if id == 0:
                ## the only file is removed ... something must be wrong, a warning is given
                common_util.log('WARNING: no sample file is generated')

    if cnt_sum == 0:
        common_util.log('WARNING: no entries in sample file')

    common_util.log('Sample files are generated')

    return sampleDefList
