import sys
import re
import common_util

## make sure the Athena environment is set properly to have pyAMI library available
try:
    from pyAMI.pyAMI import *
except ImportError:
    raise ImportError( 'Unable to load pyAMI python module. Setup AtlasCore runtime environment first.' )

import re

class AMIQuery:

    def __init__(self):

        self.debug   = False
        self.userSQL = False
        self.sql     = ''
        self.order   = ''

        self._predicate = ''
        self.entity = 'dataset'
        self.select = [ 'logicalDatasetName' ]
        self.query = ''
        self.amiclient = AMI()

        #self.dbName = {'mc08':'', 'mc09':'', '' }


    def getUserPredicate(self):
        return self._predicate[self._predicate.find("(1=1)")+9:]

    def makeCommand(self , limit="0,10"):

        if self.query:
            argv = self.query.split()

        re_arg = re.compile('^([a-zA-Z0-9_-]*)([><=]+)([a-zA-Z0-9_.%-]*)$')
        re_num = re.compile('^[0-9]+$')

        argument = []
        argument.append('SearchQuery')
        argument.append('entity=%s' % self.entity)

        if not self.userSQL:
            self._predicate = "(1=1)"
            for arg in argv:

                matches = re_arg.match(arg)

                if matches:
                    attr = matches.group(1)
                    optr = matches.group(2)
                    valu = matches.group(3)

                    ## this is the case of numerical number comparison
                    if re_num.match(valu):
                        self._predicate += ' AND %s %s %s' % (attr, optr, valu)
                    else:
                        valu = valu + '%'
                        self._predicate = self._predicate + " AND %s like '%s'" % (attr, valu)
                else:
                    print 'ignore invalid query argument: %s' % arg


            ## make query argument passing to AMI service
            self.sql = 'SELECT %s WHERE' % ','.join( self.select )

            if self.entity in ['dataset']:
                self.sql += ' amiStatus=\'VALID\' AND'

            if not self.order:
                self.order = self.select[0]
                
            self.sql += ' %s ORDER BY %s LIMIT %s' % (self._predicate, self.order, limit)
        
        else:
            self.sql += ' LIMIT %s' % limit

        argument.append('glite=%s' % self.sql)
        argument.append('project=Atlas_Production')
        argument.append('processingStep=Atlas_Production')
        argument.append('entityName=%s' % self.entity)
        argument.append('mode=defaultField')

        return argument

    def runQuery(self):

        startLimit = 0
        # This number is the max to be returned by one request.
        # to avoid SOAP explosion
        # If there are more then we must resend the request.
        amountToDo = 5000
        resultList  = []

        try:
            
            finished = False
            numDone  = 0
            numToDo  = 0

            while (not finished):

                argument = self.makeCommand( limit=str(startLimit) + "," + str(amountToDo) )

                if self.debug:
                    common_util.log( 'Entity: %s SQL: %s' % (self.entity, self.sql) )

                result = self.amiclient.execute( argument )

                dom = result.getAMIdom()

                if self.debug:
                    dom.writexml( open('ami_search_%s.xml' % self.entity,'w') )

                rowsets = dom.getElementsByTagName('rowset')
                infos = dom.getElementsByTagName('info')

                for info in infos:
                    # the first child gives the total number found: example
                    #"View 100 records, starting from row 1 of 2653 records "
                    #print info.firstChild.nodeValue

                    # take a look at the results each time - as the number may have changed between two queries
                    ofIndex = info.firstChild.nodeValue.find("of")

                    if (ofIndex <0 ):
                        numToDo=0
                    else:
                        numDone = amountToDo + numDone
                        recordsIndex = info.firstChild.nodeValue.find("records", ofIndex)
                        numToDo = info.firstChild.nodeValue[ofIndex + 2:recordsIndex - 1]
                    #recordsIndex = info.firstChild.nodeValue.find("records", ofIndex)
                    #numToDo = info.firstChild.nodeValue[ofIndex + 2:recordsIndex - 1]

                finished = (int(numDone) >= int(numToDo))

                if(not finished):
                    startLimit = startLimit + amountToDo

                if (numDone >0):
                    for rowset in rowsets:
                        rows = rowset.getElementsByTagName('row')
                        for row in rows:

                            ## initiate the data dictionary for each row
                            data = {}
                            for sel in self.select:
                                data[sel.lower()] = None

                            fields = row.getElementsByTagName('field')

                            for field in fields:

                                try:
                                    fieldname = field.attributes['name'].value

                                    if ( fieldname.lower() in map(lambda x:x.lower(), self.select) ):
                                        value = field.firstChild.nodeValue
                                        data[ fieldname.lower() ] = value
                                except:
                                    pass
                                      
                            resultList.append(data)

#            if self.debug:
#                now = datetime.now()
#                common_util.log( "Request Time : " + now.strftime("%Y-%m-%d %H:%M")+ "\nRequest : "+ self.getUserPredicate() )
#                common_util.log( str(numToDo) + " datasets found for this query" )

        except Exception, msg:
            common_util.log( 'Error: ' + str(msg) )

        return resultList

def getRealDatasetsByRuns(runlist, dataType='AOD', stream='', version=''):
    '''
    Selects the datasets derived from ATLAS detector data corresponding to the given runs.

    runlist  -- list of run numbers (ints); not modified by this call
    dataType -- one of AOD/RAW/ESD/DESD/DAOD (unsupported values fall back to AOD)
    stream   -- optional AMI streamName selection
    version  -- optional AMI version selection

    Returns a dict mapping each requested run number to a list of
    {'id':..., 'name':..., 'run':...} dataset descriptions (possibly empty).
    '''

    ## initiate the dataset dictionary with key = run number, value = list of datasets
    datasets = {}
    for run in runlist:
        datasets[run] = []

    ## nothing to query for an empty run list (min/max would raise ValueError)
    if not runlist:
        return datasets

    ## the query only needs the run range, so do not sort (the old in-place
    ## sort mutated the caller's list for no benefit)
    min_run = min(runlist)
    max_run = max(runlist)

    amiQry = AMIQuery()
    amiQry.entity = 'dataset'
    amiQry.select = [ 'logicalDatasetName', 'identifier', 'runNumber' ]

    if dataType and ( dataType not in ['AOD','RAW','ESD','DESD', 'DAOD'] ):
        common_util.log('WARNING: dataType %s is not supported.  Selection will work on AOD type of data' % dataType)
        dataType = 'AOD'

    qry_list = []
    if dataType:
        qry_list.append('dataType=%s' % dataType)

    if stream:
        qry_list.append('streamName=%s' % stream)

    if version:
        qry_list.append('version=%s' % version)

    if min_run:
        qry_list.append('runNumber>=%d' % min_run)

    if max_run:
        qry_list.append('runNumber<=%d' % max_run)

    amiQry.query = ' '.join(qry_list)

    ds_results = amiQry.runQuery()

    for ds_item in ds_results:
        ## runQuery() lower-cases all field names
        ds    = ds_item['logicaldatasetname']
        ds_id = ds_item['identifier']
        run   = ds_item['runnumber']

        ## keep only datasets whose run number is one of the requested runs
        ## (the range query may return runs that were not in runlist)
        if run and int(run) in datasets:
            datasets[ int(run) ].append( {'id': ds_id, 'name': ds, 'run': int(run)} )

    return datasets


def getFilesByLumiRange(ds_id, lumiRanges=None):
    '''
    Selects the files in a dataset (specified by AMI dataset id) contains the given lumi range.

    ds_id      -- AMI dataset identifier
    lumiRanges -- optional list of (begin, end) lumi-block ranges

    NOTE: not implemented yet -- always returns an empty list.
    '''
    ## None sentinel instead of a mutable default list, which would be
    ## shared across calls
    if lumiRanges is None:
        lumiRanges = []

    files = []

    return files


def _filterFilesInLumiRange_(ami_results, lumi_range):

    lumibn_list = []
    filtered_ami_results = []

    re_lumir = re.compile('^.*(lb[0-9]+)\-(lb[0-9]+).*$')

    ## quickly go through the result dictionary to get the name, lumibn
    for item in ami_results:

        lumibn = item[ 'lumiBN'.lower() ]

        if not (lumibn is None):
            lumibn_list.append( int(lumibn) )

    lumibn_list.sort()

    ## filter the items to keep only the files within the lumi_range:
    ##  - the result without lumibn value will by kept anyway
    for item in ami_results:

        lumibn = item[ 'lumiBN'.lower() ]

        if lumibn is None:
            filtered_ami_results.append( item )
        else:

            lumibn_i = int(lumibn)

            if lumibn_i <= lumi_range[1]:
                if lumibn_i >= lumi_range[0]:
                    filtered_ami_results.append( item )
                else:
                    idx = lumibn_list.index( lumibn_i )

                    if idx+1 == len(lumibn_list):

                        try:
                            ## alternative: parsing the file name to get the uppper-bound of the lumi-range of this file
                            lfn = item[ 'LFN'.lower() ]

                            matches = re_lumir.match( lfn )

                            if matches:
                                #lumi_beg = int( re.sub(r'^lb0+', '', matches.group(1)) )
                                lumi_end = int( re.sub(r'^lb0+', '', matches.group(2)) )

                                ## add the item if the end of lumi block covers the lumi-range given by the user
                                if lumi_end >= lumi_range[1]:
                                    filtered_ami_results.append( item )

                        except:
                            ## if it failed in parsing/matching the particular LB string in LFN
                            ## add the item anyway
                            filtered_ami_results.append( item )

                    elif lumibn_list[ idx+1 ] > lumi_range[0]:
                        filtered_ami_results.append( item )

    return filtered_ami_results