#!/usr/bin/python
import os, sets, re, common, common_util
import lfc, lfcthr
from threading import Thread, Lock
from Queue import Queue, Empty

def del_lfc_registries_by_pfns(lfc_host, files, nthread=10, debug=False):
    '''Delete LFC registrations for a list of PFNs.

       Each PFN is resolved to its GUID with lcg-lg and then
       unregistered with lcg-uf; the work is fanned out over nthread
       threads.  Returns the list of PFNs successfully unregistered.
       TODO: use LFC bulk-query APIs once an interface is available.'''
    doneList = []

    # fill the work queue with the PFNs to process
    wq = Queue(len(files))
    for pfn in files:
        wq.put(pfn)

    mylock = Lock()

    def worker(id):
        # keep pulling PFNs until the queue is drained
        while not wq.empty():
            try:
                pfn = wq.get(block=True, timeout=1)
            except Empty:
                continue

            # resolve the GUID for this PFN
            cmd = 'export LFC_HOST=%s; lcg-lg --vo atlas %s 2>/dev/null' % (lfc_host, pfn)
            if debug:
                common_util.log(cmd)
            (ec, out) = common_util.exec_cmd(cmd, max_trial=1)
            if ec:
                continue

            guid = out[0].strip()

            # unregister the PFN from the catalogue
            cmd = 'export LFC_HOST=%s; lcg-uf -v -f --vo atlas %s %s 2>/dev/null' % (lfc_host, guid, pfn)
            if debug:
                common_util.log(cmd)
            (ec, out) = common_util.exec_cmd(cmd, max_trial=1)

            # exit code 256 ("No such GUID") is tolerated as success
            if not ec or ec == 256:
                mylock.acquire()
                doneList.append(pfn)
                mylock.release()

    # spawn the worker threads and wait for them all to finish
    threads = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return doneList

def del_lfc_registries_by_lfns(lfc_host, files, se_pattern=None, nthread=10, debug=False):
    '''deleting files represented by a list of LFNs.
       For LFNs having more than one replica, only remove the replicas matches the se_pattern.      
       Current implementation using multiple calls of lcg-uf.
       TODO: using LFC APIs for bulk query if the interface is available.'''
    doneList = []

    # preparing the queue for querying lfn 
    wq = Queue(len(files))
    for file in files:
        wq.put(file)

    mylock = Lock()
    def worker(id):
        while not wq.empty():
            try:
                file = wq.get(block=True, timeout=1) ## querying GUID using lcg-lg
                lfn  = re.sub(r'^lfn:','',file)
                cmd  = 'export LFC_HOST=%s; lcg-lg --vo atlas lfn:%s 2>/dev/null' % (lfc_host, lfn)
                if debug:
                    common_util.log(cmd)

                (ec, out) = common_util.exec_cmd(cmd, max_trial=1)

                if not ec:
                    guid = out[0].strip()
                    guid = re.sub(r'^guid:','', guid)
                    cmd = 'export LFC_HOST=%s; lcg-lr --vo atlas guid:%s 2>/dev/null' % (lfc_host, guid)
                    if debug:
                        common_util.log(cmd)
                    (ec, out) = common_util.exec_cmd(cmd, max_trial=1)

                    if not ec:
                        # filter out the PFNS to be deleted
                        pfns = []
                        if se_pattern:
                            for line in out:
                                if line.find(se_pattern) > -1:
                                    pfns.append(line.strip()) 
                        else:
                            pfns = map(lambda x:x.strip(), out)
     
                        # do the cleanup
                        failure_cnt = 0 
                        for pfn in pfns:

                            cmd = 'export LFC_HOST=%s; lcg-uf -v -f --vo atlas guid:%s %s 2>/dev/null' % (lfc_host, guid, pfn)
                            if debug:
                                common_util.log(cmd)

                            (ec, out) = common_util.exec_cmd(cmd, max_trial=1)
                            if ec and ec != 256: # something wrong and it's not "No such GUID" error
                                failure_cnt += 1
                     
                        if failure_cnt == 0: # everything is fine 
                            mylock.acquire()
                            ## if a successful copy, perform lfc update 
                            doneList.append(file)
                            mylock.release()
                    else:
                        print 'cannot get pfns: %s' % guid
                else:
                    print 'cannot get guid: %s' % lfn
            except Empty:
                pass

    # starting the migration threads for moving files one-by-one
    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return doneList

def delpfns(pfns, lfc_host, setype='srmv2', lfcOnly=False, nthread=10, debug=False):
    '''Delete the specific replicas listed in the pfns hashtable,
       intended for removing dummy physical replicas of a grid file.

       The hashtable format:
           pfns = {guid1: [pfn1, pfn2, ...], guid2: [pfn1, pfn2, ...]}

       With lfcOnly the replicas are only unregistered from the
       catalogue (lcg-uf); otherwise they are physically deleted as
       well (lcg-del).  Returns the list of GUIDs whose replicas were
       all handled successfully.'''

    doneList = []

    # one queue entry per GUID; a worker handles all PFNs of a GUID
    guids = pfns.keys()
    wq = Queue(len(guids))
    for g in guids:
        wq.put(g)

    mylock = Lock()

    def worker(id):
        while not wq.empty():
            try:
                guid = wq.get(block=True, timeout=1)
            except Empty:
                continue

            failure_cnt = 0
            for pfn in pfns[guid]:
                if lfcOnly:
                    # unregister from the catalogue only
                    cmd = 'export LFC_HOST=%s; lcg-uf -v -f --vo atlas guid:%s %s 2>/dev/null' % (lfc_host, guid, pfn)
                else:
                    # physically delete the replica as well
                    cmd = 'export LFC_HOST=%s; lcg-del -v -t 600 -T %s --vo atlas %s 2>/dev/null' % (lfc_host, setype, pfn)
                if debug:
                    common_util.log(cmd)

                (ec, out) = common_util.exec_cmd(cmd, max_trial=1)

                # exit code 256 ("No such GUID") is tolerated
                if ec and ec != 256:
                    failure_cnt += 1

            # only report the GUID as done when every PFN succeeded
            if failure_cnt == 0:
                mylock.acquire()
                doneList.append(guid)
                mylock.release()

    # spawn the worker threads and wait for them all to finish
    threads = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return doneList
    
def delreplicas(guids, lfc_host, se_host, debug=False):
    """
    Delete all replicas of the given GUIDs from an SE with bulk calls.

    guids    -- list of GUIDs whose replicas on se_host are to be removed
    lfc_host -- LFC server to talk to (exported via LFC_HOST)
    se_host  -- storage element hosting the replicas
    debug    -- if True, log per-chunk progress and error details

    Returns True when every bulk call succeeded with no per-entry
    errors, False otherwise.
    """
    allstat = True
    os.environ['LFC_HOST'] = lfc_host

    stat = 0
    counter = 0
    result, results = [], []
    # lfc_delreplicas is called in chunks of at most 1000 GUIDs
    while counter < len(guids):
        tmp = min(counter + 1000, len(guids))
        try:
            # fixed: these were unconditional bare prints; now gated on debug
            if debug:
                common_util.log('deleting replicas for: %s' % guids[counter:tmp])
            stat, tmpresult = lfc.lfc_delreplicas(guids[counter:tmp], se_host)
            result.extend(tmpresult)
            if debug:
                common_util.log('accumulated result codes: %s' % result)
        except Exception:
            # was a bare "except:": keep the best-effort behaviour but no
            # longer swallow SystemExit/KeyboardInterrupt
            common_util.log("LFC-delreplicas didn't work: %s" % lfc.sstrerror(lfc.cvar.serrno))
            stat = -1
            result = [0]
            allstat = False
        counter = tmp

    # deduplicate the per-entry status codes; [0] alone means "all OK"
    result = list(sets.Set(result))
    if result != [0]:
        results = [lfc.sstrerror(j) for j in result]
        common_util.log('Errors during deletion: %s ' % ' : '.join(results))
        allstat = False
    if stat != 0:
        allstat = False
        common_util.log("LFC-delreplicas Error on %s,%s: %s,%s " % (se_host, lfc_host, lfc.sstrerror(lfc.cvar.serrno), stat))
    common_util.log('LFC-delreplica done; %s entries deleted' % result)
    # fixed: was "if debug and allstat", which logged the (then empty)
    # error list only when everything had actually succeeded
    if debug and not allstat:
        common_util.log('Errors during deletion: %s ' % ' : '.join(results))
    return allstat

def get_pfns(lfc_host, guids, dummyOnly=False, debug=False):
    '''Return the PFNs of the files given by GUIDs as a
       {guid: [pfn, ...]} dictionary.  If dummyOnly is set, only PFNs
       doubly registered on the same SE are returned (the SE is
       determined by the hostname parsed out of each PFN).'''

    pfns = {}
    csum = {}

    chunk_size = 5000  # NOTE(review): unused; kept for compatibility

    common_util.log('using LFC: %s ' % lfc_host)
    os.putenv('LFC_HOST', lfc_host)

    def _resolveDummy(_pfns):
        '''Keep only the GUIDs having several PFNs on the same SE.'''
        _pfns_dummy = {}
        for _guid in _pfns.keys():
            _replicas = _pfns[_guid]
            _replicas.sort()
            seCache = None
            pfnCache = None
            for _pfn in _replicas:
                _se = common.urisplit(_pfn)[1]
                if _se == seCache:
                    # same SE as the previous PFN: it is a dummy; seed the
                    # list with the first PFN of the run on first hit
                    if _guid not in _pfns_dummy:
                        _pfns_dummy[_guid] = [pfnCache]
                    _pfns_dummy[_guid].append(_pfn)
                else:
                    seCache = _se
                    pfnCache = _pfn
        return _pfns_dummy

    if lfc.lfc_startsess('', '') == 0:
        result, list1 = lfc.lfc_getreplicas(guids, "")
        lfc.lfc_endsess()

        if len(list1) == 0:
            common_util.log('no replicas')

        for s in list1:
            # skip empty slots and entries without a physical file name
            if s is None or not s.sfn:
                continue
            pfns.setdefault(s.guid, []).append(s.sfn)
            entry = {'csumtype': '', 'csumvalue': ''}
            if s.csumtype:
                entry['csumtype'] = s.csumtype
            if s.csumvalue:
                entry['csumvalue'] = s.csumvalue
            csum[s.guid] = entry

        if dummyOnly:
            pfns = _resolveDummy(pfns)
    else:
        common_util.log('cannot connect to LFC')

    if debug:
        for guid in csum.keys():
            common_util.log('%s %s:%s' % (guid, csum[guid]['csumtype'], csum[guid]['csumvalue']))

    return pfns

def get_guids_by_lfns(lfc_host, lfns, nthread=10, debug=False):
    '''getting guids corresponding to the given list of LFNs. 
       The method is implemented with multi-thread lfc python API'''

    guids = {}

    # preparing the queue for querying lfn 
    wq = Queue(len(lfns))
    for lfn in lfns:
        wq.put(lfn)

    mylock = Lock()

    def worker(id):
        while not wq.empty():
            try:
                lfn = wq.get(block=True, timeout=1)
                lfn = re.sub(r'^lfn:','',lfn)

                stat = lfcthr.lfc_filestatg()
                res  = lfcthr.lfc_statg(lfn,"",stat)

                if res == 0:
                    mylock.acquire()
                    guids[lfn] = stat.guid 
                    mylock.release()
                else:
                    print 'cannot get guid: %s' % lfn

            except Empty:
                pass

    os.environ['LFC_HOST'] = lfc_host
    lfcthr.init()
    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return guids
    
def get_guids(lfc_host, files, nthread=10, debug=False):
    '''getting guids corresponding to the given list of files represented
       by lfns or pfns). The protocol scheme (lfn: or srm:) is mandatory.
       Current implementation using multiple calls of lcg-lg.
       TODO: using LFC APIs for bulk query if the interface is available.'''

    guids = {}

    # preparing the queue for querying lfn 
    wq = Queue(len(files))
    for file in files:
        wq.put(file)

    mylock = Lock()
    def worker(id):
        while not wq.empty():
            try:
                file = wq.get(block=True, timeout=1)
                #lfn  = re.sub(r'^lfn:','',file)
                ## querying LFN using lcg-la 
                cmd = 'export LFC_HOST=%s; lcg-lg --vo atlas %s 2>/dev/null' % (lfc_host, file)
                if debug:
                    common_util.log(cmd)

                (ec, out) = common_util.exec_cmd(cmd, max_trial=1)

                if not ec:
                    mylock.acquire()
                    # take first line and get rid of r'^guid:' 
                    guids[file] = re.sub(r'^guid:','',out[0].strip())
                    mylock.release()
                else:
                    print 'cannot get guid: %s' % file

            except Empty:
                pass

    # starting the migration threads for moving files one-by-one
    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return guids

def del_lfns(lfc_host, files, is_dir=False, nthread=10, debug=False):
    '''Delete a list of LFNs from the catalogue with lfc-rm, one call
       per entry, fanned out over nthread threads.  When is_dir is
       True, the entries are removed recursively ("lfc-rm -R -f").
       Returns the list of entries removed successfully.
       TODO: use LFC bulk-query APIs once an interface is available.'''
    doneList = []

    # fill the work queue with the entries to delete
    wq = Queue(len(files))
    for entry in files:
        wq.put(entry)

    mylock = Lock()

    def worker(id):
        # recursive removal for directories, plain removal otherwise
        if is_dir:
            cmd_fmt = 'export LFC_HOST=%s; lfc-rm -R -f %s'
        else:
            cmd_fmt = 'export LFC_HOST=%s; lfc-rm %s'

        while not wq.empty():
            try:
                entry = wq.get(block=True, timeout=1)
            except Empty:
                continue

            cmd = cmd_fmt % (lfc_host, entry)
            if debug:
                common_util.log(cmd)

            (ec, out) = common_util.exec_cmd(cmd, max_trial=1)

            if not ec:
                mylock.acquire()
                doneList.append(entry)
                mylock.release()

    # spawn the worker threads and wait for them all to finish
    threads = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return doneList

def get_lfns(lfc_host, files, nthread=10, debug=False):
    '''Return a {file: [lfn, ...]} mapping for files given as guids or
       pfns, using one lcg-la call per file over nthread threads.
       TODO: use LFC bulk-query APIs once an interface is available.'''

    lfns = {}

    # fill the work queue with the entries to resolve
    wq = Queue(len(files))
    for entry in files:
        wq.put(entry)

    mylock = Lock()

    def worker(id):
        while not wq.empty():
            try:
                entry = wq.get(block=True, timeout=1)
            except Empty:
                continue

            # list the logical names registered for this entry
            cmd = 'export LFC_HOST=%s; lcg-la --vo atlas %s 2>/dev/null' % (lfc_host, entry)
            if debug:
                common_util.log(cmd)

            (ec, out) = common_util.exec_cmd(cmd, max_trial=1)
            if ec:
                continue

            # drop the 'lfn:' scheme and surrounding whitespace
            names = [line.replace('lfn:', '').strip() for line in out]
            mylock.acquire()
            lfns[entry] = names
            mylock.release()

    # spawn the worker threads and wait for them all to finish
    threads = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return lfns

def list_lfn_dirs(lfc_host, dirs, nthread=10, debug=False):
    '''List files recursively in a given list of logical directories.

       Runs "lfc-ls -l -R" on each directory, with the listings fanned
       out over nthread worker threads, and returns a flat list of LFNs
       (parent directory joined with file name).
       TODO: use LFC bulk-query APIs once an interface is available.'''

    lfns = []

    # preparing the queue of directories to list
    wq = Queue(len(dirs))
    for dir in dirs:
        wq.put(dir)

    mylock = Lock()
    def worker(id):
        while not wq.empty():
            try:
                dir = wq.get(block=True, timeout=1)
            except Empty:
                continue

            cmd = 'export LFC_HOST=%s; lfc-ls -l -R %s 2>/dev/null' % (lfc_host, dir)
            if debug:
                common_util.log(cmd)

            (ec, out) = common_util.exec_cmd(cmd, max_trial=1)
            if ec:
                continue

            # fix: initialise the parent path so that file entries
            # appearing before any "directory:" header do not raise
            # UnboundLocalError; they default to the queried directory
            ppath = dir
            for line in out:
                data = line.strip()
                if data.endswith(':'):
                    # a "directory:" header line; remember the parent path
                    ppath = data[:-1]
                    continue

                if data.find('-') == 0:
                    # a regular-file entry; the last column is the name
                    fname = data.split()[-1]
                    lfn = os.path.join(ppath, fname)
                    mylock.acquire()
                    lfns.append(lfn)
                    mylock.release()
                    continue

    # starting the listing threads
    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return lfns
