#!/usr/bin/env python
import common
import common_util
import os
import os.path
import re
import sys
import time
from sets import Set
from threading import Thread, Lock
from Queue import Queue, Empty
try:
   from dq2.info import TiersOfATLAS
   from dq2.info.TiersOfATLAS import ToATLASCache
   from dq2.clientapi.DQ2 import DQ2
   from dq2.common.DQConstants import DatasetState
   from dq2.common.DQException import DQException, DQSecurityException
   from dq2.location.DQLocationConstants import LocationState
   from dq2.repository.DQRepositoryException import DQUnknownDatasetException
   from dq2.common.client.x509 import get_x509
   from dq2.common.DQConstants import DateCriteria
except ImportError:
   print 'dq2 path is not included into your PYTHONPATH. Will try using dq2 path at CERN via /afs'
   if os.path.exists('/afs/cern.ch/atlas/offline/external/GRID/ddm/current')==1:
       sys.path.insert(0,'/afs/cern.ch/atlas/offline/external/GRID/ddm/current')
   try:
       from dq2.info import TiersOfATLAS
       from dq2.clientapi.DQ2 import DQ2
       from dq2.common.DQConstants import DatasetState
       from dq2.common.DQException import DQException, DQSecurityException
       from dq2.location.DQLocationConstants import LocationState
       from dq2.repository.DQRepositoryException import DQUnknownDatasetException
       from dq2.common.client.x509 import get_x509
       from dq2.common.DQConstants import DateCriteria
   except:
       print 'Something about your DQ2 installation seems wrong, please check it. Maybe your afs mount is unavailable'
       sys.exit(0) 

import ToACache

# Shared DatasetState instance used to translate dataset-state integers
# into their human-readable descriptions (see get_ds_state_desc).
ds_stat = DatasetState()

def get_ds_state_desc(int_state):
    '''
    Translates a dataset state integer into its textual description.
    '''
    desc_table = ds_stat.DESCRIPTION
    return desc_table[int_state]

def get_srm_endpoint(site):
    '''
    Gets the SRM endpoint of a site registered in TiersOfATLAS.

    Returns a dictionary {'token': ..., 'endpt': ...}; both values stay
    None when the site has no 'srm' property registered.
    '''

    srm_endpoint_info = {'token':None, 'endpt':None}
    re_srm2 = re.compile('^token:(.*):(srm:\/\/.*)\s*$')

    tmp = TiersOfATLAS.getSiteProperty(site,'srm')
    if tmp:
        srm_endpoint_info['endpt'] = tmp

        # an SRMv2 endpoint carries a space-token prefix, e.g.
        #   token:ATLASDATADISK:srm://grid-cert-03.roma1.infn.it...
        # split it into the token name and the bare endpoint.
        # NOTE: the match is done only when tmp is non-empty; the original
        # code called re_srm2.match(None) and crashed with TypeError for
        # sites without an 'srm' property.
        mat = re_srm2.match(tmp)
        if mat:
            srm_endpoint_info['token'] = mat.group(1)
            srm_endpoint_info['endpt'] = mat.group(2)

    return srm_endpoint_info

def get_srm_host(site):
    '''
    Gets the SRM hostname of the given site.
    '''
    endpoint = get_srm_endpoint(site)['endpt']

    # authority part of the URI, e.g. 'host:port'
    authority = common.urisplit(endpoint)[1]

    # drop the port, if any
    return authority.split(':')[0]

def get_simple_srmpath(full_srmpath):
    '''
    Removes 'srm/managerv2?SFN' from a full qualified SRM endpoint.
    '''
    # a full SRMv2 endpoint carries ':port/srm/managerv2?SFN=' between the
    # host and the storage path; strip that middle part when present
    re_srmv2_fp = re.compile('^(srm:\/\/.*):([0-9]*)\/srm\/managerv2\?SFN=(.*)$')

    m = re_srmv2_fp.match(full_srmpath)
    if not m:
        return full_srmpath
    return m.group(1) + m.group(3)

def get_srmv2_sites(cloud=None, token=None, debug=False):
    '''
    Gets a list of SRMV2 enabled DDM sites in a given cloud.

    if token is given, only the site with the specific token type
    will be selected.
    '''

    ## a better way of getting all sites within a cloud
    ## however, it seems there is a bug in DQ2 API so it
    ## always returns an empty site list.
    # all_sites   = TiersOfATLAS.getSitesInCloud(cloud)

    ## a bit of hack with non-public DQ2 API interface
    cache = TiersOfATLAS.ToACache

    if not cloud:
        candidates = TiersOfATLAS.getAllDestinationSites()
    elif cloud == 'T0':
        return ['CERNPROD']
    elif cloud not in cache.dbcloud:
        return []
    else:
        candidates = TiersOfATLAS.getSites(cache.dbcloud[cloud])

    srmv2_sites = []
    for site in candidates:
        srm = TiersOfATLAS.getSiteProperty(site,'srm')

        # presuming the srm endpoint looks like:
        #   token:ATLASDATADISK:srm://grid-cert-03.roma1.infn.it ...
        if srm is None or 'token' not in srm:
            continue

        # either no token filter was requested, or the site's token matches
        if not token or srm.split(':')[1] == token:
            srmv2_sites.append(site)

    return srmv2_sites

def get_lfc_host(site):
    '''
    Gets the LFC host of a site registered in TiersOfATLAS.
    '''

    lfc_url = TiersOfATLAS.getLocalCatalog(site)
    if not lfc_url:
        return None

    # take the authority part of the catalog URL and drop its last
    # character (presumably a trailing ':' — TODO confirm URL format)
    return lfc_url.split('/')[2][:-1]

def get_dataset_master_replicas(datasets, nthread=10, debug=False):
    '''
    Gets master replicas of datasets.

    returned dataset master replicas: 
    {
        ds1: ['site A'],
        ds2: ['site B'] 
    }
    '''
    master_replicas = {}

    # preparing the queue of datasets to query
    wq = Queue(len(datasets))
    for ds in datasets:
        wq.put(ds)

    mylock = Lock()

    def worker(id):
        # one DQ2 client per worker thread
        dq2 = DQ2()

        while not wq.empty():
            try:
                ds = wq.get(block=True, timeout=1)
            except Empty:
                continue

            if debug:
                common_util.log('query master replica of %s' % ds)

            try:
                mr = dq2.getMasterReplicaLocation(ds)

                # was an unconditional 'print mr' debug leftover;
                # now only reported in debug mode via the common logger
                if debug:
                    common_util.log('master replica of %s: %s' % (ds, repr(mr)))

                if mr:
                    # release the lock even if the assignment fails
                    mylock.acquire()
                    try:
                        master_replicas[ds] = mr
                    finally:
                        mylock.release()

            except DQException as e:
                common_util.log('%s' % e)

    # starting the worker threads for the parallel queries
    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return master_replicas

def get_dataset_byctime(ctime, ds_pattern=None, site=None, completeOnly=False):
    '''
    Gets the datasets match the given ds_pattern and created by the given ctime in site.

    returned a list of dataset names
    '''

    dq2 = DQ2()

    # offset in seconds of the requested creation time w.r.t. now
    # (negative for timestamps in the past)
    sec = int(ctime - time.time())

    ds_dict = dq2.listDatasetsByCreationDate(sec, DateCriteria.GEQ, dsn=ds_pattern, location=site, complete=completeOnly)

    if not ds_dict:
        return []

    return ds_dict.keys()

def get_dataset_subscriptions(ds_pattern, cloud=None, sites=None, nthread=10, debug=False):
    '''
    Gets the dataset subscriptions within a given cloud.

    If sites is given, the query is restricted to those sites.

    returned dataset subscription dictionary:
    {
        ds1: ['site A', 'site B'],
        ds2: ['site A', 'site C', 'site D'] 
    }
    '''
    sub_locations = {}

    srmv2_sites = get_srmv2_sites(cloud, token=None, debug=debug)

    # restrict to the requested sites, if any
    # (builtin set replaces the deprecated sets.Set)
    if sites:
        srmv2_sites = list(set(srmv2_sites) & set(sites))

    if debug:
        common_util.log('srmv2 sites in %s cloud: %s' % (cloud, ','.join(srmv2_sites)))

    # preparing the queue of sites to query
    wq = Queue(len(srmv2_sites))
    for site in srmv2_sites:
        wq.put(site)

    mylock = Lock()

    def worker(id):

        while not wq.empty():
            try:
                site = wq.get(block=True, timeout=1)
            except Empty:
                continue

            if debug:
                common_util.log('query %s on %s' % (ds_pattern, site))

            subs = get_subscriptions_site(site=site, ds_pattern=ds_pattern, debug=debug)

            if subs:
                # hold the lock only while mutating the shared dict and
                # release it even if something unexpected raises
                mylock.acquire()
                try:
                    for ds in subs:
                        sub_locations.setdefault(ds, []).append(site)
                finally:
                    mylock.release()

    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return sub_locations

def get_dataset_replicas(ds_pattern, cloud=None, sites=None, completeOnly=False, nthread=10, debug=False):
    '''
    Gets the dataset replications within a given cloud.

    If sites is given, the query is restricted to those sites.

    returned dataset replica dictionary: 
    {
        ds1: ['site A', 'site B'],
        ds2: ['site A', 'site C', 'site D'] 
    }
    '''
    dataset_locations = {}

    srmv2_sites = get_srmv2_sites(cloud, token=None, debug=debug)

    # restrict to the requested sites, if any
    # (builtin set replaces the deprecated sets.Set)
    if sites:
        srmv2_sites = list(set(srmv2_sites) & set(sites))

    if debug:
        common_util.log('srmv2 sites in %s cloud: %s' % (cloud, ','.join(srmv2_sites)))

    # preparing the queue of sites to query
    wq = Queue(len(srmv2_sites))
    for site in srmv2_sites:
        wq.put(site)

    mylock = Lock()

    def worker(id):

        while not wq.empty():
            try:
                site = wq.get(block=True, timeout=1)
            except Empty:
                continue

            if debug:
                common_util.log('query %s on %s' % (ds_pattern, site))

            datasets = get_datasets(ds_pattern, site=site, completeOnly=completeOnly, debug=debug)
            if datasets:
                # hold the lock only while mutating the shared dict and
                # release it even if something unexpected raises
                mylock.acquire()
                try:
                    for ds in datasets:
                        dataset_locations.setdefault(ds, []).append(site)
                finally:
                    mylock.release()

    # starting the worker threads for the parallel queries
    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return dataset_locations

def get_subscriptions_site(site, ds_pattern=None, debug=False):
    '''
    Lists the dataset subscriptions to a given site.
    if ds_pattern is given, only the dataset subscription is selected.
    '''

    import fnmatch

    dq2 = DQ2()

    all_subs = dq2.listSubscriptionsInSite(site)

    names = all_subs.keys()

    # filter by the shell-style wildcard pattern, if one was given
    if ds_pattern:
        return fnmatch.filter(names, ds_pattern)
    return names

def get_container_ds_replicas( container_ds ):
    '''
    Lists the replicas of each dataset in the given container.
    '''
    return DQ2().listDatasetReplicasInContainer( container_ds )

def get_datasets_in_container( container_ds ):
    '''
    Lists the datasets belonging to the given container.
    '''
    return DQ2().listDatasetsInContainer( container_ds )

def get_datasets(ds_pattern, site=None, completeOnly=False, debug=False):
    '''
    Lists the datasets with the names match the given pattern.
    If site name is specified, list only datasets have a replica at the given site.
    If completeOnly is True, list only the complete datasets.
    '''

    datasets = [] 

    dq2 = DQ2()

    if site:

        # a pattern ending with '/' denotes a container: expand it into
        # the datasets it contains before querying the site
        ds_pat_list = []
        if ds_pattern.endswith('/'):
            ds_pat_list = dq2.listDatasetsInContainer(ds_pattern)
        else:
            ds_pat_list.append(ds_pattern)

        # this query doesn't count dataset completeness
        ds_tmp = []
        for ds_pat in ds_pat_list:
            ds_tmp += dq2.listDatasetsByNameInSite(site, name=ds_pat)

        # query on each dataset to check the sites with a complete replica
        if completeOnly:

            # multi thread queries on dataset replication information 
            wq = Queue(len(ds_tmp))
            for ds in ds_tmp:
                wq.put(ds)

            mylock = Lock()

            def worker(id):
                # one DQ2 client per worker thread
                mydq2 = DQ2()

                while not wq.empty():
                    try:
                        ds = wq.get(block=True, timeout=1)
                    except Empty:
                        continue

                    if debug:
                        common_util.log('query for dataset replicas: %s' % ds)

                    ds_replicas = mydq2.listDatasetReplicas(ds, old=False)

                    try:
                        total = ds_replicas[site][0]['total']
                        found = ds_replicas[site][0]['found']
                    except KeyError:
                        # no replica info for this site: skip the dataset
                        continue

                    # keep only datasets fully replicated at the site
                    if (total > 0) and (found == total):
                        # release the lock even if the append fails
                        mylock.acquire()
                        try:
                            datasets.append(ds)
                        finally:
                            mylock.release()

            # starting the worker threads for the parallel queries
            threads = []
            nthread = 10
            for i in range(nthread):
                t = Thread(target=worker, kwargs={'id': i})
                t.setDaemon(False)
                threads.append(t)

            for t in threads:
                t.start()

            for t in threads:
                t.join()
        else:
            datasets = ds_tmp
    else:
        if completeOnly:
            common_util.log('complete only dataset selection not supported if DDM site not specified.')
        ds_tmp = dq2.listDatasets(ds_pattern)
        datasets = ds_tmp.keys()

    return datasets

def get_size_datasets(datasets, nthread=10, debug=False):
    '''
    Counts the total amount of the given datasets size
    '''
    finfo = get_files(datasets=datasets, nthread=nthread, debug=debug)

    sum_all = 0
    for ds in finfo.keys():
        # the first element of the per-dataset result maps guid -> file info
        sum_ds = sum(int(info['filesize']) for guid, info in finfo[ds][0].items())
        common_util.log('%s: %d' % (ds, sum_ds) )
        sum_all += sum_ds

    return sum_all

def get_files(datasets, nthread=10, debug=False):
    '''
    Gets GUIDs of the files in datasets.
    '''

    files = {} 

    # fill the work queue with the datasets to be listed
    wq = Queue(len(datasets))
    for ds in datasets:
        wq.put(ds)

    ## no need to create too many threads if the number of datasets is less
    if len(datasets) < nthread:
        nthread = len(datasets)

    mylock = Lock()

    def worker(id):
        # one DQ2 client per worker thread
        dq2 = DQ2()

        while not wq.empty():
            try:
                ds = wq.get(block=True, timeout=1)
            except Empty:
                continue

            if debug:
                common_util.log('list files: %s' % ds)

            rslts = dq2.listFilesInDataset(ds)
            if rslts:
                mylock.acquire()
                files[ds] = rslts
                mylock.release()

    # spawn and run the worker threads
    threads = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return files

def list_deletion(site, dsn, debug=False):
    '''
    List the deleted dataset at site.
    '''
    return DQ2().listDeletedReplicas(site, name=dsn)

def get_file_status(surls, setype='srmv2', nthread=10, debug=False):
    '''
    List file information using lcg-ls
    ''' 
    status = {}

    # preparing the queue of SURLs to check
    wq = Queue(len(surls))
    for surl in surls:
        wq.put(surl)

    mylock = Lock()

    def worker(id):
        while not wq.empty():
            try:
                surl = wq.get(block=True, timeout=1)
            except Empty:
                continue

            cmd = 'lcg-ls -l -t 300 -D srmv2 -T %s %s 2>/dev/null' % (setype, surl)

            if debug:
                # bugfix: log the surl being listed (the original logged
                # the builtin 'file' object instead of the work item)
                common_util.log('list file: %s' % surl)

            (ec, out) = common_util.exec_cmd(cmd, max_trial=1)

            # release the lock even if the result handling fails
            mylock.acquire()
            try:
                if not ec:
                    status[surl] = out[0].strip()
                else:
                    status[surl] = 'UNKNOWN'
            finally:
                mylock.release()

    # starting the worker threads for the parallel lcg-ls calls
    threads   = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return status

def get_ds_metadata(datasets, attrs=[], nthread=10, debug=False):
    '''
    Get ds attributes of multiple datasets in parallel 
    '''
    common_util.log('retrieving dataset attributes: %s' % repr(attrs))

    # pre-fill the result with an empty attribute dict per dataset
    attrvals = dict((ds, {}) for ds in datasets)

    # nothing to query when no attributes were requested
    if not attrs:
        return attrvals

    # queue of datasets for the worker threads
    wq = Queue(len(datasets))
    for ds in datasets:
        wq.put(ds)

    mylock = Lock()

    def worker(id):
        dq2 = DQ2()
        while not wq.empty():
            try:
                ds = wq.get(block=True, timeout=1)
            except Empty:
                continue

            vals = dq2.getMetaDataAttribute(ds, attrs)
            if vals:
                mylock.acquire()
                attrvals[ds] = vals
                mylock.release()

    # cap the number of query threads at 10
    nworkers = nthread
    if nworkers > 10:
        nworkers = 10

    threads = []
    for i in range(nworkers):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return attrvals

def get_file_locations(dataset, cloud=None, token='ATLASDATADISK', debug=False):
    '''
    Summarize the locations of files (in terms of sitename) of a dataset
    '''

    common_util.log('resolving sites with token: %s' % token)
    sites = get_srmv2_sites(cloud, token=token, debug=debug)

    # was an unconditional 'print sites' debug leftover;
    # now only reported in debug mode via the common logger
    if debug:
        common_util.log('candidate sites: %s' % repr(sites))

    replicas = {}
    # preparing the queue of sites to query
    wq = Queue(len(sites))
    for site in sites:
        wq.put(site)

    mylock = Lock()

    def worker(id):
        # one DQ2 client per worker thread
        dq2 = DQ2()
        while not wq.empty():
            try:
                site = wq.get(block=True, timeout=1)
            except Empty:
                continue

            common_util.log('resolving dataset files at %s' % site)
            replicaInfo = dq2.listFileReplicas(site, dataset)
            if replicaInfo:
                # hold the lock only while mutating the shared dict and
                # release it even if something unexpected raises
                mylock.acquire()
                try:
                    for guid in replicaInfo[0]['content']:
                        replicas.setdefault(guid, []).append(site)
                finally:
                    mylock.release()

    # starting the worker threads: one per site, capped at 10
    threads = []

    nthread = len(sites)
    if nthread > 10: nthread = 10

    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    return replicas    

def claim_deletion(sites, datasets, debug=False, deep=True):
    '''
    Claims central deletion on datasets at sites.
    '''
    dq2 = DQ2()
    for site in sites:
        for name in datasets:
            # expand the (possibly wildcard) name into the concrete
            # datasets registered at the site, then claim each deletion
            for dataset in dq2.listDatasetsByNameInSite(site=site, name=name):
                common_util.log('claiming deletion on %s %s' % (site, dataset))
                dq2.deleteDatasetReplicas(dsn=dataset, locations=[site,], deep=deep)
