#!/usr/bin/env python
import fnmatch
import gzip
import hashlib
import md5
import os
import re
import shutil
import tarfile
import tempfile
import time
import traceback
import types
import zlib

from Queue import Empty, Queue

from dq2.info import TiersOfATLAS

from Ganga.Utility.Shell import Shell
from Ganga.Utility.GridShell import getShell
from Ganga.Utility.logging import getLogger
from Ganga.Lib.LCG.Utility import get_uuid
from GangaAtlas.Lib.ATLASDataset.DQ2Dataset import DQ2Dataset
from GangaAtlas.Lib.AtlasLCGRequirements.AtlasLCGRequirements import _loadCESEInfo, _downloadCESEInfo

# helper routines
def msg_exception(logger, exc_type, exc_value, exc_traceback):
    '''
    Logs an exception through the given logger: one warning per traceback
    frame, followed by the exception type and value.

    logger        -- a logger object providing a warning() method
    exc_*         -- the triple returned by sys.exc_info()

    Returns an empty string: producing an HTML-safe one-line summary for the
    web report is not implemented yet (see the commented line below), but
    callers expect a string back.
    '''

    for tb in traceback.extract_tb(exc_traceback):
        # each entry is a (filename, line number, function name, text) tuple
        logger.warning('File %s line %d in %s: %s' % tb )
    logger.warning('%s: %s' % (exc_type, exc_value) )

    ## No idea how to make the string safe for web report
    #keyError = ','.join( map( lambda x:x.replace("\n",'') , traceback.format_exception_only( exc_type, exc_value ) ) )

    return ''

def locate(pattern, root=os.curdir):
    '''Locate all files matching supplied filename pattern in and below
    supplied root directory.'''
    top = os.path.abspath(root)
    for dirpath, _dirnames, filenames in os.walk(top):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)

def get_md5sum(fname):
    ''' Calculates the MD5 checksum of a file.

    fname -- path of the file to checksum (read in binary mode, 8096-byte
             chunks so large files are not loaded at once)

    Returns the hex digest string.  Uses hashlib instead of the deprecated
    md5 module; the file handle is closed even if a read fails.
    '''

    m = hashlib.md5()
    f = open(fname, 'rb')
    try:
        while True:
            d = f.read(8096)
            if not d:
                break
            m.update(d)
    finally:
        f.close()
    return m.hexdigest()

def get_adler32sum(fname):
    ''' Calculate the Adler32 checksum of a file.

    fname -- path of the file to checksum (read in binary mode, 8096-byte
             chunks)

    Returns the checksum as a '0x...' hex string.  The running checksum is
    seeded with 1 (the Adler32 initial value), so an empty file yields '0x1'
    instead of crashing on a None checksum as the previous version did.
    '''

    cksum = 1  # Adler32 seed: adler32(d, 1) == adler32(d)
    f = open(fname, 'rb')
    try:
        while True:
            d = f.read(8096)
            if not d:
                break
            cksum = zlib.adler32(d, cksum)
    finally:
        f.close()

    # mask to unsigned 32 bits (py2 adler32 may return negative ints) and
    # remove the trailing 'L' that py2 appends to long literals
    cksum_str = re.sub(r'L$', '', hex(cksum & 0xffffffff))

    return cksum_str

def readStrippedLines(fileName):
    '''reads the given file and returns its lines as a list of strings,
    with leading/trailing whitespace (including newlines) removed'''

    f = open(fileName, 'r')
    stripped = [line.strip() for line in f.readlines()]
    f.close()
    return stripped

def untar(tgzfile, dest_dir):
    '''
    extracts files from a given tgzfile into the given dest_dir.
    This routine is copied from Ganga.Core.Sandbox.

    Does nothing when tgzfile does not exist.
    '''

    logger = getLogger('AutoD3PDMakerUtility')

    if not os.access(tgzfile, os.F_OK):
        return

    # the system 'tar' is used instead of the tarfile module: it is faster
    # and sidesteps a tarfile bug in python 2.4 (http://bugs.python.org/issue4218)
    shell = Shell()
    rc, out, m = shell.cmd1("tar -C %s -xzf %s"%(dest_dir,tgzfile), allowed_exit=[0,255])

    if rc != 0:
        logger.warning("Problem with extracting sandbox file %s to %s: %s" % (tgzfile,dest_dir,out))

## append additional files in the user_area and re-make the gzipped tarball
def renew_userarea_tarball(org_tgz_fpath, athena_run_dir, joption_fpaths=[]):
    '''
    Produces a new gzipped user-area tarball containing up-to-date copies of
    the given job option files.

    org_tgz_fpath  -- path to the original user-area tarball; it must exist,
                      be a valid tar archive and carry a '.gz' suffix,
                      otherwise nothing is done
    athena_run_dir -- archive sub-directory (the Athena run directory) under
                      which the job option files live inside the tarball
    joption_fpaths -- local paths of the job option files to check/add; the
                      mutable default is harmless here since the list is only
                      iterated, never modified

    Returns the path of the newly created tarball, or None when the original
    tarball is missing/unusable.  The original tarball is left untouched.
    NOTE(review): the returned path lies in a fresh temporary directory that
    is never removed here -- presumably the caller cleans it up; confirm.
    '''

    #ick = False
    logger = getLogger('AutoD3PDMakerUtility')

    def __check_and_add_joptions__(tar_fpath, athena_run_dir, jo_paths):
        '''
        Appends to the (uncompressed) tarball every job option file that is
        either missing from the archive or whose MD5 differs from the copy
        already stored under athena_run_dir.  Raises IOError when a given
        job option file does not exist locally.
        '''

        tmpdir = tempfile.mkdtemp()

        # map: local job option path -> archive name to (re-)add
        jo_paths_update = {}

        ## examine the job option files in the tarball and generate an update list
        f_tar = None
        try:
            f_tar = tarfile.open(tar_fpath, mode='r')

            for jo_path in jo_paths:

                add_to_arch = False

                if not os.path.exists(jo_path):
                    raise IOError('job option file not found: %s' % jo_path)

                my_arcname = '%s/%s' % ( re.sub(r'\/$','', athena_run_dir), os.path.basename(jo_path) )

                try:
                    t_info = f_tar.getmember( my_arcname )
                    f_tar.extract( t_info, tmpdir )

                    # re-add only when the stored copy differs from the local one
                    if get_md5sum( jo_path ) != get_md5sum( os.path.join(tmpdir, t_info.name) ):
                        add_to_arch = True

                except KeyError:
                    # the file is not in the archive yet
                    add_to_arch = True

                if add_to_arch:
                    jo_paths_update[jo_path] = my_arcname
        finally:
            if f_tar:
                f_tar.close()
            shutil.rmtree(tmpdir)

        ## re-open the tarball in append mode to add the job option files
        f_tar = None
        try:
            f_tar = tarfile.open(tar_fpath, mode='a')

            for jo_path, arcname in jo_paths_update.items():
                logger.debug( 'add %s into user area tarball' % jo_path )
                f_tar.add( jo_path, arcname=arcname, recursive=False)

        finally:
            if f_tar:
                f_tar.close()

        return

    new_tgz_fpath = None

    if os.path.exists( org_tgz_fpath ) and tarfile.is_tarfile( org_tgz_fpath ):

        # this temporary directory holds the uncompressed tar file and the
        # new tarball that is eventually returned
        tmpdir = tempfile.mkdtemp()

        re_gzipfile = re.compile('(.*)\.gz$')
        match = re_gzipfile.match( org_tgz_fpath )

        if ( match ):

            new_tgz_fpath = os.path.join( tmpdir, 'userarea_%s.tar.gz' % get_uuid() )

            ## gunzip the original tarball into the temporary directory
            tar_fpath = os.path.join( tmpdir, os.path.basename( match.group(1) ) )

            zip_file = gzip.open( org_tgz_fpath, 'rb' )
            tar_file = open( tar_fpath, 'wb' )
            while True:
                d = zip_file.read(8096)

                if not d:
                    break
                else:
                    tar_file.write(d)

            tar_file.close()
            zip_file.close()

            ## modify the tarball
            try:
                __check_and_add_joptions__( tar_fpath, athena_run_dir, joption_fpaths )
            finally:
                ## re-gzip the tarball even if the update above failed
                zip_file = gzip.open( new_tgz_fpath, 'wb' )
                tar_file = open( tar_fpath, 'rb' )
                while True:
                    d = tar_file.read(8096)

                    if not d:
                        break
                    else:
                        zip_file.write(d)

                zip_file.close()
                tar_file.close()

                ## remove the uncompressed tar file in any case
                try:
                    os.remove( tar_fpath )
                except Exception:
                    pass

            #ick = True

    return new_tgz_fpath

def table_output(title, header_list, rowdata_list, fobj):
    '''
    prints the data in a well-formated table with title and headers.

    title        -- caption printed as '[ title ]' (skipped when falsy)
    header_list  -- ordered list of column names; each row dict is indexed
                    by these keys
    rowdata_list -- list of dicts, one per row; a None value prints as 'N/A'
    fobj         -- writable file-like object; flushed before returning
    '''

    # scan through all rows to determine the width of each column
    row_size = {}
    for attr in header_list:
        row_size[attr] = len(attr) + 2

    for item in rowdata_list:

        for attr in header_list:

            if item[ attr ] is None:
                size = 5
            else:
                # repr() length is used for sizing even though plain strings
                # are later printed unquoted, leaving 2 spare characters
                size = len( repr(item[ attr ]) ) + 2

            if row_size[attr] < size:
                row_size[attr] = size


    # formatting the table header
    header_str = ''
    for attr in header_list:
        header_str += attr.center( row_size[attr] )

    if title:
        print >>fobj, '[ %s ]' % title

    if len(rowdata_list) > 0:
        print >>fobj, '-'*sum(row_size.values())
        print >>fobj, header_str
        print >>fobj, '-'*sum(row_size.values())

    # formatting the table contents: numbers right-aligned, containers
    # centered via repr(), everything else assumed to be a string
    for item in rowdata_list:

        row_str = ''

        for attr in header_list:
            if item[ attr ] is None:
                row_str += 'N/A'.center( row_size[attr] )
            else:
                if type( item[ attr ] ) in [ types.IntType, types.LongType, types.FloatType ]:
                    row_str += repr( item[ attr ] ).rjust( row_size[attr] )

                elif type ( item[attr] ) in [ types.DictType, types.ListType ]:
                    row_str += repr( item[ attr ] ).center( row_size[attr] )

                else:
                    row_str += item[ attr ].center( row_size[attr] )

        print >>fobj, row_str

    if len(rowdata_list) > 0:
        print >>fobj, '-'*sum(row_size.values())

    ## flush out the data to the file object
    fobj.flush()

def urisplit(uri):
   """
   Basic URI Parser according to STD66 aka RFC3986

   >>> urisplit("scheme://authority/path?query#fragment")
   ('scheme', 'authority', 'path', 'query', 'fragment')

   """
   # regex straight from STD 66 section B
   regex = '^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?'
   p = re.match(regex, uri).groups()
   scheme, authority, path, query, fragment = p[1], p[3], p[4], p[6], p[8]
   #if not path: path = None
   return (scheme, authority, path, query, fragment)

def get_srm_endpoint(site):
    '''
    Gets the SRM endpoint of a site registered in TiersOfATLAS.

    Returns a dict with keys 'token' and 'endpt'; both stay None when the
    site has no 'srm' property.
    '''

    srm_endpoint_info = {'token':None, 'endpt':None}
    re_srm2 = re.compile('^token:(.*):(srm:\/\/.*)\s*$')

    tmp = TiersOfATLAS.getSiteProperty(site,'srm')
    if tmp:
        srm_endpoint_info['endpt'] = tmp

        # SRMv2 endpoints are stored as 'token:<space token>:srm://...';
        # split the space token from the bare endpoint.  The match is done
        # only when tmp is set: matching None raised TypeError before.
        mat = re_srm2.match(tmp)
        if mat:
            srm_endpoint_info['token'] = mat.group(1)
            srm_endpoint_info['endpt'] = mat.group(2)

    return srm_endpoint_info

def get_srm_host(site):
    '''
    Gets the SRM hostname of the given site.
    '''
    endpt = get_srm_endpoint(site)['endpt']
    # authority component is 'host[:port]'; keep only the host part
    host_port = urisplit(endpt)[1]
    return host_port.split(':')[0]


def get_site_domain(site):
    '''
    Gets the site domain (as a regex-usable string).

    Uses the 'domain' property from TiersOfATLAS when present; otherwise
    falls back to a pattern built from the site's SRM hostname.
    '''

    tmp = TiersOfATLAS.getSiteProperty(site,'domain')

    if tmp:
        return tmp.split('/')[0]

    # no 'domain' property registered: match on the SRM host instead
    return '.*%s.*' % get_srm_host(site)


def get_cloud_statistics():
    """analyzing the task repository to calculate number of jobs in different status on different cloud"""

    # per-cloud counters: [all, running, completed, failed]
    load_cloud = {}

    # NOTE(review): 'tasks' is not defined in this module -- presumably it is
    # injected by the Ganga GPI namespace; confirm before standalone use.
    for t in tasks:
        for trf in t.transforms:

            try:
                cloud = trf.backend.requirements.cloud
                counters = load_cloud.setdefault(cloud, [0, 0, 0, 0])

                counters[0] += trf.n_all()
                counters[1] += trf.n_status('running')
                counters[2] += trf.n_status('completed')
                counters[3] += trf.n_status('failed')
            except AttributeError:
                # transforms without a cloud requirement are skipped
                pass

    return load_cloud

def findClouds(atlas_rtag, datasets, use_blacklist=True, bl_locs=None, completeDSOnly=True):
    """Match ATLAS release tag availability against dataset locations.

    atlas_rtag     -- release tag queried via lcg-info (e.g. 'VO-atlas-...')
    datasets       -- list of DQ2 dataset names
    use_blacklist  -- also exclude sites blacklisted in the CE/SE info cache
    bl_locs        -- extra locations to exclude (default: none); the
                      caller's list is copied and no longer mutated
    completeDSOnly -- only consider locations holding a complete replica

    Returns a dict: dataset name -> {cloud: [matching locations]}.
    """

    # copy: the previous mutable default ([]) accumulated blacklist entries
    # across calls, and '+=' also mutated any caller-supplied list
    bl_locs = [] if bl_locs is None else list(bl_locs)

    ## retrieve blacklisted sites (refresh the cached CE/SE info if older than 1h)
    if use_blacklist:
        ceseinfo = _loadCESEInfo()
        if not ceseinfo or ( time.time() - ceseinfo['time'] > 3600 ):
            ceseinfo_new = _downloadCESEInfo()
            if ceseinfo_new:
                ceseinfo = ceseinfo_new

        if ceseinfo:
            bl_locs += ceseinfo['blacklist']

    ## getting the SE name close to the CE where the required ATLAS release tag is deployed
    cmd = 'lcg-info --vo atlas --list-ce --query \'Tag=%s\' --attrs CloseSE --sed' % atlas_rtag

    s = getShell('GLITE')
    rc, out, m = s.cmd1(cmd,allowed_exit=[0,255])

    se_list = []
    if rc == 0:
        for l in out.strip().split('\n'):
            # --sed output is '<ce>%<se1>&<se2>&...'; the stripped line was
            # previously discarded (l.strip() result unused)
            l = l.strip()
            ce,ses = l.split('%')
            se_list += ses.split('&')

    ## invert the cloud mapping once (loop invariant, hoisted out of the
    ## per-dataset loop)
    dbcloud = {}
    for dbc,cloud in TiersOfATLAS.ToACache.dbcloud.items():
        dbcloud[cloud] = dbc

    ## getting the dataset locations and matching them with release locations
    good_locs_map = {}
    for dataset in datasets:

        ds = DQ2Dataset()
        ds.dataset += [ dataset ]

        if completeDSOnly:
            locs = ds.get_locations( complete=1 )
        else:
            locs = ds.get_locations( complete=0 )

        good_locs = []
        for loc in locs:
            if loc not in bl_locs:
                # a location is good when its site domain matches one of the
                # SEs close to a CE that has the release deployed
                re_se = re.compile( get_site_domain(loc) )
                for se in se_list:
                    if re_se.match(se):
                        good_locs.append(loc)
                        break

        ## group the good locations by cloud
        good_clouds = {}
        for loc in good_locs:
            c = dbcloud[ TiersOfATLAS.whichCloud(loc) ]
            good_clouds.setdefault(c, []).append(loc)

        good_locs_map[dataset] = good_clouds

    return good_locs_map

def resolve_atlas_rtag(atlas_project='', atlas_release='', platform='i686-slc5-gcc43-opt'):
    """resolving the application tag name of the Athena software deployment on the Grid"""

    logger = getLogger('AutoD3PDMakerUtility')

    revs = atlas_release.split('.')

    if len(revs) > 3:
        # 4+ version components: a patch/cache release; default the project
        # to AtlasProduction when none is given
        if not atlas_project:
            atlas_project = 'AtlasProduction'

        if atlas_project == 'AtlasProduction':
            # AtlasProduction tags carry at most the first 4 version numbers
            version = '.'.join(revs[:4])
        else:
            version = '.'.join(revs)
    else:
        # no more than 3 version components: assume an offline base release
        atlas_project = ''
        version = '.'.join(revs[:3])

    suffix = '%s-%s' % (version, platform)

    if atlas_project in [ 'AtlasPoint1', 'AtlasTier0', 'AtlasProduction' ]:
        prefix = 'VO-atlas-%s' % re.sub('^Atlas', '', atlas_project).lower()
    elif atlas_project:
        prefix = 'VO-atlas-%s' % atlas_project.lower()
    else:
        prefix = 'VO-atlas-offline'

    atlas_rtag = '%s-%s' % (prefix, suffix)

    logger.debug('Atlas release Tag: %s' % atlas_rtag)

    return atlas_rtag

def get_remote_txt_content(url):
    """
    Get the content referred by the given URL (fetched with curl).

    Returns the content as a string, or None when the download fails.
    """
    rc, out, m = Shell().cmd1('curl -s %s' % url, allowed_exit=[0, 255])

    if rc == 0:
        return out
    return None

# helper classes
class AutoD3PDMakerError(Exception):
    """
    Basic class for general AutoD3PDMaker errors.
    """
    def __init__(self, message):
        # pass the message on to Exception so that str(e) and e.args work
        # as expected (previously str(e) was always empty)
        Exception.__init__(self, message)
        self.message = message

class AutoD3PDMakerQueue(Queue):
    '''
    Extended Queue object for storing/handling a list of AutoD3PDMakerTasks in memory.
    '''
    def __init__(self, fpathDump='d3pdmaker_task.list'):
        Queue.__init__(self)
        # path of the file dumpTasks() writes the task list to
        self.fpathDump = fpathDump

    def dumpTasks(self, mode='w'):
        """dumps all unfinished tasks to fpathDump, one repr() per line"""
        f = open(self.fpathDump, mode)

        try:
            while not self.empty():
                # another consumer may drain the queue between empty() and
                # get_nowait(); Empty is now properly imported (it previously
                # raised NameError here) and simply ends the dump
                try:
                    t = self.get_nowait()
                except Empty:
                    break
                f.write( '%s\n' % repr(t) )
        finally:
            # close the file even when a write fails
            f.close()
