from config import DataFormats
import logging



class DatasetBuilder(object):
    """Base class to be extended by instrument/manufacturer specific
    builder classes.

    Provides generic file-metadata handling: grouping recently
    created/modified datafiles into prototype datasets, computing common
    per-file metadata elements (timestamps, size, checksum), and checking
    files against previously ingested (ICAT) data so duplicates can be
    flagged for purging.
    """

    def __init__(self, dataFormat):
        """
        :param dataFormat: key into DataFormats.ARTIFACTS identifying the
            instrument data format (frame RE, field separator, ...).
        """
        self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.dataFormat = dataFormat
        self.indexingTemplate = None   # template matching indexing-only files; set per collection
        self.operations = []
        self.protonums = []
        self.priordata = None          # previously retrieved ICAT datasets/datafiles
        self.purgelist = []            # overwritten - reset as reference to supplied list

    def purgeCheck(self, file):
        """If *file* matches a datafile already held in ICAT (self.priordata),
        record it in self.purgelist so the caller can purge it.

        self.purgelist is a reference to the caller-supplied params['PURGE']
        list and is appended to in place; it must never be rebound, since
        mutation is currently the only way state is passed back to the
        calling function.

        :param file: dict with at least a 'name' key (datafile basename).
        :returns: None
        """
        if not self.priordata:
            return
        for dataset in self.priordata:
            if not hasattr(dataset, "_datafileCollection"):
                continue
            for datafile in dataset._datafileCollection:
                if datafile._name == file['name']:
                    # cap debug chatter once the purge list grows large
                    if len(self.purgelist) < 10:
                        self._log.debug("ICAT match on %s dataset %s file %s",
                                        file['name'], dataset._name, datafile._id)
                    self.purgelist.append({'dataset': dataset._name,
                                           'filename': datafile._name,
                                           'datafile': datafile._id})
                    return

    def addCommonFileData(self, filepath, elements=None):
        """Append generic metadata elements (create/modify timestamps, size,
        sha1 checksum) for *filepath* to *elements*, which is mutated in
        place.

        :param filepath: path of an existing, readable file.
        :param elements: list of {'name':..., 'content':...} dicts to extend;
            a fresh (discarded) list is used when None.  The default is None
            rather than [] to avoid the shared-mutable-default pitfall.
        :returns: None -- results are passed back through *elements*.
        """
        import hashlib
        import os
        from datetime import datetime

        if elements is None:
            elements = []

        # NOTE: getctime() is the closest stat offers to a creation time
        # (inode-change time on unix); getmtime() is the content-modify
        # time.  The previous revision had these two assignments swapped.
        stamp = "%Y-%m-%dT%H:%M:%S"
        created = datetime.fromtimestamp(os.path.getctime(filepath)).strftime(stamp)
        modified = datetime.fromtimestamp(os.path.getmtime(filepath)).strftime(stamp)
        size = os.path.getsize(filepath)

        # hash in binary mode and in chunks so large datafiles do not have
        # to be read into memory in one go
        h = hashlib.sha1()
        with open(filepath, 'rb') as f:
            chunk = f.read(65536)
            while chunk:
                h.update(chunk)
                chunk = f.read(65536)
        checksum = "sha1: " + h.hexdigest()

        elements.extend([
            {'name': 'datafile_create_time', 'content': created},
            {'name': 'datafile_modify_time', 'content': modified},
            {'name': 'file_size', 'content': str(size)},
            {'name': 'checksum', 'content': checksum},
        ])

    def remapDir(self, folder):
        """Strip a windows drive prefix ('X:...') or UNC leading '\\\\' from
        *folder*; otherwise return it unchanged.

        The ultimate target form is still undecided -- should this map to
        the unix FHS?
        """
        part = folder.split(':')
        if len(part) == 2:
            # 'X:rest' -> 'rest'
            return part[1]
        if folder.startswith('\\\\'):
            part = folder.split('\\')
            if len(part) > 3:
                # drop the two empty leading components of '\\\\host\\...'
                return '\\'.join(part[2:])
        return folder

    def deduceDatasetsFromFiles(self, collection, indexingTemplate=None):
        """Group the recently modified/created files of *collection* into
        prototype datasets.

        :param collection: object whose .dataFiles attribute maps folder ->
            info dict containing a 'recentFileList'.
        :param indexingTemplate: template matching files used only for
            indexing purposes ('#' stands for a digit); such files are
            segregated into their own group.
        :returns: dict mapping dataset key -> list of files.
        """
        # TODO: also process collection.hierarchy['folders'] here
        # (amalgamate files from non-data folders).
        self.indexingTemplate = indexingTemplate
        datasets = {}
        for folderinfo in collection.dataFiles.values():
            datasets.update(self.genProtosFromFileList(folderinfo['recentFileList']))
        return datasets

    def genProtosFromFileList(self, filelist):
        """Aggregate datafiles into groups based on their filenames and the
        data format's pattern-matching RE.

        The supplied *filelist* is consumed (emptied) as a side effect, as
        in the original implementation.

        :param filelist: list of file paths.
        :returns: dict mapping scan key -> list of datafiles; an empty dict
            when there is nothing to do (previously [] was returned, which
            broke callers that iterate .keys()).
        :raises ValueError: when self.dataFormat is unset or unknown.
        """
        if not filelist:
            return {}  # no info

        from ntpath import split  # handles windows-style paths
        import re

        indexing_re = None
        if self.indexingTemplate:
            # '#' in the template stands for a digit
            indexing = self.indexingTemplate.replace('#', '\\d')
            indexing_re = re.compile('.*' + indexing)
            self._log.debug("\nCheck index against %s", indexing)

        if not self.dataFormat or self.dataFormat not in DataFormats.ARTIFACTS:
            # the old message concatenated the builtin `file` type and could
            # never be rendered; ValueError is still caught by `except Exception`
            raise ValueError("unknown image frame type %s" % (self.dataFormat,))
        collectInfo = DataFormats.ARTIFACTS[self.dataFormat]
        frame_re = collectInfo['data_re']
        field_sep = collectInfo['data_field_sep']

        dset = {}  # receptacle for amalgamated files

        # consume the caller's list (LIFO), as the previous revision did
        while filelist:
            filepath = filelist.pop()

            # preliminary check to isolate indexing files
            if indexing_re and indexing_re.search(filepath):
                dset.setdefault(DataFormats.INDEXING, []).append(filepath)
                continue

            # check for ordinary scan datafile frames to aggregate by scan
            dir, file = split(filepath)
            part = file.split(field_sep)

            # not a frame file, or not part of a coherent dataset
            if not frame_re.search(file) or len(part) <= 1:
                dset.setdefault(DataFormats.SUNDRIES, []).append(filepath)
                continue

            # normal data scan frame: key on dir + basename minus frame number
            scan = dir + "/" + field_sep.join(part[:-1])
            # NOTE(review): only the basename is stored for frame files,
            # whereas the indexing/sundries branches store the full path --
            # confirm downstream consumers expect this asymmetry.
            dset.setdefault(scan, []).append(file)

        # log the datasets
        for key in sorted(dset):
            self._log.debug("base %s files %d", key, len(dset[key]))

        return dset

    def buildFolderSetsFromCollection(self, collection, priordata=None, purgelist=None):
        """Build, for arbitrary non-data folders, the datastructure that the
        XML template method iterates over to dump whole folders of files and
        parameters as datasets.

        :param collection: object with .hierarchy['folders'].
        :param priordata: previously ingested ICAT datasets, consulted by
            purgeCheck().
        :param purgelist: caller's list, appended to in place by purgeCheck();
            when None a throwaway list is used so matches do not crash.
        :returns: list of dataset dicts.
        """
        from os.path import split

        self.priordata = priordata  # local ref
        # local ref -- mutated, never rebound (see purgeCheck)
        self.purgelist = purgelist if purgelist is not None else []

        folders = collection.hierarchy['folders']
        foldernames = sorted(folders)
        dsets = []  # receptacle for amalgamated files
        if not foldernames:
            self._log.debug("No subfolders ????")
            return dsets

        filetypes = DataFormats.FILETYPES
        for folder in foldernames:
            files = []
            # loop over all new files in folder
            for fileinfo in folders[folder]['recentFileList']:
                file, mtime, fclass = fileinfo
                dir, filename = split(file)
                self._log.debug("%s %s %s %s", dir, filename, mtime, fclass)
                fclassinfo = filetypes[fclass]
                format = fclassinfo['format']
                versions = fclassinfo['version']
                # only a single unambiguous version can be reported
                if isinstance(versions, list) and len(versions) == 1:
                    version = versions[0]
                else:
                    version = "?"
                elements = [
                    {'name': 'datafile_format', 'content': format},
                    {'name': 'datafile_format_version', 'content': version},
                ]
                self.addCommonFileData(file, elements)
                file = {'dir': dir, 'name': filename, 'params': [],
                        'elements': elements}
                # probably we need to specify file types etc
                # and or remove ICAT unsupported/registered format types
                self.purgeCheck(file)
                files.append(file)

            dsetname = DataFormats.SUNDRIES if folder == "." else folder
            dsets.append({'proto': None, 'num': None, 'mach': None,
                          'dsetname': dsetname,
                          'description': "additional files",
                          'params': [], 'type': "derived",
                          'files': files})

        return dsets


