#!/usr/bin/env python

import sys
import os
import errno

import FATDE
import FATDirectory
import FileSystem

debug = False

def compareFiles(o1, o2):
   """cmp-style comparator for directory entries: order primarily by
   creation date, breaking ties with the starting cluster number.

   Returns -1, 0 or 1 as expected by Python 2's list.sort(cmp).
   (The original fell through without an explicit return on some paths;
   this version always returns an int.)
   """
   if o1.getCreateDate() != o2.getCreateDate():
      return -1 if o1.getCreateDate() < o2.getCreateDate() else 1
   # dates equal, use cluster num as the tie-breaker
   if o1.getClusterNum() != o2.getClusterNum():
      return -1 if o1.getClusterNum() < o2.getClusterNum() else 1
   return 0

def printStats(fileSystems, image):

   total = 0
   sectors = image.findDirectorySectors()
   print "len(dirSectors)", len(sectors)
   for s in sectors:
      total += len(image.getSectorEntries(s))

   print "\tNum dir entries in sectors", total
   
   total = 0
   for s in sectors:
      if image.startDir(image.readSector(s)):
         total += 1
         
   print "\tNum root dir sectors", total
   
   for fs in fileSystems:
      print fs
      
      dirClusters = image.findDirectoryClusters(fs)
      
      print "\tlen(dirClusters)", len(dirClusters)
      numRoot = 0
      numFullRoot = 0
      
      total = 0
      for c in dirClusters:
         entries = image.getClusterEntries(c, fs)
         total += len(entries)
         
      print "\tdir entries", total
         

      for c in dirClusters:
         cluster = image.readCluster(c, fs)
         if image.startDir(cluster) and image.validCluster(c, fs):
            numRoot += 1
            if image.full(cluster):
               numFullRoot += 1
               
      print "\tNum root dir clusters", numRoot, "[" + str(numFullRoot) + "]"

class Image:
   
   def __init__(self, imageName):
      self.imageName = imageName
#      self.ifp = open(imageName)
      
      
   #
   #  Find all sectors that could be a directory structure
   #
   #  This takes a while ... so save the results if you run it.
   #  Subsequent runs can just use the saved values
   #
   def findDirectorySectors(self):
      cacheName = 'dat/' + os.path.basename(self.imageName) + '_sectors.dat'
      if os.path.exists(cacheName):
         sectors = []
         for line in open(cacheName):
            line = line[:-1]
            sectors.append(int(line))
         return sectors
      else:
         sectors = FATDE.findDirectorySectors(self.imageName, True)
         if not os.path.exists("dat/"):
            try:
               os.makedirs("dat/")
            except OSError as exc: # Python >2.5
               if exc.errno == errno.EEXIST:
                  pass
               else: raise exc.errno
         
         cache = open(cacheName, 'w')
         for sector in sectors:
            cache.write(str(sector) + '\n')
         cache.close()
         
         return sectors
         
   def readSector(self, sectorNum):
      return FATDE.readSector(self.imageName, sectorNum)

   #
   #  We have two directory entries that point to the same place
   #  
   #
   def __findFileSystem(self, root, pointer, rootentries):
      index = rootentries.index(pointer) - 1
      
      # find the root entry for d[1] so we can get its cluster number
      while not rootentries[index].name == "." + 10 * ' ':
         index -= 1
         
      rootpointer = rootentries[index]
         
      # Now we have two measures of distance
      s1 = root.offset / 512
      s2 = rootpointer.offset / 512
      
      c1 = root.cluster()
      c2 = rootpointer.cluster()
      
      ## This two distances are the same, so we can work out the cluster size
      if c2 == c1:
         return None
         
      clustersize = (s2 - s1) / (c2 - c1)
      dataarea = s1 - c1 * clustersize
      
      return FileSystem.FileSystem(clustersize, dataarea)


   def startDir(self, sector):
      e1 = FATDE.FATDE(sector[0:0x20])
      e2 = FATDE.FATDE(sector[0x20:0x40])
      
      return e1.name == "." + 10 * " " and e2.name == ".." + 9 * " "

   #
   #  We have a collection of FAT directory entries. Now we want to establish how the file system is organised.
   #
   #  There may be more than one file system. We can use the redundancy that a directory refers to itself.
   #  There should be more than one entry pointing to directory (i.e. original entry, self reference, children).
   #
   #  So, we find all root entries (those starting '.' and '..') and then within those we find any entries that
   #  point to the '.' entry.
   #
   #  When we have two entries pointing to the same directory, then we can calculate the cluster size and the start of the data
   #  area
   #
   def findFileSystems(self):

      sectors = self.findDirectorySectors()

      rootentries = []
      # Collect all directory entries which originate in a root sector
      for sectorNum in sectors:
         sector = self.readSector(sectorNum)
         if self.startDir(sector):     
            # we have a root sector
            for offset in range(0, 512, 0x20):
               # for each directory entry (0x20 apart)
               dir_entry = FATDE.FATDE(sector[offset:offset+0x20])
               if dir_entry.endOfDirList():
                  # no more entries in this sector               
                  break
                  
               if not dir_entry.longFileName():
                  # Don't want long file names
                  dir_entry.offset = sectorNum * 512 + offset # Remember where we found it
                  rootentries.append(dir_entry)
               
         
      # have my list of entries ... now to work.
      relateddirs = []
      for i in range(len(rootentries)):
         if rootentries[i].name == "." + 10 * ' ':
            same = []
            same.append(rootentries[i])
            for j in range(len(rootentries)):
               if not i == j and rootentries[j].cluster() == rootentries[i].cluster():
                  same.append(rootentries[j])
                  
            relateddirs.append(same)
      
      #
      #  Now find the file systems
      #
      fileSystems = []
      for d in relateddirs:
         if len(d) > 1:
            fs = self.__findFileSystem(d[0], d[1], rootentries)
            
            if not fs is None:
               if fs not in fileSystems:
                  # check cluster size a power of 2
                  if fs.clusterSize <> 0 and fs.clusterSize & (fs.clusterSize - 1) == 0:
                     # check data area is sensible
                     if fs.dataArea >= 0:
                        fileSystems.append(fs)

      return fileSystems
   
   # NB, this needs to be relatively thread-safe.
   # initially there was a class file pointer (self.ifp)
   # and disaster ensued. Each call now gets its own private
   # file pointer.
   def readCluster(self, clusterNum, fs):
      ifp = open(self.imageName)
      ifp.seek((clusterNum * fs.clusterSize + fs.dataArea) * 512)
      
      return ifp.read(fs.clusterSize * 512)
      
   def getClusterEntries(self, clusterNum, fs):
      c = self.readCluster(clusterNum, fs)
      
      entries = []
      for offset in range(0, len(c), 0x20):
         # for each directory entry (0x20 apart)
         dir_entry = FATDE.FATDE(c[offset:offset+0x20])
         if dir_entry.endOfDirList():
            # no more entries in this cluster               
            break

         entries.append(dir_entry)

      return entries
      
   def getSectorEntries(self, sectorNum):
      s = self.readSector(sectorNum)
      
      entries = []
      for offset in range(0, len(s), 0x20):
         # for each directory entry (0x20 apart)
         dir_entry = FATDE.FATDE(s[offset:offset+0x20])
         if dir_entry.endOfDirList():
            # no more entries in this cluster               
            break

         entries.append(dir_entry)

      return entries
      
   def getEntries(self, dirClusters, fs):
      entries = []
      # Collect all directory entries which originate in a root sector
      for clusterNum in dirClusters:
         cluster = self.readCluster(clusterNum, fs)
         for offset in range(0, len(cluster), 0x20):
            # for each directory entry (0x20 apart)
            dir_entry = FATDE.FATDE(cluster[offset:offset+0x20])
            if dir_entry.endOfDirList():
               # no more entries in this cluster               
               break
               
            if not dir_entry.longFileName():
               # Don't want long file names
               dir_entry.offset = clusterNum, offset # Remember where we found it
               entries.append(dir_entry)

      return entries
      
   def getAllEntries(self, dirClusters, fs):
      entries = []
      # Collect all directory entries which originate in a root sector
      for clusterNum in dirClusters:
         cluster = self.readCluster(clusterNum, fs)
         for offset in range(0, len(cluster), 0x20):
            # for each directory entry (0x20 apart)
            dir_entry = FATDE.FATDE(cluster[offset:offset+0x20])
            if dir_entry.endOfDirList():
               # no more entries in this cluster               
               break
               
            dir_entry.offset = clusterNum, offset # Remember where we found it
            entries.append(dir_entry)

      return entries

   def findDirectoryClusters(self, fs):
      sectors = self.findDirectorySectors()
      dirclusters = []
      for sector in sectors:
         if (sector - fs.dataArea) % fs.clusterSize == 0:
            dirclusters.append((sector - fs.dataArea) / fs.clusterSize)
      
      return dirclusters
      
   def full(self, cluster):
      # Check last entry ... if valid, => the directory is full
      dir_entry = FATDE.FATDE(cluster[-0x20:])
      return dir_entry.isValid()
      
   def validCluster(self, c, fs):
      cluster = self.readCluster(c, fs)
      first = FATDE.FATDE(cluster[0:0x20])
      return c == first.cluster()
      
   def findBlocks(self, fs):
      # Get the directory clusters
      dirclusters = self.findDirectoryClusters(fs)

      entries = self.getEntries(dirclusters, fs)
          
      # sort by creation time so that we can get blocks  
      entries.sort(compareFiles)
      
      
      # Let's find the blocks
      blocks = []
      block = []
      next = entries[0].getSizeInClusters(fs.clusterSize * 512)
      currentCluster = entries[0].getClusterNum()
      next = currentCluster + next
      
      # Start the block off ...
      block.append(entries[0])

      for entry in entries[1:]:   # entries[0] handled separately
         # . and .. are virtual entries, don't contribute to blocks, so ignore
         if entry.name not in ["." + 10 * " ", ".." + 9 * " "]:
            # Check if this entry follows on from the previous
            #  ... a zero size file fits anywhere
            #  ... or snug fit: entry starts after next
            if next <> entry.getClusterNum():
               # Doesn't follow neatly ... need a new block 
               blocks.append(block) # ... first finish this one
               block = [] # ... create the new block
            block.append(entry)

            if entry.getClusterNum() <> 0:
               # Why might this happen?? If an empty file?
               currentCluster = entry.getClusterNum()

            next = currentCluster + entry.getSizeInClusters(fs.clusterSize * 512)
            
      return blocks
      
   def findRootDirs(self, fs):
      # Get the directory clusters
      dirclusters = self.findDirectoryClusters(fs)
      rootDirs = []
      for c in dirclusters:
         cluster = self.readCluster(c, fs)
         #
         #  Maybe should consider cluster 0 a root directory.
         #  Add it in later ... retro fit it if it points to other fs entries.
         #
         #if (self.startDir(cluster) and self.validCluster(c, fs)) or c == 2:
         if self.startDir(cluster) and self.validCluster(c, fs):
            chain = [ c ]
            rootDirs.append(chain)
            if not self.full(cluster):
               chain.append(-1)
               
      return rootDirs
      
   #
   #  This technique basically uses the fact that we know the time (probably) that a cluster was added
   #  to a directory. We then have the last allocated cluster and assume that we are added to the same
   #  directory.
   #     A better technique would be to [1] first use all parent directories to know which clusters belong
   #  to a given directory. [2a] Then use LFN chksums to join them up.
   #  [2b] Then use LFN entries to join other directories (only join if unique matching clusters)
   #  [3] Then use this create technique to add any unfound entries where they fit.
   #  [4] Finally use .doc files metadata to guess where directories go.
   #
   def joinDirectoryClusters(self, rootDirs, blocks, fs):
      chains = {}

      #
      #  Now using the blocks, I want to match up directories.
      #
      for i in range(1, len(blocks)):
         prev = blocks[i-1]
         next = blocks[i]
         if next[0].cluster() - prev[-1].getNextFreeCluster(fs.clusterSize * 512) == 1:
            # Candidate for joining up
            # Space for a cluster. Often joined up when a directory is extended by one cluster.
            # next.offset[0] is the directory cluster which this file belongs to ...
            if next[0].offset[0] == prev[-1].getNextFreeCluster(fs.clusterSize * 512):
               # ... it matches the block
               # Can join these blocks
               if debug: print prev[-1].offset[0], "extended by", next[0].offset[0]
               
               # remember this chain so we can connect it next time.
               chains[prev[-1].offset[0]] = next[0].offset[0]
            else:
               if debug:
                  print "Cannot extend"
                  print "\t", prev[-1] 
                  print "\t", next[0] 
      
      for r in rootDirs:
         end = r[-1]
         if end <> -1:
            while end in chains:
               end = chains[end]
               r.append(end)
               if not self.full(self.readCluster(end, fs)):
                  r.append(-1)
                  break
   
      return rootDirs   # Don't really need to return this, because we have joined them in place.
      

   def printRootDirs(self, rootDirs):
      for r in rootDirs:
         print r
      
   def validateDirectoryClusterChains(self, rootDirs, fs):
      #
      #  Now we check directory cluster chains to ensure that they match from a LFN point of view.
      #  A sequence of clusters representing a directory must match LFNs.
      #  We use the matchFollowing method to check if the last entry of a cluster matches the first of the
      #  following cluster.
      #            
      misMatches = 0
      for r in rootDirs:
         if debug: print r
         # Print entries across clusters to see if we are matching
         for i in range(1, len(r)):
            if r[i] <> -1:
               c1 = self.readCluster(r[i - 1], fs)
               c2 = self.readCluster(r[i], fs)
               e1 = FATDE.FATDE(c1[-0x20:]) # last entry of previous
               e2 = FATDE.FATDE(c2[:0x20]) # first of second
               match = e1.matchFollowing(e2)
               if match < 0:
                  misMatches += 1
                  if debug:
                     print "\tOoops, non matching clusters for c[", (i-1), "] in dir sequence" 
                     print "\t", e1, " [* " + str(match) + " *]"
                     print "\t", e2
                     print "\t###"
                     
      return misMatches
      
   def getNumClusters(self, rootDirs):
      numClusters = 0
      for r in rootDirs:
         for c in r:
            if c <> -1: 
               numClusters += 1
                     
      return numClusters
   
   #
   #  Recursively update all directory entries, so they point to the actual entry.
   #  Now if I find an entry ... a FATFile, I get the FATDE entry, but I also get
   #  the Directory with the children so I can follow the chain.
   #   
   # Isn't going to work, because getChildren builds up the list every time.
   def update(self, directory, directories):
      for child in directory.getChildren():
         # Need to check that it's not done already
         if child.isDirectory():
            cluster = child.getCluster()
            if cluster in directories:
               childDir = directories[cluster]
               child.directory = childDir
               print child.getName(), child.directory, len(child.directory.getChildren())
            else:
               print str(cluster) + " not in directories!!" 
            #self.update(childDir, directories)

   #
   #  Using this can find the directory given the cluster number.
   #
   def getDirectories(self, image, fs, rootDirs):
      # From the rootDIr, create an actual FATdirectory
      # Store directory entries
      clusterDirs = {}
      for r in rootDirs:
         cluster = r[0]
         directory = FATDirectory.FATDirectory(image, fs, r)
         if cluster in clusterDirs:
            print "\t*** Bugger, more than one directory with cluster: ", cluster
         else:
            clusterDirs[cluster] = directory
            
      return clusterDirs
   
   #
   #  Finds orphans ... directories who have lost their parent directories
   #
   def findOrphans(self, directories):
      #
      #  So we have a load of directory chains. Now we need to establish parenthood.
      #     or lack thereof
      #
      orphans = []
      for d in directories.values():
         parent = d.getParent()
         if debug: print parent, d
         if not parent in directories:
            # This directory doesn't really exist. Probably overwritten or else was the root directory which has no parent.
            orphans.append(d.getCluster())            

      return orphans
      
   def checkUnaccountedDirectories(self, rootDirs, fs):
      #
      #  Run through any unaccount directory clusters FIXME: Gotta follow this through
      #
      accounted = []
      for r in rootDirs:
         for c in r:
            accounted.append(c)

      #
      #  Examine any unaccountfor clusters ...
      #
      dirclusters = self.findDirectoryClusters(fs)
#      if debug:  
      if True:  
         print set(dirclusters) - set(accounted)
         
         for c in [113, 21, 123]:
            print "+++", c, "+++"
            
            entries = self.getClusterEntries(c, fs)
            for entry in entries:
               print entry
      
         for c in set(dirclusters) - set(accounted):
            if c <> -1:
               print "+++", c, "+++"
               
               entries = self.getClusterEntries(c, fs)
               for entry in entries:
                  if not entry.isValid():
                     print "**\t",
                  print entry

   #
   #  Find the file systems and initialise all the directory structures.
   #
   def getFileSystems(self):
      # Find any file systems
      fileSystems = self.findFileSystems()
      if debug: print fileSystems
      if debug: printStats(fileSystems, self)
      
      # Now initialise the file systems so they are usable
      for fs in fileSystems:
         #  Remember the image.
         fs.image = self
         # This searchs thru all the entries to locate contiguous blocks
         #  Blocks can be used to identify non fragmented clusters.
         blocks = self.findBlocks(fs)
         
         # Find all start of directories ('.'., '..' as first entries)
         rootDirs = self.findRootDirs(fs)
         
         self.joinDirectoryClusters(rootDirs, blocks, fs)
         #for r in rootDirs:
         #   print r
         
         numMismatches = self.validateDirectoryClusterChains(rootDirs, fs)
         if numMismatches > 0:
            print "Ooops, LFN entries mismatch in directory cluster chains."
         
         print "Num valid clusters ", self.getNumClusters(rootDirs)

         directories = self.getDirectories(self, fs, rootDirs)
         
         orphans = self.findOrphans(directories)
         for c in orphans:
            fs.rootDirs.append(directories[c])
            
         # remember the directories in the file system
         fs.directories = directories
            
         # Give names to the root directories
         # Set up directory names
         for i in range(len(fs.rootDirs)):
            clusterNum = fs.rootDirs[i].getCluster()
            fs.rootDirs[i].name = "C" + str(clusterNum)
         
         #print "FS Root dirs"
         #for d in fs.rootDirs:
         #   print d,
         #print
         
         if debug:
            for d in directories.values():
               print d.getParent(), d.clusters
         
         #self.checkUnaccountedDirectories(rootDirs, fs)
      # now set the names for the file systems
      for i in range(len(fileSystems)):
         fileSystems[i].name = "FS" + str(i+1)
         
      return fileSystems

   #
   #  get all sub directories.
   #     returns subdir such that subdir[parentClusterNum] returns the cluster
   #     nums of all sub directories of parentClusterNum
   #
   def getSubDirs(self, fs, dirclusters):
      entries = self.getEntries(dirclusters, fs)

      # Find all directories
      dirs = filter(lambda x: x.isDirectory(), entries)
      print "len(dirs)", len(dirs)

      # Find all directories which refer to parents.
      parents = filter(lambda x: x.isDotDot(), dirs)
      print "len(parents)", len(parents)
      
      # Add all these clusters to the parent
      sameParents = {}
      for p in parents:
         # Also check that p.creation time == p.getClusterNum().creationTime()
         sameParents.setdefault(p.getClusterNum(), set()).add(p.getHoldingCluster())
         
      return sameParents # really a dictionary of sub directories
      
   # The main problem with rebuilding a FAT file system is reconstructing directory structures.
   # Here we use subdirectory entries and match them to parent entries
   def joinDirs(self, fs, dirClusters):
      # using the sub directories, find all the clusters which belong to a particular directory.
      entries = self.getEntries(dirClusters, fs)
      dirs = filter(lambda d: d.isDirectory(), entries)
      ordinaryDirs = filter(lambda d: not d.isDotted(), dirs)

      parentDirs = filter(lambda x: x.isDotDot(), dirs)
      parents = {}
      for d in parentDirs:
         parents[d.getHoldingCluster()] = d.getClusterNum()
      print "parent keys = ", sorted(parents.keys())
      
      print "Ord dirs = ", sorted([d.getClusterNum() for d in ordinaryDirs])

      for d in ordinaryDirs:
         print d.getClusterNum(),
         if d.getClusterNum() in parents.keys():
            print parents[d.getClusterNum()]
         else:
            print " not in parents!"

      print "len(ordinaryDirs) = ", len(ordinaryDirs), ", len(parents) = ", len(parents)
      #for d in ordinaryDirs:
      #   print "\t** :" + str(d) + ": **"
      
         
      return clusterSets

def main(args):
   """Recover file systems from the disk image named in args[1], then for
   the first file system check each ordinary directory entry against the
   '.'/'..' entries of the cluster it points to, printing a running
   classification and final tallies."""
   # Initialise the image
   image = Image(args[1])
   
   fileSystems = image.getFileSystems()
   
   # Choose just one file system
   fs = fileSystems[0]

   dirClusters = image.findDirectoryClusters(fs)
   entries = image.getEntries(dirClusters, fs)
   dirEntries = filter(lambda e: e.isDirectory(), entries)
   ordinaryDirEntries = filter(lambda e: not e.isDotted(), dirEntries)
   
   print "len(ordinaryDirEntries) =", len(ordinaryDirEntries)
   
   # NOTE(review): computed but never used below.
   ordinaryDirs = [e.getClusterNum() for e in ordinaryDirEntries]
   
   # NOTE(review): rebinds dirClusters from a list of cluster numbers to a
   # dict of parent cluster -> set of child holding clusters.
   dirClusters = {}
   valid = 0
   notValid = 0
   notParent = 0
   notSame = 0
   for d in ordinaryDirEntries:
      # Each entry should point at a cluster whose first two entries are
      # '.' and '..'; classify how well that holds for this one.
      cluster = image.readCluster(d.getClusterNum(), fs)
      first = FATDE.FATDE(cluster[0:0x20])
      if not first.isValid():
         print " not pointing to a valid directory entry."
         print "\t", d
         print
         notValid += 1
         continue
      if not first.isDot():
         print " not a parent directory entry"
         print "\t", d
         notParent += 1
         continue
      parent = FATDE.FATDE(cluster[0x20:0x40])
      valid += 1
      if d.getHoldingCluster() == parent.getClusterNum():
         print "** Valid **"
      else:
         print "Valid and not in parent cluster!"
         print "\tp = ", parent.getClusterNum(), "\tHolding = ", d.getHoldingCluster()
         notSame += 1
      print "\t", hex(d.getHoldingCluster()), d
      print "\t", hex(parent.getClusterNum()), parent
      
      dirClusters.setdefault(parent.getClusterNum(), set()).add(d.getHoldingCluster())

      
   print
   print "valid = ", valid, "notValid = ", notValid, "notParent = ", notParent, "notSame = ", notSame
   
   for d in sorted(dirClusters.keys()):
      clusters = dirClusters[d]
      print d, sorted(list(clusters))

         
# Then starting with newer file systems, mark clusters, mark physical disk as done.
# Then onto older ones, noting that parts of the disk have been overwritten by the newer file systems.
      
if __name__ == '__main__':
   # NOTE(review): rebinds the module-level flag -- it is already False at
   # import time, so this only matters if the default above is changed.
   debug = False
   main(sys.argv)

