#!/usr/bin/python
# coding=UTF-8

# code conventions
#  global variables: only use "global" keyword if writing to it

# Bugs and features
#  bug: doesn't copy all files (could be a filename problem with certain characters: console encoding differs from filesystem encoding)
#    hint: os.path.supports_unicode_filenames is True if unicode filenames are supported
#    hint: sys.getdefaultencoding() returns Unicode encoding schema (or ascii (Win))
#    hint: sys.getfilesystemencoding() returns encoding of file system. None means sometimes "default"; windows uses "mbcs" to/from unicode
#    hint: sys.stdout.encoding is 'mbcs' under Windows
#  implement "--run" option that actually performs created actions
#   add "ignore dups (in tree)" option to only backup (or remove) - this is already the behavior if the file is missing on the target (duplicates within the source are ignored, but should at least trigger a warning)
#   check bidirectionality: add option to only work one direction (copy to or remove in target, or work both ways)
# TODO limit number of simultaneous file handles in all places
# TODO add minimum file size to check (ignore below, including zero length)
# TODO remove option fast (make default? why)
# TODO replace gethash by gethashes -> allows for parallel processing
# TODO find cause of double slashes that occurs sometimes
# TODO show unexpected differences for files of same name and location (don't simply copy, copy the larger file?)
# TODO add means to let hash calculate on remote host (per pp or similar)
# TODO if location and file name are same, assume same contents by not calculating hash (if using --fast option)
# TODO more pending and important todos are scattered over this file
# TODO better handling of empty files: currently causing problems, could be sped up, too
# TODO option to keep only one copy in source/target (partly already in default implementation), with shorter name, cleaner name or what ever pattern
# TODO in uncertain cases, use modification date for determination of original source (younger better)
# TODO whole DIR topic:
#   TODO what to do if whole folder is missing? Currently only file/bucket based. But could have extra action for whole missing subtrees.
#   TODO handle directory renames by recognizing same dir contents (e.g. by hash of hashes), better: find similar directories by mostly matching contents (plus/minus some files missing)
#   TODO what if whole folder renamed, but all contents are the same? fast option would check only same names and sizes and assume equal
# Fact: Default is copy to target, but move to source

import sys; import os; import hashlib; import time; import types; import re; import math

# for entry indexing, we sh/could use a class with fields instead
# a file "entry" is the tuple (size, filename, dir relative to base, mtime, is-source flag, base dir) - see crawl()
SIZE = 0; NAME = 1; CDIR = 2; TIME = 3; SOURCE = 4; BASE = 5;
FIRST = 0; HASH = 1 # for key indexing: bucket keys are pairs (size-or-(size,name), partial-or-hash)
BYTES = 256 # number of bytes to read at beginning, middle or end of files


def escapeLinux(string):
    ''' Return the file name with shell-special characters backslash-escaped for linux file systems. '''
    special = u'! ();][' # each of these characters gets a backslash prefix
    return u''.join((u'\\' + ch) if ch in special else ch for ch in string)

def join(*path):
    ''' Join path elements in an operating-system-aware manner.
        When no target OS is forced (osys is None) this simply delegates to os.path.join;
        otherwise non-empty elements are joined with the forced OS's separator.
        Raises Exception for an unknown forced OS type.
        >>> print join('D:','a','b') # doctest: +ELLIPSIS
        D:...a...b
    '''
    if osys is None: # identity check is the idiomatic (and slightly safer) None test
        return os.path.join(*path)
    if osys == 'lin':
        sep = u'/'
    elif osys == 'win':
        sep = u'\\'
    else:
        raise Exception("Cannot join path elements for this type of OS (%s)" % osys)
    return sep.join([x for x in path if x != u''])

def allTrue(l):
    ''' Helper function to easy pre- and post condition testing.
        Returns True if every element of l is truthy (True for an empty list).
        Implemented via the builtin all() instead of a hand-rolled reduce().
        >>> print allTrue([True, True, True])
        True
    '''
    return all(l)

class Progress(object):
    ''' Class to show an animated progress indicator.
        steps = show x/n indicator
        percent = show percentage indicator
        text = show static or mutable text (set in increment or set)
        fd = output channel
        animPercDiv: Number of increments per percentage (half turn of indicator). Default: n/200 = 1 full turn per percent # TODO
    '''
    # spinner state machine: maps the current frame character to the next one (' ' is the start state)
    _animations = { ' ': '/', '/': '-', '-': '\\', '\\': '|', '|': '/' }
    def __init__(self, min, max, steps = False, percent = True, progress = True, text = None, progchar = '#', progspace = '.', fd = sys.stdout, animPercDiv = None):
        # NOTE(review): "min"/"max" shadow the builtins; kept to preserve the public interface
        self._min = min
        self._max = max
        self._step = steps
        self._perc = percent
        self._text = text
        self._prog = progress
        self._char = progchar
        self._space = progspace
        self._fd = fd
        
        self._spread = max - min + 1 # total number of value steps covered
        self._val = min # current position
        self._digs = math.floor(math.log(max, 10)) + 1 # digits needed to print max (a float; "%d" coerces it below)
        
        self._animate = ' ' # current spinner frame
        
    def increment(self, byValue = 1, text = None):
        ''' Advance the current position by byValue, optionally updating the text. Does not redraw. '''
#        self._animate = ' '
        self._val += byValue
        if text != None:
            self._text = text
    
    def set(self, value, text = None):
        ''' Jump to an absolute position, resetting the spinner; optionally updates the text. Does not redraw. '''
        self._animate = ' '
        self._val = value
        if text != None:
            self._text = text
    
    def animate(self, char = None):
        ''' Redraw using the next spinner frame, or a caller-supplied frame character. '''
        if char != None:
            self.draw(animate = char)
        else:
            self._animate = Progress._animations[self._animate]
            self.draw(animate = self._animate)
    
    def draw(self, animate = None):
        ''' Render the enabled parts (steps, percent, 50-char bar, text) and return the cursor with \\r. '''
        if animate == None:
            animate = self._space
        p = float(self._val) * 100. / float(self._spread)
        x = math.floor(p / 2.) # bar is 50 characters wide, i.e. 2% per character
        p = "%3.f%% " % p
        s = ("[%%%d.f/%%%d.f] " % (self._digs, self._digs)) % (self._val, self._max)
        x = (self._char * int(x)) + (self._space * (50 - int(x)))
        r = "                \r" # trailing blanks wipe leftovers of a previously longer line
        if self._text != None:
            r = animate + re.sub(r'\W', '', self._text) + r # strip non-word chars so the line stays single-width
        if self._prog:
            r = x + r
        if self._perc:
            r = p + r
        if self._step:
            r = s + r
        # NOTE(review): encodes with sys.stdout.encoding even when fd is stderr - confirm this is intended
        self._fd.write(unicode(r).encode(sys.stdout.encoding))
        self._fd.flush()

def getkey(entry):
    ''' Default bucket-key implementation: group files by their size alone.
        With the option --obey-names this gets monkeypatched by namekey().
    '''
    key = entry[SIZE]
    return key

def namekey(entry):
    ''' Bucket-key variant installed over getkey() when --obey-names is given:
        groups files by the pair (size, name) instead of size alone. '''
    key = (entry[SIZE], entry[NAME])
    return key

def crawl(base, source = True, stopDirFirst = [u'.', u'~'], stopDirNames = [u'$RECYCLE.BIN', u'._.DS_Store', u'._MacOS', u'._Resources', u'_xhtml']): # TODO sure to ignore all these?
    ''' This crawls the directory given, starting with its immediate contents and recursing down.
        This might not be the most efficient algorithm, but it's simple and works reliably.
        base: unicode directory to walk; source: flag stored in each entry (True = source tree)
        stopDirFirst: directories whose name starts with one of these characters are pruned
        stopDirNames: directories with exactly these names are pruned
        Returns a list of entry tuples (see the SIZE..BASE index constants).
        NOTE(review): the mutable default arguments are shared across calls - harmless as long as they are only read
        >>> c = crawl('./test', source = False)
        >>> #  Crawling "./test" # only stderr is captured by doctest
        >>> b = [x for x in c if x[NAME] == 'a']
        >>> print len(b)
        1
        >>> d = b[0]
        >>> print d[SIZE],d[NAME],d[CDIR],d[SOURCE],d[BASE]
        2 a  False ./test
        >>> d = sorted([x[NAME] for x in c])
        >>> for x in d: print x,
        a b c e
    '''
    print >> sys.stderr, '  Crawling "' + base.encode(sys.stderr.encoding) + '"'
    sys.stderr.flush()
    ret = []
    for curdir, dirnames, filenames in os.walk((base + ossep[osys]).encode(sys.getfilesystemencoding()), topdown=True, followlinks=symlinks): # sep necessary for Windows C:\ D:\
        # os.walk was fed bytes, so decode everything back to unicode with the filesystem encoding
        curdir = curdir.decode(sys.getfilesystemencoding())
        dirnames = [d.decode(sys.getfilesystemencoding()) for d in dirnames]
        filenames = [f.decode(sys.getfilesystemencoding()) for f in filenames]
        # prune in place (topdown walk honors deletions from dirnames); iterate backwards so deletion is safe
        for i in range(len(dirnames), 1, -1): # handles all but the first entry
            if dirnames[i - 1][0] in stopDirFirst or dirnames[i - 1] in stopDirNames: # this trick allows concurrent modification of recursion dirs
                del dirnames[i - 1] # remove all entries starting with "."
        if len(dirnames) > 0: # special handling of first entry
            if dirnames[0][0] in stopDirFirst or dirnames[0] in stopDirNames:
                del dirnames[0]
        for fname in filenames: # create entries for all files
            name = os.path.join(curdir, fname)
            try:
                s = os.stat(name)
            except: # file may have vanished or be unreadable: report and skip it
                print >> sys.stderr, ("Error querying file '%s'" % name).encode(sys.stderr.encoding)
                continue
            size = s.st_size
            time = s.st_mtime
            if not curdir.startswith(base):
                raise Exception("File path should start with base dir")
            entry = (size, fname, curdir[len(base) + 1:], time, source, base, ) # this is the entry format
            ret.append(entry)
    return ret
        

def combine(rawdata):
    ''' Sorts all files (from one or several file trees) into buckets of filesize (or size plus name).
    >>> c = combine([[(1, 'a', '', 23., True, '1')], [(2, 'b', '', 34., False, '2')]])
    >>> print len(c)
    2
    >>> for x in sorted(c.keys()): print x,
    1 2
    '''
    buckets = {}
    for tree in rawdata: # one list of entries per crawled file tree
        for entry in tree:
            buckets.setdefault(getkey(entry), []).append(entry) # bucket is created on first sight of its key
    return buckets

def fullName(entry, target = None):
    ''' Helper to restore the absolute location of a file.
        When target is given, it replaces the entry's base directory.
        >>> print fullName((1, 'a', './test/sub', 34., False, '~/svn/test', )) # doctest: +ELLIPSIS
        ~...svn...test...test...sub...a
    '''
    translate = {'lin': (u"\\", u'/'), 'win': (u'/', u"\\")} # forced OS -> (foreign separator, native separator)
    if osys in translate:
        wrong, right = translate[osys] # TODO split by any separator
        base = entry[BASE].replace(wrong, right)
        cdir = entry[CDIR].replace(wrong, right)
        name = entry[NAME].replace(wrong, right)
    elif osys == None:
        base = entry[BASE]
        cdir = entry[CDIR]
        name = entry[NAME]
    else:
        raise Exception("Unknown OS type")
    if target == None:
        return join(base, cdir, name)
    return join(target, cdir, name)

def fullDir(entry, target = None):
    ''' Helper to restore the absolute directory (without the file name) of a file.
        When target is given, it replaces the entry's base directory.
        >>> print fullDir((1, 'a', './test/sub', 34., False, '~/svn/test', )) # doctest: +ELLIPSIS
        ~...svn...test...test...sub
    '''
    # no "global osys" here: per this file's convention the keyword is only used when writing the global
    if osys == 'lin':
        base = u'/'.join(entry[BASE].split(u"\\")) # TODO split by any separator
        cdir = (u'/'.join(entry[CDIR].split(u"\\")))
    elif osys == "win":
        base = u"\\".join(entry[BASE].split(u'/')) # TODO split by any separator
        cdir = (u"\\".join(entry[CDIR].split(u'/')))
    elif osys == None:
        base = entry[BASE]
        cdir = entry[CDIR]
    else:
        raise Exception("Unknown OS type")
    if target == None:
        return join(base, cdir)
    return join(target, cdir)

    
def getHash(entry, bufsize = 1 << 22): # 4MB chunks
    ''' Helper function to calculate a hashsum for the whole file contents.
        Returns the hex md5 digest, or None if the file cannot be stat'ed or opened.
        For files larger than 16MB a progress indicator is shown on stderr.
    >>> print getHash((2, 'a', '', 34.34, True, './test', ))
        60b725f10c9c85c70d97880dfe8191b3
    '''
    name = fullName(entry)
    md5 = hashlib.md5() # new digester
    try:
        size = os.stat(name).st_size
        fd = open(name.encode(sys.getfilesystemencoding()), 'rb')
    except: # TODO narrow to OSError/IOError; None tells the caller "unreadable"
        return None
    p = None # progress indicator; only created for large files (avoids the old bare-except NameError hack)
    if size > (1 << 24):# > 16MB: starting showing progress bar
        print >> sys.stderr, "\n",
        p = Progress(0, size, text = entry[NAME], fd = sys.stderr) # create progress indicator
        p.draw()
    try:
        while True:
            buffer = fd.read(bufsize)
            if not buffer:
                break
            md5.update(buffer)
            if p != None:
                p.increment(len(buffer))
                p.animate()
    finally:
        fd.close() # always release the handle, even if a read fails
    if p != None:
        p.set(size)
    return md5.hexdigest()

def getPartial(fd, size, part, bytes = BYTES):
    ''' Helper to read one part of the file, returning a hex version of it.
        Part is either 0 (all), 1 (beginning), 2 (middle), 3 (end) of file.
        The helper assumes that fd is opened and for mode 1 no read or seek has been done yet.
        Raises ValueError for any other part number (previously this fell through to a NameError).
        >>> fd = open('test3/a', 'rb')
        >>> print getPartial(fd, 100, 1, bytes=8) # manual override of default number of bytes
        3132333435363738
        >>> print getPartial(fd, 100, 2, bytes=4)
        39303132
        >>> print getPartial(fd, 100, 3, bytes=8)
        3334353637383930
        >>> fd.close()
        >>> fd = open('test3/b', 'rb')
        >>> print getPartial(fd, 20, 0) # 5F = "_", 0A = EOL
        5F313233343536373831323334313233343536370A
        >>> fd.close()
    '''
    if part == 0:
        s = '_' + fd.read() # gets special marker so that likelihood incidental mixup is reduced
    elif part == 1:
        s = fd.read(bytes)
    elif part == 2:
        fd.seek((size / 2) - (bytes / 2), 0) # '0' means beginning ('1' is relative to current location)
        s = fd.read(bytes)
    elif part == 3:
        fd.seek(-bytes, 2) # '2' means seek from end
        s = fd.read(bytes)
    else:
        raise ValueError("Invalid part indicator %r (must be 0-3)" % part)
    return ''.join(["%02X" % ord(x) for x in s]) # TODO for comparison and hash building, this isn't really necessary, probably remove for speed up later

def getPartials(entries):
    ''' Helper function to obtain partial file contents from head and tail for quick comparison.
        Input: list of file entries of a size (or size+name) bucket
        Output: pair(map of unique files from partial -> file entry, map of duplicates from partial -> list of possible duplicate file entries)
        NOTE(review): the return value is actually a triple - the third element is a list of encoded error messages for unreadable files.
        Strategy: sample BYTES-sized chunks from the beginning, then middle, then end of each
        file, closing descriptors as soon as a file is proven unique; only the survivors need
        a full hash later.
        Test scenario 1: differing in different positions
        >>> x = getPartials([(20, 'b', '', 0, True,  'test3'), (20, 'c', '', 0, True, 'test3'), (20, 'd', '', 0, True, 'test3')])
        >>> print len(x), len(x[0]), len(x[1]) # 3 elements
        3 3 0
        
        Test scenario 2: Same files
        >>> x = getPartials([(20, 'b', '', 0, True,  'test3'), (20, 'c', '', 0, True, 'test3'), (20, 'b', '', 0, True, 'test3')])
        >>> print len(x), len(x[0]), len(x[1]), len(x[1].values()[0]) # 3 elements
        3 1 1 2
    '''
    fds = {} # entry -> open file descriptor (only for files larger than 3 * BYTES)
    sizes = {} # fd -> file size, to avoid repeated stat calls
    smalls = {} # for small files, if they are unique here, they really are!
    errors = []
    for entry in entries:
        name = fullName(entry)
        try:
            s = os.stat(name)
            size = s.st_size
        except: # unreadable/vanished file: report, record, skip
            print >> sys.stderr, ("Error querying file '%s'" % name).encode(sys.stderr.encoding)
            errors.append(("Error querying file '%s'" % name).encode(sys.stderr.encoding))
            continue

        if size == 0 or size <= ignore_size: # special treatment is necessary here! all too-small files share the '' pseudo-partial bucket
            if '' not in smalls:
                smalls[''] = []
            smalls[''].append(entry)
            continue
        try:
            fd = open(name.encode(sys.getfilesystemencoding()), 'rb') # TODO don't open more than maxHandles file at once
        except:
            errors.append(name.encode(sys.stderr.encoding)) # TODO what happens to "errors"?
            continue
        if size <= 3 * BYTES: # safety check to speed up small file handling
            partial = getPartial(fd, size, 0) # create partial contents from whole file
            fd.close() # no further filtering necessary in this routine
            if partial not in smalls.keys():
                smalls[partial] = []
            smalls[partial].append(entry)
        else: # all larger files are stored for more intelligent processing
            fds[entry] = fd # store file descriptor for first round of reading
            sizes[fd] = size # store size in order not to need to stat again (TODO is it really THAT expensive? Need to confirm)
    # first round: check file beginnings of all larger files
    hexs = {} # hex of first BYTES -> list of (fd, entry) pairs
    for entry, fd in fds.items():
        partial = getPartial(fd, sizes[fd], part = 1) # partial data from beginning
        if partial not in hexs.keys():
            hexs[partial] = []
        hexs[partial].append((fd, entry, ))
    # second round: check file middles
    unique = {} # for already known to be unique files
    hex2 = {} # beginning+middle hex -> list of (fd, entry) pairs
    for old, entries in hexs.items(): # old partial and list of (fd, entry)
        if len(entries) == 1:
            (fd, entry) = entries[FIRST]
            unique[old] = entry  # store unique entry
            fd.close()
            del fds[entry] # bookkeeping: this descriptor is no longer open
            del sizes[fd]
            continue # no further investigation needed for this entry
        for (fd, entry) in entries: # more than one candidates left
            partial = old + getPartial(fd, sizes[fd], 2) # partial from middle of file; concatenated keys keep buckets from merging across rounds
            if partial not in hex2.keys():
                hex2[partial] = []
            hex2[partial].append((fd, entry, ))
    # third round: check file endings
    hex3 = {} # beginning+middle+end hex -> list of entries (descriptors not needed anymore)
    for old, entries in hex2.items():
        if len(entries) == 1:
            (fd, entry) = entries[FIRST]
            unique[old] = entry # store unique entry
            fd.close()
            del fds[entry]
            del sizes[fd]
            continue # no further investigation needed for this entry
        for (fd, entry) in entries:
            partial = old + getPartial(fd, sizes[fd], 3) # partial from end
            if partial not in hex3.keys():
                hex3[partial] = []
            hex3[partial].append(entry) # add entry only (!) - remaining descriptors are closed in bulk below
    # now check if any duplicates are left
    buckets = {}
    for partial, entries in hex3.items():
        if len(entries) == 1:
            unique[partial] = entries[FIRST] # store unique entry
        else:
            buckets[partial] = entries
    # close all remaining descriptors
    for fd in fds.values():
        fd.close()
    # check 'smalls' for uniqueness
    for (key, entries) in smalls.items():
        if len(entries) == 1:
            unique[key] = entries[FIRST]
        else:
            if key not in buckets:
                buckets[key] = []
            buckets[key] += entries
    # Post conditions
    assert allTrue([type(x) == types.StringType for x in unique.keys()])
    assert allTrue([type(x) == types.TupleType for x in unique.values()])
    assert allTrue([type(x) == types.StringType for x in buckets.keys()])
    assert allTrue([type(x) == types.ListType and len(x) > 1 and type(x[0]) == types.TupleType for x in buckets.values()])
    return (unique, buckets, errors, )

def reducedups(bysize):
    ''' Find duplicates by checking partial file contents.
        Input: map of (key -> list of entries), where key is either file size or (filesize, filename)
        Output: map ((key, partial) -> list of entries), where key is either file size or (filesize, filename)
        Singleton buckets and too-small files pass through unchanged with an empty partial.
        >>> x = {20: [(20, 'b', '', 0, True, 'test3'), (20, 'c', '', 0, True, 'test3')]}
        >>> y = reducedups(x)
        >>> print len(y)
        2
    '''
    ret = {}
    for key, entries in bysize.items():
        if len(entries) == 1 or (len(entries) > 1 and (entries[FIRST][SIZE] == 0 or entries[FIRST][SIZE] <= ignore_size)): # no dups or file size is zero
            ret[(key, '')] = entries # just keep existing singular entries for later, key is enough for discrimination already
            continue
        (unique, buckets, errors, ) = getPartials(entries) # for size (or size+name) bucket, find more unique files if possible
        for partial, entry in unique.items():
            k = (key, partial, ) # create new combined key
            if k not in ret:
                ret[k] = []
            ret[k] += [entry]
        for partial, duplicates in buckets.items():
            k = (key, partial, ) # create new combined key
            if k not in ret:
                ret[k] = []
            ret[k] += duplicates
    # Post conditions
    assert allTrue([type(x) == types.TupleType and type(x[0]) in [types.IntType, types.LongType, types.TupleType] and type(x[HASH]) == types.StringType for x in ret.keys()])
    assert allTrue([type(x) == types.ListType and len(x) >= 1 and type(x[0]) == types.TupleType for x in ret.values()])
    return ret

def doMaxParallelComparison(files, maxParallel = 20, chunkSize = 1 << 20):
    ''' Compare at max n files parallely.
        doMaxParallelComparison([os.path.join('test', 'a'), os.path.join('test','b')])
        NOTE(review): this function looks unfinished and cannot run as written:
          - "newbucks" is a list, so newbucks.items() below raises AttributeError
          - "elif c == 1" compares a list against an int, so that branch never fires
          - "names" (fd -> file name mapping) is never defined, so the return expression fails
          - the final return is inside the while loop, so at most one iteration ever executes
        doParallelComparison() below appears to be its successor; confirm before using either.
    '''
    assert len(files) > 0
    assert allTrue([type(x) == types.StringType for x in files])
    tmp = {}
    for fd in [open(f.encode(sys.getfilesystemencoding()), 'rb') for f in files]: # init first pool - all are deemed duplicates
        tmp[fd] = None
    bucks = [tmp]
    singulars = [] # results
    duplicates = [] # results
    while True: # repeat until all singular or EOF
        newbucks = [] # next round of buckets 
        newbuck = {}
        closed = []
        for buck in bucks: # buckets of files that were assumed to be equal so far (size > 1)
            for fd in buck.keys(): # read next block
                buck[fd] = fd.read(chunkSize)
            entries = buck.items()
            # first round: contents differ
            for fd, buf in entries[1:]:
                if len(buf) < chunkSize:
                    fd.close()
                    closed.append(fd)
                if entries[0][1] != buf:
                    newbuck[fd] = buf
            for fd in newbuck.keys():
                del buck[fd] # remove files in new buck from old one
            newbucks.append(buck) # add old buck minus differing files to next block round
            newbucks.append(newbuck) # add new buck
        # now remove already closed files
        for i, buck in enumerate(newbucks.items()): # NOTE(review): newbucks is a list - .items() raises AttributeError
            c = []
            for fd in buck.keys():
                if fd in closed:
                    c.append(fd)
                    del newbucks[i][fd] # remove from old buck
                    fd.close()
            if len(c) > 1:
                duplicates.append(c)
            elif c == 1: # NOTE(review): compares a list to an int - presumably len(c) == 1 was meant
                singulars.append(c)
        # prepare next round of block reading
        bucks = [buck for buck in newbucks if len(buck) > 1]
        singular = [buck for buck in newbucks if len(buck) <= 1]
        singulars += singular
        if len(bucks) == 0:
            break
        return ([[names[fd] for fd in dups] for dups in duplicates], [names[fd] for fd in singulars], ) # NOTE(review): "names" undefined; return also sits inside the loop

def doParallelComparison(files, maxParallel = 4, chunkSize = 1 << 20):
    ''' Parallely compare these files, using up to maxParallel file handles and always reading up to chunkSize bytes before switching to the next parallel file in turns.
        Input: list of absolute file names to compare
        Output: list of lists of duplicates (or lists with just a single file in it)
        
        performance considerations: open and seek is slow, alternating between files is slow but better than reopen and allows finding differences before whole file has been forcefully read, also number of handles is limited, and ram is limited        
        NOTE(review): unfinished/dead code - it cannot run as written:
          - both assignments to "names" are commented out (and end in a stray ':'), leaving "names" undefined
          - "md5.new()" requires the legacy md5 module, but only hashlib is imported at the top of the file
          - "digesters" is never defined; md5s values are strings, yet .append() is called on them
          - "toprocess.remove(0)" removes the value 0 (raises ValueError) - pop(0) was presumably intended
          - the computed dups/newSlots are never returned; the function falls off the end
    '''
    assert len(files) > 0
    assert allTrue([type(x) == types.StringType for x in files])
    
    toprocess = list(files) # copy of the list of all file names to process and not yet started
    md5s = {} # map file name -> md5sum concatenated
    closed = [] # contains file names of all files that have already ended
    
    while len(toprocess) > 0: # perform main loop until all files have been processed
        fds = {} # map file name -> file descriptor
        while len(toprocess) > 0 and len(fds) < maxParallel: # as long as there are untouched files, open as much files as possible but not more than maxParallel at the same time
            name = toprocess.remove(0) # get next file
            fd = open(name, 'rb') # open it
            fds[name] = fd # store file handle
        assert len(fds) <= maxParallel
        
        slots = [fds.keys()] # one slot with all files currently open, assuming all are same in the beginning, splitting up later
        while (True): # until all files of iteration have been read or been deemed different
#            names = reduce(lambda a,b:a + b, slots, []): # concatenate all files
            assert len(names) > 1 # otherwise seriously wrong
            buffers = {} # map file name -> contents, to read files' contents into for comparison
            for name in names: # fill buffers
                buffers[name] = fds[name].read(chunkSize)

            s = 0 # start with first (and only) slot
            while len(slots) > s: # until no more slots were created in inner loop
#                names = slots[s]: # now iterate over slots instead of all files at once
                s += 1 # next slot to look for
                assert len(names) > 0
                if len(names) < 2:
                    continue # TODO already singular -> don't consider this slot anymore until big comparison
                excludes = [] # stores names already put into other slots
                for i in range(len(names) - 1): # file comparison of chunks for all currently loaded files
                    newSlot= []
                    for j in range(i + 1, len(names)): # rest
                        if buffers[names[i]] == buffers[names[j]]: # pairwise comparison
                            newSlot.append(names[j]) # add to new slot (which contains everything not equal to names[i])
                            slots[i].remove(names[i]) # remove from original slot
                    if len(newSlot) > 0:
                        slots.append(newSlot) # append new slot
                        
                same = {} # stores file name -> list of same files in this comparison iteration step
                for i in range(len(names) - 1): # file comparison of chunks for all currently loaded files
                    if names[i] not in same: # can already have been added in inner loop (due to symmetry)
                        same[names[i]] = [] # per each file (but the last) create a slot of same chunks
                        md5s[names[i]] = '' # initiate hash collection
                    for j in range(i + 1, len(names)): # rest
                        if buffers[names[i]] == buffers[names[j]]: # pairwise comparison
                            same[names[i]].append(names[j]) # mark as same
                            if names[j] not in same:
                                same[names[j]] = [] # create symmetric entry
                                md5s[names[j]] = ''
                            same[names[j]].append(names[i]) # reverse mapping
                            m = md5.new() # NOTE(review): md5 module not imported - hashlib.md5() was presumably intended
                            m.update(buffers[names[i]])
                            h = m.hexdigest()
                            md5s[names[i]].append(h) # NOTE(review): .append on a string raises AttributeError
                            md5s[names[j]].append(h)
                        else: # buffers differ: split into two buckets
                            md5s[names[j]] = md5s[names[i]] + digesters[names[j]].hexdigest() # NOTE(review): "digesters" is never defined
                            digesters[names[i]].update(buffers[names[i]])
                            md5s[names[i]].append(digesters[names[i]].hexdigest())
                # check for EOF, release file handles if yes
                closed = [] # list of all closed handles in this iteration
                for name, fd in fds.items():
                    if len(buffers[name]) < chunkSize: # must be end of file
                        fds[name].close() # release file handle early
                        closed.append(name)
                        del fds[name] # clean fd slot
                # all closed filenames in the same slot must be the equal, all unclosed must be at least one other file
                dups = [] # list of lists of duplicate files (not final, since maxParallel limits number of concurrent files)
                newSlots = [] # list of lists of duplicate files that have not been closed yet
                for name, others in same.items():
                    if name not in closed:
                        continue # this file is not finished 
                    dup = [name] # this file is definitely to be removed from chunk comparison due to EOF
                    newSlot = []
                    for other in others:
                        if other in closed:
                            dup.append(other) # closed and same content: duplicate
                        else:
                            newSlot.append(other)
                    dups.append(dup)
                    if len(newSlot) > 0:
                        newSlots.append(newSlot)
                for dup in dups:
                    del same[dup[0]] # remove the slot
              
        
        
def getHashes(first, second = None):
    ''' Calculate hashes for all given entries.
        If --concurrent is active, perform first and second in separate threads concurrently.
        The hashing is performed with several files at the same time in chunks (to avoid too many file handles and too many seeks).
        Input: one or two arguments, each a list of entries
        Output: list of lists of duplicates (no key needed anymore)
        NOTE(review): unimplemented stub - currently does nothing and implicitly returns None.
    '''
    if second != None:
        pass
        
        

def dups(reduced):
    ''' Find real duplicates by bytewise comparison of head, tail and hashing.
        Input: map((key, partial) -> list of entries (size 1 or bigger))
        Output: map((key, hash) -> list of entries  (size 1 or bigger))
        We use key[FIRST] here because this is the original key now, since reducedups added "part" as second part of pair.
    '''
    ret = {}
    for key, entries in reduced.items():
        # pass through: already-singular buckets and too-small files need no hashing
        if len(entries) == 1 or (len(entries) > 1 and (entries[FIRST][SIZE] == 0 or entries[FIRST][SIZE] <= ignore_size)):
            ret[(key[FIRST], '')] = entries # just keep singular entries for later
            continue
        if simplified and len(entries) == 2 and len([x for x in entries if x[SOURCE]]) == 1: # this is a speed up for cases where remote connection is slow and likeness is assumed high: if only one source file and same on target, assume equal
            if verbose: print >> sys.stderr, ("Skipping hash check for almost certain duplicate %s" % entries[FIRST][NAME]).encode(sys.stderr.encoding)
            k = (key[FIRST], 'simplified') # TODO is this always unique? should be!
            if k not in ret:
                ret[k] = []
            ret[k] += entries # assume as same without checking hash here
            continue
        for entry in entries: # TODO do both trees concurrently, if --concurrent is active, but this needs locking on the ret dict
            if verbose: print >> sys.stderr, ("Calculating hash for file %s" %  fullName(entry)).encode(sys.stderr.encoding)
            ahash = getHash(entry) # may be None if the file became unreadable; None then acts as its own bucket key
            k = (key[FIRST], ahash, )
            if k not in ret: # ignore hash part of key
                ret[k] = []
            ret[k].append(entry)
    # Post conditions
    assert allTrue([type(x) == types.TupleType and type(x[0]) in [types.IntType, types.LongType, types.TupleType] and type(x[HASH]) == types.StringType for x in ret.keys()])
    try:
        assert allTrue([type(x) == types.ListType and len(x) >= 1 and type(x[0]) == types.TupleType for x in ret.values()])
    except: # NOTE(review): debugging aid - the failed postcondition is swallowed and ret returned anyway
        print ["%s:%s" % (x, type(x)) for x in ret.values()]
    return ret

def showdups(dups, kind, source):
    ''' Print a human-readable report of the duplicate buckets found.
        dups:   map of bucket key (size/hash pair) -> list of entries, all from the source tree
        kind:   label for the bucket key type shown in the header (e.g. 'Hash')
        source: unused here; kept for interface compatibility
    '''
    assert allTrue([allTrue([y[SOURCE] for y in x]) for x in dups.values()]) # Precondition: Only containing lists of source entries
    for key, entries in dups.items():
        firstOfBucket = True
        previousDir = '*' # sentinel that never matches a real directory
        for entry in sorted(entries):
            if firstOfBucket:
                sys.stdout.write("\n")
                if osys == None: # bucket header is only shown in plain (non-batch) output mode
                    print (u'Bucket ' + kind + u' ' + unicode(key[HASH]) + ' size ' + unicode(key[SIZE])).encode(sys.stdout.encoding)
                firstOfBucket = False
            if entry[CDIR] == previousDir:
                # same folder as previous entry: indent to align the name column
                print ((u' ' * (len(entry[CDIR]) + 11)) + u' name "' + unicode(entry[NAME]) + u'"').encode(sys.stdout.encoding)
            else:
                print (u'  Folder "' + unicode(entry[CDIR]) + u'" name "' + unicode(entry[NAME]) + u'"').encode(sys.stdout.encoding)
                previousDir = entry[CDIR]

def removesingulars(data):
    ''' Helper to remove and store non-duplicate entries.
        Input: map (key -> list of entries
        Output: <None>
        Side effects: Write entries to singularsource and singulartarget for later consideration of copy or delete actions
        >>> d = {1:[(1,2,3,4,True)], 2:[(2,3,4,5,True),(3,4,5,6,True)]}
        >>> removesingulars(d)
        >>> print len(singularsource)
        1
        >>> print singularsource[0]
        (1, 2, 3, 4, True)
        >>> print len(d.keys())
        1
    '''
    global singularsource; global singulartarget
    # data.keys() is a fresh list in Python 2, so deleting while looping is safe
    for key in data.keys():
        entries = data[key]
        if len(entries) != 1:
            continue # keep buckets with real duplicates untouched
        entry = entries[FIRST]
        # route the lone entry to the matching singles list by its tree of origin
        if entry[SOURCE]:
            singularsource.append(entry)
        else:
            singulartarget.append(entry)
        del data[key]
    # Post conditions
    assert allTrue([type(x) == types.ListType and len(x) > 1 for x in data.values()])

def timeit(funct, outdic, *params):
    ''' Helper to time the duration of a method call.
        funct:  the callable to invoke
        outdic: optional result dictionary; if given, the call result and duration
                are stored under the keys "return" and "duration" and None is
                returned, otherwise a (return value, duration in seconds) tuple
                is returned. This dual mode allows use both directly and as a
                threading.Thread target (which cannot return values).
        params: positional arguments passed through to funct
        >>> x = timeit(time.sleep, None, 0.5)[1]
        >>> print x > 0.49 and x < 0.51
        True
    '''
    start = time.time()
    ret = funct(*params)
    duration = time.time() - start
    assert duration >= 0
    if outdic is not None: # identity check is the correct/idiomatic None test (was: != None)
        outdic["return"] = ret
        outdic["duration"] = duration
        return None
    return (ret, duration, )

def findwronglocations(dups):
    ''' Main algorithm for smart syncing:
        Input: Map of key -> list of occurrences for duplicate (at least two entries) 
         * For every target location of a bucket entry check if there are source occurrences.
         * If nothing in target, mark as copy from source.
         * If one occurrence in target exists but it has a wrong location, mark as move.
         * If more than one occurrences in target exist but some are in unknown locations, mark as remove, keeping one.
         * If file is in correct location, check if renaming might be necessary.
        Output: tuple (copy, move, remo, rena, erro) of pending action lists:
         copy: entries to copy to the other tree; move/rena: (source entry/name, target entry) pairs;
         remo: entries to delete; erro: human-readable messages for undecidable cases.
         Test scenario 1: File needs copying from a to b,
         >>> # a = (1, 'a', 
    '''
    global copy_missing; global delete_missing
    copy = []; move = []; remo = []; rena = []; erro = [] # these contain actions, in contrast to singularsource and singulartarget, which contain states!
    
    for key, entries in dups.items(): # iterate over all buckets
        s = [x for x in entries if x[SOURCE]] # all source occurrences
        t = [x for x in entries if not x[SOURCE]] # all target occurrences
        scdir = set([x[CDIR] for x in s]) # all source occurrence dirs
        
        if len(t) == 0: # only in source
            # TODO add means to copy only one instance from source (but which of probably many?)
            copy += s # mark all source files for copying (duplicates in source stay duplicates in target!)
            
        elif len(t) == 1: # found one occurrence in target, therefore there must be at least one occurrence in source (since we consider only buckets of len >= 2)
            if t[0][CDIR] in scdir: # is already at correct location, only file name might differ  
                sn = [x[NAME] for x in s if x[CDIR] == t[0][CDIR]] # find all names of same file in the source directory
                if len(sn) > 1: # more than one source candidates in same source location
                    # TODO add means to specify default selection (similarity of name, alphabetic first, any, keep, rename in source...)
                    # NOTE(review): message uses s[0][CDIR], but the candidates in sn were collected from t[0][CDIR]; if the bucket has several source dirs, s[0] may be a different one — verify
                    erro.append('Cannot decide which file candidate from "' + s[0][CDIR] + os.sep + '" is the base name for target file rename "' + t[0][CDIR] + os.sep + t[0][NAME] + '", candidates ' + str(sn))
                elif sn[0] != t[0][NAME]: # found exactly one file in source, but must be renamed
                    rena.append((sn[0], t[0], )) # only one target occurrence to take, (orig, existing)
                else:
                    pass # Name matches exactly, nothing to do
            else: # target is in wrong location (target location unknown in source occurrences)
                if len(s) > 1: # is there more than one source occurrence that could be right
                    # TODO check option that allows only one location in source (?). currently duplicates in source are assumed to be OK (but warned about)
                    # NOTE(review): the two if/elif chains below run independently; when neither option in a
                    # chain matches, a nearly identical error message is appended twice for this bucket
                    # (the second one built from byte strings instead of unicode) — confirm intended
                    if copy_missing in ['target','both']:
                        copy += s # mark all occurrences for copying, keeping the unknown one in target
                    elif delete_missing in ['target','both']:
                        remo += t # mark all occurrences for removal
                    else:
                        erro.append(u"Cannot decide which source location to consider as base for moving " + t[0][CDIR] + os.sep + t[0][NAME] + " candidates:" + str([x[CDIR] + "/" + x[NAME] for x in s]))
                    if copy_missing in ['source','both']:
                        copy += t # mark all occurrences for copying back to source
                    elif delete_missing in ['source','both']:
                        remo += s # mark all occurrences for removal in source
                    else:
                        erro.append("Cannot decide which source location to consider as base for moving " + t[0][CDIR] + os.sep + t[0][NAME] + " candidates:" + str([x[CDIR] + "/" + x[NAME] for x in s]))
                else: # there must be exactly one file in source, so there is a clear connection
                    # TODO option to decide about moving (because we have a clear connection) or copying (creating a duplicate on target, which might be undesirable)
                    move.append((s[0], t[0], )) # move target from its location to the one specified in the source
                    
        else: # more than one target entries for this file exist
            if len(s) == 0: # no copy of this duplicate is in source
                if copy_missing in ['source','both']:
                    # TODO option to use only on location instead of syncing all duplicates back
                    copy += t # mark all target occurrences for backsync
                elif delete_missing in ['target','both']: # cannot copy back and remove in target at the same time (!)
                    remo += t # mark occurrences for removal
                else:
                    erro.append(u"Warning: Duplicates in target not in source. Candidates:" + str([x[CDIR] + os.sep + x[NAME] for x in t]))
            else: # at least one source file exists for more than one target file
                sdelcands = [x for x in t if x[CDIR] not in scdir] # consider only target occurrences not in known source locations
                # TODO handle the other occurrences, too, if only one should remain (but how to specify which?)
                if copy_missing in ['source','both']: # TODO: handling of wrong names here and --obey-names option 
                    copy += sdelcands
                elif delete_missing in ['target','both']:
                    remo += sdelcands
                elif len(sdelcands) == len(t): # all target occurrences are in unknown/wrong locations regarding source
                    if copy_missing in ['target','both']:
                        copy += s # mark all source occurrences for copy (ignoring the fact, that this creates one more duplicate)
                    elif delete_missing in ['source','both']:
                        remo += s # mark all occurrences in source for removal (duplicates stay in target)
                    else:
                        erro.append(u'Duplicates in target with differing locations in source found, need option how to handle this. Target:' + unicode([x[CDIR] + os.sep + x[NAME] for x in t]) + " Source:" + unicode([x[CDIR] + os.sep + x[NAME] for x in s]))
                elif len(sdelcands) + 1 == len(t): # exactly one occurrence in source matches one of the ones in target
                    # TODO option to keep exactly the one match (clear connection is available, so remove rest)
                    # TODO what about renaming? makes problem harder or use "--force-rename" option
#                    copy += sdelcands
#                    else: # TODO add means to tell to keep only one file in source, removing rest
                    erro.append(u'Multiple occurrences in target match one source location, cannot decide what to do without further option') 
                elif len(sdelcands) < len(t): # some occurrences in source match the one in target, so be smart about it
                    # TODO option to keep exactly the one match (clear connection is available, so remove rest)
                    # TODO what about renaming? makes problem harder or use "--force-rename" option
#                    copy += sdelcands
#                    else: # TODO add means to tell to keep only one file in source, removing rest
                    names = "  Source:\n"
                    for ss in s: names += (u"    %s\n" % fullName(ss))
                    names += "  Target:\n"
                    for tt in t: names += (u"    %s\n" % fullName(tt))
                    erro.append(u'Multiple occurrences in both target and source found, unclear which source is base\n' + names)
    return (copy,move,remo,rena,erro,)

def showactions(ops):
    ''' Create batch file actions for the given operations, using the syntax for the operating system specified.
        ops: the (copy, move, remo, rena, erro) tuple as returned by findwronglocations.
        Output style depends on the global osys: None prints plain info lines, 'lin'/'win' print shell/batch commands.
    '''
    copy, move, remo, rena, erro = ops
    print >> sys.stderr
    
    # split pending copies by direction using the SOURCE flag of each entry
    copytotarget = [x for x in copy if x[SOURCE]]
    copytosource = [x for x in copy if not x[SOURCE]]
    if len(copytotarget) > 0:
        if osys == None:
            print u"Copy missing file from source to target"
            for x in copytotarget: print fullName(x)
        elif osys == 'lin':
            for x in copytotarget:
                print (u'mkdir -p %s ; cp -n %s %s/' % (escapeLinux(fullDir(x, target)), escapeLinux(fullName(x)), escapeLinux(fullDir(x, target)))).encode(sys.stdout.encoding)
        elif osys == 'win':
            for x in copytotarget:
                print (u'@mkdir "%s" > nul' % fullDir(x, target)).encode(sys.stdout.encoding)
                print (u'@copy /b /y /z "%s" "%s" > nul' % (fullName(x), fullName(x, target))).encode(sys.stdout.encoding) # TODO link commandos unconditionally (not via &&)
    if len(copytosource) > 0:
        if osys == None:
            print u"Copy missing file back from target to source"
            for x in copytosource: print x
        elif osys == 'lin':
            for x in copytosource:
                print (u'mkdir -p %s ; cp -n %s %s/' % (escapeLinux(fullDir(x, source)), escapeLinux(fullName(x)), escapeLinux(fullDir(x, source)))).encode(sys.stdout.encoding)
        elif osys == 'win':
            for x in copytosource:
                print (u'@mkdir "%s" > nul' % fullDir(x, source)).encode(sys.stdout.encoding)
                # NOTE(review): byte-string format here, unlike the u'' literals used everywhere else — confirm encoding behavior for non-ASCII names
                print ('@copy /b /y /z "%s" "%s" > nul' % (fullName(x), fullName(x, source))).encode(sys.stdout.encoding)
    # move pairs are (destination entry, existing entry); direction is taken from the pair members
    moveintarget = [x for x in move if x[0][SOURCE]]
    moveinsource = [x for x in move if x[1][SOURCE]]
    if len(moveintarget) > 0:
        if osys == None:
            print u"Move file within target file tree"# TODO what if target file already exists?
            for x in moveintarget: print (u'  "%s" to "%s"' % (x[1][CDIR] + os.sep + x[1][NAME], x[0][CDIR] + os.sep + x[0][NAME])).encode(sys.stdout.encoding)
        elif osys == 'lin':
            for x in moveintarget: print (u'  mkdir -p %s ; cp -n %s %s/ && rm %s' % (escapeLinux(fullDir(x[0])), escapeLinux(fullName(x[1])), escapeLinux(fullDir(x[0])), escapeLinux(fullName(x[1])))).encode(sys.stdout.encoding)
        elif osys == 'win':
            for x in moveintarget: print (u'@  mkdir "%s" > nul' % fullDir(x[0])).encode(sys.stdout.encoding); print (u'@  copy /b /y /z "%s" "%s" && del "%s" > nul' % (fullName(x[1]), fullName(x[0]), fullName(x[1]))).encode(sys.stdout.encoding)
    if len(moveinsource) > 0:
        if osys == None:
            print u"Move file within source file tree" # TODO what if target file already exists? Add to main logic
            for x in moveinsource: print (u'  "%s" to "%s"' % (x[1][CDIR] + os.sep + x[1][NAME], x[0][CDIR] + os.sep + x[0][NAME])).encode(sys.stdout.encoding)
        elif osys == 'lin':
            for x in moveinsource: print (u'  mkdir -p %s ; cp -n %s %s/ && rm %s' % (escapeLinux(fullDir(x[0])), escapeLinux(fullName(x[1])), escapeLinux(fullDir(x[0])), escapeLinux(fullName(x[1])))).encode(sys.stdout.encoding)
        elif osys == 'win':
            # NOTE(review): mkdir here uses fullDir(x[1]) (the file's OLD dir) while the 'lin' branch above and the target-move case use fullDir(x[0]) (the destination dir) — looks like a bug, confirm
            for x in moveinsource: print (u'@  mkdir "%s" > nul' % fullDir(x[1])).encode(sys.stdout.encoding); print (u'@  copy /b /y /z "%s" "%s" && del "%s" > nul' % (fullName(x[1]), fullName(x[0]), fullName(x[1]))).encode(sys.stdout.encoding) # TODO check &&
    
    remointarget = [x for x in remo if not x[SOURCE]]
    remoinsource = [x for x in remo if x[SOURCE]]
    if len(remointarget) > 0:
        if osys == None:
            print u"Remove in target file tree"
            for x in remointarget: print x 
        elif osys == 'lin':
            for x in remointarget: print (u'rm %s' % escapeLinux(fullName(x))).encode(sys.stdout.encoding)
        elif osys == 'win':
            for x in remointarget: print (u'del "%s"' % fullName(x)).encode(sys.stdout.encoding)
    if len(remoinsource) > 0:
        if osys == None:
            print "Remove in source file tree"
            for x in remoinsource: print x
        elif osys == 'lin':
            for x in remoinsource: print (u'rm %s' % escapeLinux(fullName(x))).encode(sys.stdout.encoding)
        elif osys == 'win':
            for x in remoinsource: print (u'del /q "%s"' % fullName(x)).encode(sys.stdout.encoding)
        
    renaintarget = [x for x in rena if not x[1][SOURCE]] # arg 0 is new name, arg 1 is entry
    renainsource = [x for x in rena if x[1][SOURCE]]
    if len(renaintarget) > 0:
        if osys == None:
            print "Rename in target file tree"
            for x in renaintarget: print (u'  "%s" to "%s"' % (x[1][CDIR] + os.sep + x[1][NAME], x[FIRST])).encode(sys.stdout.encoding)
        elif osys == 'lin':
            for x in renaintarget: print (u'mv %s %s' % (escapeLinux(fullName(x[1])), escapeLinux(join(fullDir(x[1]), x[FIRST])))).encode(sys.stdout.encoding) # TODO check if FIRST is right
        elif osys == 'win':
            for x in renaintarget: print (u'move "%s" "%s"' % (fullName(x[1]), join(target, x[1][CDIR], x[FIRST]))).encode(sys.stdout.encoding)
    if len(renainsource) > 0:
        if osys == None:
            print u"Rename in source file tree"
            for x in renainsource: print (u'  "%s" to "%s"' % (x[1][CDIR] + os.sep + x[1][NAME], x[FIRST])).encode(sys.stdout.encoding)
        elif osys == 'lin':
            for x in renainsource: print (u'mv %s %s' % (escapeLinux(fullName(x[1])), escapeLinux(fullName(join(fullDir(x[1], x[FIRST])))))).encode(sys.stdout.encoding) # TODO check if FIRST is right
        elif osys == 'win':
            for x in renainsource: print (u'move "%s" "%s"' % (fullName(x[1]), join(source, x[1][CDIR], x[FIRST]))).encode(sys.stdout.encoding) # TODO shorter formulation?
    if len(erro) > 0:
        print >> sys.stderr, u"\nError messages"
        for x in erro: print >> sys.stderr, u'  ' + x

def showsingulars(singularsource, singulartarget):
    ''' Create batch file actions for the given operations, using the syntax for the operating system specified. '''
    global copy_missing; global delete_missing; global osys; global source; global target
    if (len (singularsource) + len(singulartarget)) > 0:
        print
    if len(singularsource) > 0:
        if copy_missing in ['target','both']:
            if osys == None:
                print u"Copy from source to target"
                for x in singularsource:
                    print (u'  file "' + x[CDIR] + os.sep + x[NAME] + u'"').encode(sys.stdout.encoding)
            elif osys == 'lin':
                for x in singularsource: print (u'mkdir -p %s ; cp -n %s %s/' % (escapeLinux(fullDir(x, target)), escapeLinux(fullName(x)), escapeLinux(fullDir(x, target)))).encode(sys.stdout.encoding)
            elif osys == 'win':
                for x in singularsource: print (u'@mkdir "%s" > nul' % fullDir(x, target)).encode(sys.stdout.encoding); print (u'@copy /b /y /z "%s" "%s" > nul' % (fullName(x), fullName(x, target))).encode(sys.stdout.encoding)
        elif delete_missing in ['source','both']: # TODO check logic
            if osys == None:
                print u"Removing from source"
                for x in singularsource:
                    print (u'  file "' + x[CDIR] + os.sep + x[NAME] + u'"').encode(sys.stdout.encoding)
            elif osys == 'lin':
                for x in singularsource: print (u'rm %s' % escapeLinux(fullName(x))).encode(sys.stdout.encoding)
            elif osys == 'win':
                for x in singularsource: print (u'del "%s"' % fullName(x)).encode(sys.stdout.encoding)
    if len(singulartarget) > 0:
        if copy_missing in ['source','both']:
            if osys == None:
                print u"Sync back to source or remove"
                for x in singulartarget:
                    print (u'  file "' + x[CDIR] + os.sep + x[NAME] + u'"').encode(sys.stdout.encoding)
            elif osys == 'lin':
                for x in singulartarget: print (u'mkdir -p %s ; cp -n %s %s/' % (escapeLinux(fullDir(x, target)), escapeLinux(fullName(x)), escapeLinux(fullDir(x, source)))).encode(sys.stdout.encoding)
            elif osys == 'win':
                for x in singulartarget: print (u'@mkdir "%s" > nul' % fullDir(x, source)).encode(sys.stdout.encoding); print (u'@copy /b /y /z "%s" "%s" > nul' % (fullName(x), fullName(x, source))).encode(sys.stdout.encoding)
        elif delete_missing in ['target','both']: # TODO check logic
            if osys == None:
                print u"Removing from target"
                for x in singulartarget:
                    print (u'  file "' + x[CDIR] + os.sep + x[NAME] + u'"').encode(sys.stdout.encoding)
            elif osys == 'lin':
                for x in singulartarget: print (u'rm %s' % escapeLinux(fullName(x))).encode(sys.stdout.encoding) # TODO in target?
            elif osys == 'win':
                for x in singulartarget: print (u'del "%s"' % fullName(x)).encode(sys.stdout.encoding) # TODO in target?

def test():
    ''' Run all doctests embedded in this module.
        Seeds the module namespace with the globals the doctests rely on
        (empty singles lists, detected operating system, path separators).
    '''
    import doctest
    namespace = globals() # mutate the real module globals on purpose, like the doctests expect
    namespace['singularsource'] = []
    namespace['singulartarget'] = []
    namespace['osys'] = oss.get(os.name, 'win')
    namespace['ossep'] = ossep
    namespace['symlinks'] = False
    doctest.testmod(globs=namespace)

def usage():
    ''' Show usage info. '''
    print 'Usage:                                                                             (C) 2012 Arne Bachmann'
    print '  syncter.py sourcedir           : Find duplicate files in a single file tree'
    print '  syncter.py sourcedir targetdir : Find wrong locations and deviating names between two file trees'
    print '\nGeneral options:'
    print '  --os=<OS>                 Specifies operating system for batch file creation'
    print '                            Allowed values: win,lin,mac,none. Default: Current operating system'
    print '  --obey-names              Considers files equal only if names match'
    print "  --ignore-symlinks         Don't recurse into symbolic link directories"
    print '  --ignore-size=<SIZE>      Ignores files with file size smaller than SIZE. Default: <unspecified>'
    print '                            Add a size indicator out of kb,Mb,Gb for Kilo-, Mega-, and Gigabytes'
    print '                            Add a size indicator out of Ki,Mi,Gi for Kibi-, Mebi-, and Gibibytes'
    print '  --file-ignore=<PATTERN>   A regular expression for files to ignore. Default: "\.class$"'
    print '  --dir-stop=<PATTERN>      A regular expression for directory names to not recurse into. Default: "^\.*"'
    print '  --fast                    Don\'t be so thorough in some cases, omit hash calculation sometimes'
    print '  --verbose                 Show more info on what is happening'
    print '  --self-test               Perform unit tests for this script'
    print '  --run                     Immediately run the created batch commands'
    print '\nSynchronisation options:'
    print '  --concurrent              Runs crawling of source and target file trees at the same time'
    print '  --copy-missing=<VALUE>    Determins what to do with files existing only in source or target'
    print '                            Allowed values:'
    print '                              target : copy files from source to target, if missing there'
    print '                              source : copy files from target back to source'
    print '                              both   : copy either direction, if missing on other side'
    print "                              none   : don't copy anything"
    print '                            Default: target'
    print '  --delete-missing=<VALUE>  Determines what to do with files missing in source or target'
    print '                            Allowed values:'
    print '                              source : if existing only in source tree, remove it there'
    print '                              target : if existing only in target tree, remove it there'
    print "                              none   : don't remove anything"
    print '                              both   : remove all files residing in only one tree'
    print '                            Default: none'
    print '                            Both parameters cannot have the same value' 
    print '\nWarning'
    print "  This tool doesn't use byte-by-byte comparison, so watch for false positives!"

if __name__ == '__main__':
    ''' Main script code. '''
    # Python 2: sys.argv entries are byte strings; positional args are decoded to unicode,
    # option strings are kept as bytes and decoded piecemeal where needed
    args = [x.decode(sys.stdin.encoding) for x in sys.argv[1:] if not x.startswith(u'-'.encode(sys.stdin.encoding))]
    opts = [x for x in sys.argv[1:] if x.startswith(u'-'.encode(sys.stdin.encoding))]
    if u'--help' in opts or u'-h' in opts:
        usage()
        sys.exit()
    if len(args) < 1 and u'--self-test' not in opts:
        print 'Missing command line argument.\n'
        usage()
        sys.exit()
        
    # Define defaults
    obey_names = False
    symlinks = True
    ignore_size = -1   # -1 disables the minimum-size filter
    simplified = False # set by --fast: skip some hash calculations
    verbose = False
    oss = {'posix':'lin', 'nt':'win', 'mac':'mac'} # os.name -> batch syntax flavor
    ossep = {'win':u'\\', 'lin':u'/', 'mac':u'/', None:u'/','none':None}
    osys = oss.get(os.name, None)
    
    # Parse general command line options (each recognized option is remembered so
    # unrecognized leftovers can be reported later)
    recognized = []
    for x in opts:
        y = x.split(u"=")
        if len(y) > 1:
            opt = y[0]
            val = y[1]
        else:
            opt = y[0]
            val = None
        if opt == u'--os':
            if val in [u'lin',u'win',u'mac',u'none']:
                if val == 'none':
                    osys = None # None means plain info output instead of batch commands
                else:
                    osys = val
            else:
                raise Exception("Unsupported operating system specified")
            recognized.append(x)
        elif opt == u'--obey-names':
            getkey = namekey # switch the bucket key function to one that includes the file name
            recognized.append(x)
        elif opt == u'--ignore-symlinks':
            symlinks = False
            recognized.append(x)
        elif opt == u'--ignore-size' and val:
            try:
                # NOTE(review): for fractional values like "1.5" float() succeeds but int()
                # raises, so the except path runs and no unit matches — the option is then
                # silently left unrecognized; confirm whether int(float(val)) was intended
                float(val)
                ignore_size = int(val)
            except:
                # value carries a unit suffix: scale accordingly (decimal and binary units)
                m = {'kb':1000, 'k':1000, 'mb':1000000, 'm':1000000, 'gb':1000000000L, 'g':1000000000L, 'ki':1024, 'mi':1048576, 'gi':1073741824L}
                for unit, value in m.items():
                    if val.endswith(unit):
                        v = float(val[:-len(unit)]); f = val[-len(unit):].lower()
                        ignore_size = int(v * m.get(f, 1))
                        recognized.append(x)
                        print (u'Ignoring file sizes of %d or below for comparison' % ignore_size).encode(sys.stdout.encoding)
                        break
        elif opt == u'--fast':
            simplified = True
            recognized.append(x)
        elif opt == u'--verbose':
            verbose = True
            recognized.append(x)
        elif opt == u'--self-test':
            test()
            sys.exit()
        elif opt == '--run':
            # TODO implement
            recognized.append(x)
                
    # Crawl source file tree
    singularsource = [] # This is the easy part: Non-duplicates only in one location
    singulartarget = []
    source = args[0]
    # NOTE(review): source strips both separator styles here, while target below only
    # strips ossep[osys] — confirm the asymmetry is intended
    while source[-1] in [u'/', u'\\']: source = source[:-1]
    
    if len(args) > 1: # operation mode: find differing locations of duplication files between source and target
        # Define defaults for sync mode
        crawl_concurrently = False
        copy_missing = "target" # Default: copy missing files in target from source to target (but not the other way around)
        delete_missing = "none" # Default: don't remove anything on either side
        
        # Parse command line options for sync mode
        for x in opts:
            y = x.split(u"=")
            if len(y) > 1:
                opt = y[0]
                val = y[1]
            else:
                opt = y[0]
                val = None
            if opt == u'--concurrent':
                crawl_concurrently = True
                recognized.append(x)
            elif opt == u'--copy-missing':
                if val in [u'target',u'source',u'both',u'none']:
                    copy_missing = val
                else:
                    raise Exception("Unsupported option for --copy-missing")
                recognized.append(x)
            elif opt == u'--delete-missing':
                if val in [u'source',u'target',u'none',u'both']:
                    delete_missing = val
                else:
                    raise Exception("Unsupported option for --delete-missing")
                recognized.append(x)
        if copy_missing == delete_missing and copy_missing != u'none':
            print (u'Options for --copy-missing and --delete-missing cannot both have the value %s' % copy_missing).encode(sys.stdout.encoding)
            sys.exit()
        # drop all recognized options; anything left over is unknown and aborts the run
        for r in recognized:
            try:
                del opts[opts.index(r)]
            except:
                pass
        if len(opts) > 0:
            for u in opts:
                print (u"Unknown command line option found: %s" % u).encode(sys.stdout.encoding)
            sys.exit(1)
        
        # Crawl target file tree
        print >> sys.stderr, u"Processing data..."
        target = args[1]
        while target[-1] == ossep[osys]: target = target[:-1]
        import threading # only needed for dual tree mode
        # timeit() is used as thread target: results are passed back via these dicts
        sourceOutput = {}
        targetOutput = {}
        sourceThread = threading.Thread(target = timeit, name='Source Crawler', args = (crawl, sourceOutput, source, True))
        targetThread = threading.Thread(target = timeit, name='Target Crawler', args = (crawl, targetOutput, target, False))
        sourceThread.start() # start first thread
        if crawl_concurrently:
            targetThread.start() # start second thread concurrently
            sourceThread.join() # wait for both to have finished
            targetThread.join()
        else:
            sourceThread.join() # wait for completion of first thread
            targetThread.start() # start second thread
            targetThread.join() # wait for completion of second thread
        source_rawfiles = sourceOutput["return"]
        sourcecrawldur = sourceOutput["duration"]
        target_rawfiles = targetOutput["return"]
        targetcrawldur = targetOutput["duration"]
        # NOTE(review): only the target crawl duration is reported; sourcecrawldur is unused here
        print >> sys.stderr, (u"    (took %.1f minutes)" % (targetcrawldur / 60)).encode(sys.stderr.encoding);
        
        # Combine buckets of source and target
        combined = combine([source_rawfiles, target_rawfiles])
            
    else: # operation: find duplicate files in a single file tree
        for r in recognized:
            try:
                del opts[opts.index(r)]
            except:
                pass
        if len(opts) > 0:
            for u in opts:
                print >> sys.stderr, (u"Unknown command line option found: %s" % u).encode(sys.stderr.encoding)
            sys.exit(1)
        (source_rawfiles, sourcecrawldur, ) = timeit(crawl, None, source, True)
        print >> sys.stderr, u"    (took %.1f')" % (sourcecrawldur / 60)
        combined = combine([source_rawfiles])
    
    # Do main bucket handling
    print >> sys.stderr, (u'  Found %d distinct file sizes' % (len(combined))).encode(sys.stderr.encoding)
    
    # Remove non-dups from further consideration
    removesingulars(combined)
    print >> sys.stderr, (u'  Remaining %d potential duplicate size buckets' % (len(combined))).encode(sys.stderr.encoding)
    
    # Reduce number of duplicate candidates by checking partial file contents
    (reducedup, reducedur) = timeit(reducedups, None, combined)
    # Remove non-dups from further consideration
    print >> sys.stderr, (u"  Found %d potential duplicate partial content buckets (took %.1f')" % (len(reducedup), reducedur / 60)).encode(sys.stderr.encoding)
    removesingulars(reducedup)
    print >> sys.stderr, (u'  Remaining %d potential duplicate partial content buckets' % (len(reducedup))).encode(sys.stderr.encoding)
    
    # Reduce number of duplicate candidates by checking hash sums of whole file contents
    realdup, dupdur = timeit(dups, None, reducedup)
    # Remove non-dups from further consideration
    print >> sys.stderr, (u"  Found %d duplicate hash buckets (took %.1f')" % (len(realdup), dupdur / 60)).encode(sys.stderr.encoding)
    removesingulars(realdup)
    print >> sys.stderr, (u'  Remaining %d duplicate hash buckets' % (len(realdup))).encode(sys.stderr.encoding)
        
    # Continue separate logic for dup/sync mode
    if len(args) > 1:
        # here main logic: find differing locations on target
        ops = findwronglocations(realdup)
        showsingulars(singularsource, singulartarget)
        showactions(ops)
    else:
        showdups(realdup, 'Hash', True)
