#!/usr/bin/env python
# -*- coding: utf-8 -*-

##############################################################
#                     (C) SANDULEAC DAN                      #
#                           EVALUATOR                        #
#                 RELEASED UNDER GPL LICENCE                 #
#============================================================#


# DON'T MODIFY ANYTHING BELOW
# ---------------------------

from __future__ import with_statement
import sys, os, stat
import re
from operator import itemgetter
from os.path import join, isfile, isdir
import tempfile
# Shell utilities like copy, move etc
import shutil
from subprocess import Popen, PIPE
import cPickle
import itertools
from pprint import pprint

try: from itertools import izip_longest
except: from compat import izip_longest

from ctypes import *

# Manual compatibility library for lower Python versions
import compat

__all__ = ['EvalError', 'die', 'log_print', 'setup_output', 'Problem']

# default verbosity: 7 // print errors, warnings, notices

# Global evaluator settings; config.py may override these at import time
# (see the initialization code at the bottom of this module).
eval_options = {
    'args': [],
    'verbosity': 7,
    #'jrun': '/usr/local/bin/jrun',
    'chroot_args': ['--chroot', '--copy-libs']
}

# Default problem options. Problem.update() accepts only keys listed here,
# and each value must match the type of the default below.
defaults = {
    'time': 1000,
    'memory': 16384,
    # filename templates: '%s' expands to the problem name, '?' to the
    # test number (see __format_filename)
    'inf': '?-%s.in',
    'outf':'',
    # these two are here so they can be stored within the problem,
    # in case no others are specified
    'user_input': '%s.in',
    'user_output': '%s.out',
    'verif': '',
    'groups': '',
    'type': 'classical',
}

# Fresh progress/score counters, copied at the start of every evaluation run
eval_status_default = {
    'tests_done': 0,
    'score': 0,
    'real_score': 0,
}

# Recognized problem types (sanity_check also maps 'output' -> 'output-only')
types = ('classical', 'output-only', 'interactive')

# Stream used by log_print; replaced via setup_output(log_output_to=...)
log_output = sys.stderr

class EvalError(Exception):
    """Generic evaluator failure; `value` may be a message string or any
    object convertible with str()."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Strings pass through untouched; anything else is stringified.
        if isinstance(self.value, str):
            return self.value
        return str(self.value)

def die(message):
    """Abort the current evaluation step by raising EvalError(message)."""
    raise EvalError(message)


def log_print(message, verbosity = 4):
    """Send `message` to the stream held in the global `log_output`, if any.

    The message is emitted only when `verbosity` shares at least one bit
    with eval_options['verbosity']; see setup_output for the flag meanings.
    """
    # Skip the message when none of its verbosity flags are enabled.
    if not (verbosity & eval_options['verbosity']):
        return
    if not isinstance(message, basestring):
        message = str(message)
    if log_output:
        print >>log_output, message.encode('utf-8')


def setup_output(verbosity = None, **args):
    """Configure the evaluator's logging.

    verbosity: new value for eval_options['verbosity'] (bit flags):
        1 = print error messages -> handled EvalErrors
        2 = print warning messages
        4 = print notice messages
    args: log_output_to -> stream that replaces the global `log_output`
    """
    global log_output
    try:
        log_output = args['log_output_to']
    except KeyError:
        pass

    if verbosity is not None:
        eval_options['verbosity'] = verbosity



class s_results(Structure):
    # ctypes mirror of the result struct filled in by jrun.so's eval():
    #   verdict - short status string (e.g. 'OK' / 'FAIL', per usage below)
    #   message - human-readable detail accompanying the verdict
    #   time    - integer, formatted as '{0}ms' by the caller (presumably ms)
    #   memory  - integer, formatted as '{0}kb' by the caller (presumably kB)
    _fields_ = [("verdict", c_char_p),
                ("message", c_char_p),
                ("time", c_int),
                ("memory", c_int)]



class Problem(object):

    def call_jrun(self):
        'Calls the eval method from jrun.so, using the args predetermined in the constructor'
        'This method gets generated in the constructor'
        pass

    def __init__(self, name = None, path = None, configure = False, repo = False, **args):
        ''' Problem constructor

            name: exact name of the problem, which appears in the tests too
            path: absolute or relative to cwd or to TestDBRoot (in the latter case, smart search can be used)
            repo: if True, force the use of repository (TestDBRoot is prepended to path)
            configure: if True, then make a smart path search, even when both path and name were specified
            args -> problem specific properties, such as time, memory, type, user-output, verif, ...

            Raises EvalError when neither name nor path is given, or when a
            later configuration/sanity-check step fails.
        '''
        # Maybe split this in two pieces, excluding the automatic configuration ?

        if not name and not path:
            raise EvalError('You must specify the name and/or the path')
        # Real score per test number, loaded from tests.txt in open_config()
        self.punctaje = {}
        # Group the specified test belongs to
        self.groupof = {}
        # Pairs of groups for tests
        self.groups = {}

        opts = self.opts = defaults.copy() # shallow copy, but only simple objects here
        # 0.64 - changed here to explicit parameters
        self.name = name
        self.path = path
        self.update(args)

        # FIXME: Maybe self.update() should be changed to also accept
        # `quiet` and `output` from **args ... we make do with this for now

        if not self.punctaje and self.opts['groups']:
            die('For now, you must provide tests.txt (real scores) in order to use groups')

        # auto-detect the missing parameter from name and/or path
        if configure or (not name or not path):
            self.configure(repo = repo)

        # Print_header won't do anything unless global open file 'output' is set
        self.print_header()

        """
        save_file = os.path.join(self.path, self.name + '.dat')
        # TODO: load file from `save_file` if dir's mtime is different that save_file's
        dir_mtime = os.stat(self.path).st_mtime
        pickle_mtime = os.stat(save_file).st_mtime
        if dir_mtime == pickle_mtime:
            print >>sys.stderr, 'Loading from file'
            self.load_from_file(save_file)
        else:
            if os.path.isfile(save_file):
                try:
                    os.remove(save_file)
                except OSError, e:
                    print >>sys.stderr, 'Could not remove save file in attempt to regenerate it'
        """
        # Continuing initialization procedure
        self.open_config()
        self.sanity_check()

        if self.opts['type'] in ('classical', 'output', 'output-only'):
            self.collect_tests()
        # what to do otherwise ?TODO

        # set up jrun
        # NOTE: first element is 'ignored' because jrun's eval() parses a
        # C-style argv whose argv[0] is the program name
        self.jrun_args = args = map(str, ['ignored', '-p', self.name, '-m', opts['memory'], '-t', opts['time']] + eval_options['args'])

        self.JrunEvaluator = JrunLibrary.eval
        self.JrunEvaluator.restype = c_int
        self.JrunEvaluator.argtypes = [c_int, c_char_p*len(args), c_void_p]
        argv = (c_char_p*len(args)) (*args)
        argc = c_int(len(args))

        # Bind argc/argv in a closure so call_jrun() takes no arguments.
        def gen_call_jrun(argc, argv):
            def call_jrun():
                results = s_results()
                self.JrunEvaluator(argc, argv, byref(results))
                return results
            return call_jrun

        self.call_jrun = gen_call_jrun(argc, argv)

        # Automatically save problem in test directory
        'self.save_to_file(save_file)'


    def update(self, args):
        """Merge `args` into self.opts, accepting only keys that appear in
        `defaults` and values whose type matches the default's; dies on any
        unknown key or mistyped value."""
        for key, value in args.iteritems():
            try:
                default_value = defaults[key]
            except KeyError:
                die('Unknown parameter %s=%s' % (key, value))
            expected = type(default_value)
            if not isinstance(value, expected):
                # str(type(...))[7:-2] turns "<type 'int'>" into "int"
                die('Wrong value type for %s = %s: correct is %s' % (key, value, str(expected)[7:-2]) )
            self.opts[key] = value
        return True


    def configure(self, repo = False):
        ''' What this does is to try to deduce missing parameters from the already given ones
            Very nifty piece of code
            It is usually called automatically in the constructor
            ** Ported in version 0.63 from parse_parameters to help give the program modularity

            repo: if True, always search under TestDBRoot instead of cwd.
            Dies when no test directory can be located.
        '''
        # (removed unused local `opts = self.opts` -- nothing here reads it)

        if self.path:
            # if the name's not given, derive it from the path
            self.name = self.name or os.path.basename(self.path.rstrip('/'))

            # CHANGED in 0.64: if path ends in / => it is a direct relative path from TestDBRoot
            if self.path.endswith('/'):
                self.path = join(TestDBRoot, self.path)

        else: # compute path for problem's test directory from name
            self.path = self.name

        # If can't find "%PATH" | "%PATH-teste" in current directory, search the repo
        pdirs = filter(isdir, [self.path, self.path + '-teste'])
        if repo or not pdirs:
            self.path = find_testdb_directory(TestDBRoot, self.path, depth=5)
        else:
            self.path = os.path.abspath(pdirs[0]) # Use first directory that matched
        if not self.path:
            die("Couldn't find the test directory for this problem, sorry")


    def open_config(self):
        opts = self.opts
        config = '%(path)s/%(name)s.cfg' % self.__dict__
        #PARAMETERS = ['inf', 'outf', 'time', 'memory', 'verif', 'groups']
        if not isfile(config):
            try:
                config = find(self.path, '.*\.cfg$', type='f').next()[0]
            except StopIteration: pass
        if isfile(config):
            # Could do without it...
            #log_print("Using config file `%s`" % os.path.basename(config), 4)
            f = open(config, 'r')
            for line in f:
                m = re.match(r'(\w+)\s*=\s*(\S+)', line)
                #print line
                if not m: continue
                key, value = m.groups()
                key = key.lower()
                if key in defaults and opts[key] == defaults.get(key):
                    # facem si typecasting ca sa nu-l ia string !!!
                    try:
                        opts[key] = type(opts[key])(value)
                        log_print('[config-file] %s=%s' % (key, value), 4)
                        # Verificatorul trebuie sa fie in directorul de configurare
                        if key == 'verif':
                            opts[key] = join(self.path, value)
                    except TypeError:
                        log_print("[config-file] Incorrect value for `%s', expected %s" % \
                                (key, type(opts[key])), 4)
                else:
                    if key not in defaults:
                        log_print('[config-file] Unknown parameter %s' % key, 4)
                    else:
                        log_print('[config-file] Parameter ignored: %s = %s' % (key,value), 4)
            f.close()

        # ==================================
        # fisierul tests.txt pentru punctaje
        # ==================================
        punc_file = join(self.path, 'tests.txt')
        if isfile(punc_file):
            try:
                for i in open(punc_file, 'r'):
                    id, score = i.split(' ')
                    self.punctaje[int(id)] = int(score)
            except (ValueError, KeyError), err:
                die('Parsing error in tests.txt: %s' % str(err))


    def sanity_check(self):
        """Normalize the 'verif' and 'type' options.

        'verif' becomes an argv list (prefixed with 'python' for .pyc
        verifiers) or None when the file is missing; dies on an unknown
        problem type.
        """
        opts = self.opts

        verif = opts['verif']
        if verif:
            if not isfile(verif):
                log_print("Couldn't find judge application `%s', falling back to lazy diff" % \
                        verif, 2)
                opts['verif'] = None
            else:
                verif = os.path.abspath(verif)
                # HACK to accept pyc scripts here
                if verif.endswith('.pyc'):
                    opts['verif'] = ['python', verif]
                else:
                    opts['verif'] = [verif]

        global types
        # 'output' is accepted as shorthand for 'output-only'
        if opts['type'] == 'output':
            opts['type'] = 'output-only'
        if opts['type'] not in types:
            die("Problem type must be one of: %s" % ', '.join(types))


    def __format_filename(self, filenames):
        if isinstance(filenames, dict):
            for k in filenames:
                tmp = filenames[k].replace('%s', self.name)
                tmp = re.escape(tmp)
                filenames[k] = re.sub(r'\\\?+', '(\d+)', tmp) + '$'
        elif isinstance(filenames, list):
            # modify a list in-place
            for k, elem in enumerate(filenames):
                tmp = elem.replace('%s', self.name)
                tmp = re.escape(tmp)
                filenames[k] = re.sub(r'\\\?+', '(\d+)', tmp) + '$'
        else: return NotImplemented
        return filenames


    def collect_tests(self):
        "Gather information about the problem's tests"

        opts = self.opts
        if not opts['outf']:
            # derive the ok-file template from the input template ('.in' -> '.ok')
            if not opts['inf'].endswith('.in'):
                die("Couldn't autodetect ok-file format, please provide manually")
            opts['outf'] = opts['inf'][:-2] + 'ok'  #re.sub('\.in$', '.ok', opts['inf'])


        # test_files will contain the regex patterns for the test files
        test_files = { 'inf': opts['inf'], 'outf': opts['outf'] }
        # if we have an output-only problem, eliminate input tests
        if self.opts['type'] == 'output-only':
            del test_files['inf']

        self.__format_filename(test_files)

        # files_in: (test number, path) pairs; files_out: number -> path map
        if 'inf' in test_files:
            files_in = [(int(nr), test) \
                for test, nr in find(self.path, test_files['inf'], type='f')]
        files_out = dict((int(nr), test) \
            for test, nr in find(self.path, test_files['outf'], type='f'))

        #tests_d = dict( ( i[0], (i[1], files_out.get(i[0])) ) \
        #        for i in files_in )
        #tests = sorted(tests_d.items(), key = itg(0))
        # self.tests, sorted by test number:
        #   classical   -> [(nr, (in_file, ok_file_or_None))]
        #   output-only -> [(nr, ok_file)]
        if self.opts['type'] == 'classical':
            self.tests = sorted(
                ( (nr, (in_file, files_out.get(nr))) for nr, in_file in files_in),
                key = itemgetter(0)
            )
        elif self.opts['type'] == 'output-only':
            self.tests = sorted(files_out.items(), key=itemgetter(0))

        # This verification is only needed for classical problems
        if self.opts['type'] == 'classical' and not opts['verif']:
            for id, (fi, fo) in self.tests:
                if not fo:
                    die("For test %d, couldn't find ok-file in '%s'" % (id, self.path))

        if not self.tests:
            die("Couldn't find any test in tests directory. Test file format expected: `%s` & `%s`" % (opts['inf'], opts['outf']))

        self.nr_tests = len(self.tests)

        # Configure GROUPS
        # opts['groups'] looks like "1-3,4-7": inclusive test-number ranges
        group_id = 0
        for group in opts['groups'].split(','):
            if not group.strip(): continue
            try:
                left, right = map(int, group.split('-'))
            except ValueError, e:
                die('Bad groups format: %s' % str(e))
            # add new group (left, right)
            self.groups[group_id] = (left, right)
            for i in range(left, right+1):
                self.groupof[i] = group_id
            group_id += 1


    # Serialization framework
    # currently inoperative; need to figure out how to pickle an entire class
    # -------------------------------------------------------------------------

    def save_to_file(self, filename):
        try:
            cPickle.dump(self, open(filename, 'wb'), protocol=2)
        except (IOError, OSError), e:
            log_print('Could not save problem to file %s' % filename, 2) # warn
            raise e

    def load_from_file(self, filename):
        try:
            # FIXME: nu merge, mai facem asa at all ?
            self = cPickle.load(open(filename, 'rb'))
        except (IOError, OSError), e:
            die('Could not load problem from file %s' % filename)

    # -------------------------------------------------------------------------


    def compare(self, input, output, ok):
        """ compare(input, output, ok) -> (message, score out of score_max %) """
        opts = self.opts

        if not opts['verif']:
            # just do the diff, lazy diff, ignore all trailing whitespace
            with open(output, 'r') as output_file:
                with open(ok, 'r') as ok_file:
                    for line1, line2 in izip_longest(output_file, ok_file):
                        if line1.rstrip(' \n\r') != line2.rstrip(' \n\r'):
                            return 'Wrong Answer', 0
            return 'OK', 100

        else:
            #f, tmp = popen4('%s "%s" "%s" "%s"' % (opts['verif'], input, os.path.abspath(output), ok))
            command = opts['verif'] + [input, os.path.abspath(output), ok]
            f = Popen(command, stdout=PIPE).stdout
            ln = f.readlines()
            try:
                assert (len(ln) == 2)
                punctaj = int(ln[1])
            except:
                log_print("Dump - verifier program's output:", 1)
                log_print('\n'.join(ln), 1)
                die("Unexpected output format in verifier program")
            assert(0 <= punctaj <= 100)
            exitcode = os.WEXITSTATUS(f.close() or 0)
            if exitcode == 0:
                return ln[0].rstrip('\n'), punctaj
            else:
                return 'Verificator exitcode != 0', 0


    def __prerun_check(self, chroot):
        # can we chroot ?
        # os.popen('which "%(jrun)s"' % eval_options).readline()[:-1]

        # this doesn't serve anymore since we use a module
        '''
        jrun_stat = os.stat(eval_options['jrun'])
        jrun_uid = jrun_stat[stat.ST_UID]
        jrun_setuid = stat.S_ISUID & jrun_stat.st_mode
        #if eval_options.get('chroot'):
        if chroot:
            if (jrun_uid == 0 and jrun_setuid) or os.getuid() == 0:
                eval_options['args'].extend(eval_options['chroot_args'])
            else:
                die("Can not chroot, you are not root and jrun doesn't have the setuid bit set.")
        '''
        # TODO reimplement the prerun check, checking the script itself
        pass


    def evaluate(self, subject, **args):
        """Evaluate `subject` and return a generator of (eval_status, testdata).

        For classical problems `subject` is an executable; for output-only
        problems it is a directory filled only with the contestant's
        outputs.  Dies when `subject` is missing.
        """
        subject = os.path.abspath(subject)
        kind = self.opts['type']

        if kind == 'classical':
            if not isfile(subject):
                die('The executable to be evaluated does not exist')
            self.__prerun_check(chroot = args.get('chroot'))
            return self.__run_tests_classical(subject, **args)

        if kind == 'output-only':
            if not isdir(subject):
                die('The outputs directory to be evaluated does not exist')
            return self.__run_tests_output_only(subject, **args)

        if kind == 'interactive':
            return self.__run_tests_interactive(subject, **args)


    class TestContextManager:
        """Creates a status object for one test, `testdata`, and on exit folds
        the test's results into the evaluation context.

        Evaluation context consists of:
         * context.eval_status (the progress and status of the evaluation)
         * context.group_score (the incomplete group scores, they span across multiple tests);
        `context` is a holder object that accomodates any context vars needed here
        Usage (from within Problem): with TestContextManager(self, test_no, context_object)"""
        def __init__(self, problem, test, context):
            self.problem = problem
            self.test = test
            self.s = context

        def __enter__(self):
            # TODO: make testdata reausable resource?
            testdata = {
                'id': self.test,
                'verdict': 'unknown',
                'group': self.problem.groupof.get(self.test),
            }
            testdata['group_range'] = self.problem.groups.get(testdata['group'])
            self.testdata = testdata
            return testdata

        def __exit__(self, type, value, traceback):
            if type in (KeyboardInterrupt,): # REMOVED EvalError, --> why?
                # Suppress the error
                return True
            elif type != None:
                # Let it pass, we'll handle the error in __run_tests_*
                return False

            s = self.s
            testdata = self.testdata

            # Test here to see what kind of scoring we have
            # assume testdata['score'] is already set
            # FIXME: maybe we should have one type of score only
            if len(self.problem.punctaje) > 0:
                score_max = self.problem.punctaje[self.test]
                testdata['real_score'] = int(round(score_max * testdata['score'] / 100.0))

            # HANDLE GROUPS (current percentual score)
            # -------------
            if self.problem.groupof:
                group = testdata['group']
                if group is None:
                    # maybe `score` should be made explicit, keeping only real_score?
                    # For now, real_score always exists when we are using groups
                    # TODO: implement group scoring even when there's no tests.txt
                    testdata['group_score'] = testdata['real_score']
                    s.eval_status['real_score'] += testdata['group_score']
                else:
                    gr_l, gr_r = testdata['group_range']
                    # a group accumulates only while every test scores 100%;
                    # False marks a failed group
                    tmp_score = s.group_score.get(group, 0)
                    if testdata['score'] < 100:
                        tmp_score = False
                    if tmp_score is not False:
                        tmp_score += testdata['real_score']
                    s.group_score[group] = tmp_score
                    #if gr_r > testdata['id']:
                    #    testdata['group_score'] = None
                    # on the group's last test, commit its total (0 if failed)
                    if gr_r == testdata['id']:
                        testdata['group_score'] = s.group_score[group] or 0
                        s.eval_status['real_score'] += testdata['group_score']
            else:
                if self.problem.punctaje: # (problem.) having_real_scores
                    s.eval_status['real_score'] += testdata['real_score']
                else:
                    s.eval_status['score'] += testdata['score']



    def __run_tests_interactive(self, subject, **kwargs):
        # TODO: interactive evaluation is not implemented yet; callers
        # currently get None instead of a generator.
        pass


    def __run_tests_output_only(self, subject, debug = False, user_output = None, **kwargs):
        '''Yields (eval_status, testdata) for every test of an output-only
        problem; `debug` and kwargs are currently ignored.

        The contestant's output for test N is looked up inside `subject`
        using the user_output template ('%s' = problem name, '?' = N).
        '''
        opts = self.opts
        # Auto unpack the files used by the contestant's executable
        user_output = (user_output or opts['user_output']).replace('%s', self.name)
        # the user outputs are in the "subject" directory here
        user_output = join(subject, user_output)

        eval_status = eval_status_default.copy()
        eval_status['tests_total'] = self.nr_tests

        # anonymous holder for the context shared with TestContextManager
        class Object(object): pass
        context = Object()
        context.group_score = {}
        # make a symlink in `context`
        context.eval_status = eval_status

        for test_crt, (test, outf) in enumerate(self.tests):
            eval_status['tests_done'] = test_crt
            # (the dead `testdata = {...}` assignment that the `with`
            # target immediately shadowed has been removed)
            with self.TestContextManager(self, test, context) as testdata:
                tmp_user_output = user_output.replace('?', str(test))
                testdata['verdict'], testdata['score'] = \
                    self.compare(input = '', output = tmp_user_output, ok = outf)

            yield (eval_status, testdata)


    def __run_tests_classical(self, executable, debug = False, chroot = False, user_output = None, user_input = None, **kwargs):
        #'''Yields (testdata, (test_crt, nr_tests))'''
        # Now, the scoring (and handling of groups) is carried away here
        '''Yields (eval_status, testdata), kwargs are just ignored'''

        opts = self.opts
        # run everything in a throw-away directory, removed in `finally`
        workdir = tempfile.mkdtemp()
        current_directory = os.getcwd()

        eval_status = eval_status_default.copy()
        eval_status['tests_total'] = self.nr_tests

        # anonymous holder for the context shared with TestContextManager
        class Object(object): pass
        context = Object()
        context.group_score = {}
        # make a symlink in `context`
        context.eval_status = eval_status

        try:
            shutil.copy(executable, join(workdir, self.name))
        except IOError, err:
            die(str(err))

        # Auto unpack the files used by the contestant's executable
        user_input = (user_input or opts['user_input']).replace('%s', self.name)
        user_output = (user_output or opts['user_output']).replace('%s', self.name)

        try:
            for test_crt, (test, (inf, outf)) in enumerate(self.tests):
                eval_status['tests_done'] = test_crt
                os.chdir(workdir)

                # place the test input under the name the contestant expects
                unixize(inf, user_input)
                if isfile(user_output):
                    os.remove(user_output)

                with self.TestContextManager(self, test, context) as testdata:
                    '''
                    command = map(str, [eval_options['jrun'], '-p', self.name, '-m', opts['memory'], '-t', opts['time']] \
                        + eval_options['args'])
                    child_stdout = Popen(command, stdout = PIPE).communicate() [0]
                    rez = child_stdout.strip().split(': ')
                    ##child.wait()
                    ##rez = child.stdout.readline().strip().split(': ')

                    # Parsing jrun's output nicely
                    time, memory = re.match('time\s+(\w+)\s+memory\s+(\w+)', rez[1], re.I).groups()
                    testdata['verdict'] = rez[0]
                    testdata['jrun_message'] = rez[2]
                    '''
                    # NEW behaviour, using jrun module
                    results = self.call_jrun()
                    testdata['time'] = '{0}ms'.format(results.time)
                    testdata['memory'] = '{0}kb'.format(results.memory)
                    #mesaj.extend([time, memory] + rez[2:])
                    testdata['verdict'] = results.verdict
                    testdata['jrun_message'] = results.message

                    # Adapt verdict from jrun_message
                    if testdata['verdict'] == 'FAIL':
                        testdata['verdict'] = self.__adapt_message(testdata['jrun_message'])

                    # if the run succeeded, check whether the answer is wrong
                    score = 0
                    if testdata['verdict'] == 'OK':
                        msg, score = self.compare(input = inf, output = user_output, ok = outf)
                        testdata['verdict'] = msg
                        if score < 100 and isfile(user_output) and debug:
                            # Maybe cleanup a bit here, use tmp = proc.communicate('\n'.join(unixize_iter(outf)))[0]
                            command = ['diff', '-pruN', user_output, '-']
                            proc = Popen(command, stdout=PIPE, stdin=PIPE)
                            fo, fi = proc.stdin, proc.stdout
                            fo.writelines(unixize_iter(outf))
                            fo.close()
                            testdata['debug_data'] = fi.readlines() #''.join(fi)
                            fi.close()

                            # save to a file in debug directory
                            D = join(current_directory, self.name + '-debug')
                            if not os.path.isdir(D):
                                os.mkdir(D)
                            shutil.copy(outf, join(D, '%d.ok' % testdata['id']))
                            shutil.copy(user_output, join(D, '%d.out' % testdata['id']))
                    testdata['score'] = score

                os.chdir(current_directory)
                yield (eval_status, testdata)

        # TODO: transform any uncaught exceptions to EvalError in such a try..finally seq, like here
        # OR.... do it in TestContextManager.__exit__ ??
        except Exception, e:
            raise EvalError(e)

        finally:
            try:
                os.chdir(current_directory)
            except: pass
            try:
                shutil.rmtree(workdir)
            except OSError, e:
                log_print("Notice: couldn't dispose of workdir %s: %s" % (workdir, e), 4)


    def print_header(self):
        """Log (at notice level) where the task was loaded from and its name."""
        for line in ("Engine loaded task at `%s'" % self.path,
                     "Task name: %s" % self.name):
            log_print(line, 4)


    def __str__(self):
        return '<Problem %(name)s, path="%(path)s">' % self.__dict__

    def __repr__(self):
        return 'Problem(name="%(name)s", path="%(path)s")' % self.__dict__


    def __adapt_message(self, str):
        # TODO: widen this function
        if str == 'Time limit exceeded.':
            return 'TLE'
        elif str == 'Wall time limit exceeded.':
            return 'WTLE'
        elif str == 'Memory limit exceeded.':
            return 'MLE'
        return str


# This method seems bulky
# FIXME: consider using os.walk ? --> seems slower ! (see below)
'''
def find(root, name, depth = 0, icase = True, type = 'df'):
    """Does a re.match() to test `name' against file_name"""
    it = os.walk(root)
    for path, dirs, files in it:
        dep = path.count('/')
        T = []
        if 'f' in type: T.append(files)
        if 'd' in type: T.append(dirs)
        for i in itertools.chain(*T):
            m = re.match(name, i, icase and re.I)
            if not m: continue
            fullpath = join(path, i)
            yield (fullpath, m.groups() and m.groups()[-1])
        if dep >= depth:
            del dirs

'''
def find(root, name, depth = 1, icase = True, type = 'df'):
    """Breadth-first search under `root`, re.match()-ing `name` against each
    entry's bare filename, descending at most `depth` directory levels.

    Yields (fullpath, last_regex_group) for every matching file ('f' in
    `type`) and/or directory ('d' in `type`).  Matching is case-insensitive
    unless `icase` is false.  Yields nothing when `root` is not a directory.
    """
    if not isdir(root):
        return
    match = re.compile(name, icase and re.I).match
    frontier = [root]
    for level in range(depth):
        next_frontier = []
        for directory in frontier:
            entries = os.listdir(directory)
            if level < depth - 1:
                # queue this directory's subdirectories for the next level
                next_frontier.extend(
                    sub for sub in (join(directory, e) for e in entries)
                    if isdir(sub)
                )
            for entry in entries:
                m = match(entry)
                if not m:
                    continue
                fullpath = join(directory, entry)
                kind = isfile(fullpath) and 'f' or 'd'
                if kind in type:
                    yield (fullpath, m.groups() and m.groups()[-1])
        frontier = next_frontier


def find_testdb_directory(root, path, depth = 5):
    """Wrapper around find(): locates the test-db directory *somewhere*
    under root+prefix, matching the final path component case-insensitively.

    `path` splits as (.*/)?(.*) into (prefix, basename).  Returns the single
    matching directory, or None when nothing matched; dies when the match
    is ambiguous.
    """
    prefix, basename = re.match("(.*/)?(.*)", path).groups()
    # Changed in 0.62
    # Accept -teste suffix too and DON'T allow partial problem name matches
    pattern = basename + "(-teste)?$"
    testdir = None
    for candidate, _ in find(join(root, prefix or ''), pattern, depth=depth, type='d'):
        if testdir is not None:
            die('Too many test directories found, be more specific by using a partial prefix path\n' +
            ("Option 1: '%s'\n" % testdir) +
            ("Option 2: '%s'\n" % candidate) +
            '(maybe more)')
        testdir = candidate
    return testdir

def unixize_iter(a):
    """Dos2unix done in python: yield the lines of file `a` with all CR
    characters stripped.

    BUG FIX: the old version never closed its handle; `with` closes it
    when the generator is exhausted or garbage-collected.
    """
    with open(a, 'rt') as f:
        for line in f:
            yield line.replace('\r', '')

def unixize(a, b):
    """Dos2unix done in python: copy file `a` to `b`, stripping CR characters.

    BUG FIX: the old version only closed the output file; both handles are
    now managed by `with`.
    """
    with open(a, 'rt') as f:
        with open(b, 'wt') as g:
            for line in f:
                g.write(line.replace('\r', ''))


# initialization - import config.py
# (runs at import time: a missing/broken config.py makes this module fail)

try:
    from config import TestDBRoot
    from config import eval_options as eval_options_config
    assert(isinstance(eval_options_config, dict))
    # config.py's eval_options entries override the defaults above
    eval_options.update(eval_options_config)
except ImportError, e:
    raise ImportError('evalcore: can not parse config.py.\nReason: %s' % str(e))

# make jrun and TestDBRoot have an absolute path, this is needed so that __run_tests doesn't try to run it relative to the working dir (while evaluating)

#eval_options['jrun'] = os.path.abspath(eval_options['jrun'])
TestDBRoot = os.path.abspath(TestDBRoot)

# load the Jrun eval function
# jrun.so is expected one directory above this module
# TODO how to use the `import jrun` method?
JrunLibrary = cdll.LoadLibrary(join(os.path.dirname(__file__), '../jrun.so'))
