#!/usr/bin/env python
 # -*- coding: utf-8 -*-

##############################################################
#                     (C) SANDULEAC DAN                      #
#                         EVALUATOR                          #
#                 <<  Console interface  >>                  #
#                 RELEASED UNDER GPL LICENCE                 #
##############################################################

import evalcore
import sys, os, re
from os.path import join, isfile, isdir, basename
import itertools
import math

# itertools.izip_longest exists on Python >= 2.6; fall back to the local
# compat module on older interpreters.
# BUGFIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
# only an ImportError should trigger the fallback.
try:
    from itertools import izip_longest
except ImportError:
    from compat import izip_longest

__all__ = ['parse_parameters', 'print_results', 'configure_output']

# Don't modify anything below
# To configure test db directory, use config.py
# ------------------------------------------------------------
# some constants defined here

_ESC = chr(27)
_colors = {
    'green' : _ESC+'[1;32m',
    'green_i' : _ESC+'[0;32m',
    'red'   : _ESC+'[1;31m',
    'else'  : _ESC+'[1;35m',
    'whiter': _ESC+'[1;37m',
    'normal': _ESC+'[m'
}

def _colorize(msg, color):
    return '{0}{1}{2}'.format(_colors[color], msg, _colors['normal'])

# Convenience aliases for evalcore's logging and fatal-exit helpers.
log_print = evalcore.log_print
_die = evalcore.die

# -----------------------------------------------------
# OUTPUT
# -----------------------------------------------------

# File the evaluation table is written to.  configure_output() may redirect
# this elsewhere, or set it to None to suppress the table entirely.
main_output = sys.stdout

def main_print(message):
    '''Write one "evaluation table" line, UTF-8 encoded, to the file
    referenced by the global `main_output`; a no-op when it is unset.'''
    if not main_output:
        return
    main_output.write(message.encode('utf-8') + '\n')

# -----------------------------------------------------

def parse_parameters(argv):
    '''Parses shell parameters (`argv` has the shape of sys.argv).
    Returns tuple (params, setup_params, options)
    `params` are per-student (or more precisely "per-evaluation") parameters
    `setup_params` are one-time parameters used to configure the behaviour of evalcore
    `options` are problem-specific parameters
    Exits via _die() when the positional argument is missing or duplicated.
    '''
    from optparse import OptionParser, OptionGroup
    parser = OptionParser(usage="%prog <executable/outputs_dir> [options]",
        description="Evaluator console interface for evaluating a single piece of code on one task. Tasks can be classical, output-only or interactive, as specified by `type` in the task's config file", version="Eval2 0.65")
    # Per-evaluation and problem-selection options
    rules = [
        (['-p', '--prob'], {'metavar':'...', 'dest':'path', 'help':"Path to \
problem's directory. If not given, it will be derived from executable's name"}),
        (['-n', '--name'], {'dest':'name', 'help':"Exact problem name, which dictates the format for the tests and user output; automatically derived from path's basename if null"}),
        (['-d'],           {'dest':'debug', 'action':"store_true", 'default':False, 'help':'DEBUG: Create diff files \
for tests failed with WA in a directory named "<problem>-debug"'}),
        (['-r', '--repo'], {'action': 'store_true', 'default':False, 'help':'Force me to use test repository. '+
                            "If not used, I will check for problem's test directory in current directory too " +
                            "(as '<problem>-teste' or '<problem>')"}),
        (['-i', '--inf'],  {'help':'''Input file name pattern for tests (default: %default)
                                   %s expands into problem name, ? into test id'''}),
        (['-o', '--outf'], {'help':'OK file name pattern for tests'}),
        (['--user-in'], {'metavar':'INPUT', 'dest':'user_input', 'help':'User input file name pattern, default: %default'}),
        (['--user-out'], {'metavar':'OUTPUT', 'dest':'user_output', 'help':'User output file name pattern, default: %default for classical problems and: ?-%s.out for output-only problems', 'default': '%s.out'}),
        (['--verif'],      {'help':'''Judge program
                                   Parameters: <JUDGE> <infile> <outfile> <ok-file>
                                   Returns: "message\\n(percentage of score awarded)"
                                   Example: WA\\n0 ; or: OK\\n100'''})
    ]
    # Options forwarded to the jrun sandbox
    jrun_options = [
        (['-t', '--time'], {'type':'int', 'metavar': 'ms', 'help':'Time limit in milliseconds'}),
        (['-m', '--memory'], {'metavar':'kB', 'type':"int", 'help':'Memory limit in KB'}),
        (['--chroot'],     {'action':'store_true', 'default':False,
                            'help':'Use chroot (jrun should have setuid/you should be root). ' +
                            'Commands given to jrun: %s' % evalcore.eval_options['chroot_args']}),
    ]
    # Options controlling where the table and the log go, and how chatty we are
    output_verbosity_rules = [
        (['-s', '--suppress'], {'dest':'suppress', 'action':'store_true', 'default':False, 'help':"Don't print the table"}),
        (['-q'], dict(dest='n_quiet', action='count', default=0, help="Be more quiet with log messages")),
        (['-v'], dict(dest='n_verbose', action='count', default=0, help="Be more verbose with log messages (errors, warnings, notices. Default is to show only errors and warnings)")),
        (['--output'], {'dest':'output', 'metavar':'FILE', 'help':'Output evaluation table to this file. Use value "." to output to same file as log'}),
        (['--log-output'], {'dest':'log_output', 'metavar':'FILE', 'help':'Output log messages to this file. Use value "." to output to same file as table'}),
    ]
    for i, j in rules: parser.add_option(*i, **j)

    group = OptionGroup(parser, "Flags passed to jrun")
    for i, j in jrun_options: group.add_option(*i, **j)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Options related to output and verbosity")
    for i, j in output_verbosity_rules: group.add_option(*i, **j)
    parser.add_option_group(group)

    parser.set_defaults(**evalcore.defaults)
    (opts, args) = parser.parse_args(argv)
    # args[0] is the program name (argv has sys.argv's shape); args[1] is the subject
    if len(args) < 2:
        _die("You didn't specify the executable/output-dir to evaluate. See '%s --help'" % os.path.basename(sys.argv[0]))
    if len(args) > 2:
        _die("Unexpected parameter '%s'" % args[2])

    # Setting the verbosity according to the difference in flags
    # This is a really nasty hack, but what can I do ...
    default_verbosity = int(math.log(evalcore.eval_options['verbosity'] + 1, 2))
    min_level = 0 # we can quiet it so much that nothing will appear anymore, not even errors
    opts.verbosity = (1 << (max(min_level, opts.n_verbose - opts.n_quiet + default_verbosity))) - 1

    # Split the parsed options into the three dicts the caller expects
    params_list = ('debug', 'chroot', 'user_input', 'user_output')
    params = dict((i, getattr(opts, i)) for i in params_list)
    setup_params_list = ('verbosity', 'suppress', 'output', 'log_output')
    setup_params = dict((i, getattr(opts, i)) for i in setup_params_list)
    options_list = ('inf', 'outf', 'name', 'path', 'memory', 'time', 'verif', 'repo')
    options = dict((i, getattr(opts, i)) for i in options_list)

    # Strip a literal leading "./" from the subject path.
    # BUGFIX: the old pattern '^./' had an unescaped '.', so it stripped ANY
    # first character followed by a slash (e.g. "a/prog" -> "prog").
    params['subject'] = re.sub(r'^\./', '', args[1])

    return (params, setup_params, options)


# Column headers of the evaluation table, keyed by column id.
__headers = {
    'id': 'Test',
    'group_score': 'Group',
    'real_score': 'Score',
    'time': 'Time',
    'memory': 'Memory',
    'verdict': 'Verdict'
}
# Fixed display width (ljust padding) of each table column, in characters.
__lens = {
    'id': 6,
    'group_score': 5,
    'real_score': 5,
    'time': 10,
    'memory': 10,
    'verdict': 13,
}

def print_results(problem, subject, **kwargs):
    ''' Evaluation function for the console interface.
        Runs problem.evaluate() and feeds each per-test message into the
        evaluate_tests_iter() consumer coroutine, which prints the table.
        **kwargs -> only among: ['debug' (Bool), 'chroot' (Bool)]
        also in kwargs: params related to what and where to output
        Returns the contestant's total score.
    '''
    # Initialize problem; evaluate() yields one message per evaluated test
    tester = problem.evaluate(subject, **kwargs)

    console_eval = evaluate_tests_iter(problem, **kwargs)
    # Prime the coroutine so it is suspended at its first `yield`
    console_eval.send(None)

    for message in tester:
        console_eval.send(message)

    # Signal end of input: the consumer breaks its loop, prints the table
    # footer and yields the final score.
    # BUGFIX: the value of this resume IS the score (`yield puncte`); the old
    # code discarded it and called next() once more, which hit the trailing
    # bare `yield` and returned None instead of the score.
    return console_eval.next()


def evaluate_tests_iter(problem, **kwargs):
    '''Consumer coroutine that prints the evaluation table, one row per test.

    Protocol: prime with send(None); then send one (eval_status, test) tuple
    per evaluated test; sending/nexting None signals end of input.  After the
    footer is printed it yields the total score, followed by one extra bare
    yield so a further next() does not raise StopIteration.
    '''

    # Display table if we have an output to print to
    display_table = bool(main_output)
    # when visual notice?
    # - output is not stdout
    # - quiet (output is None)
    # - output is lacking (that's never true anymore)

    #visual_notice = not hasattr(evalcore, 'output') or evalcore.output is not sys.stdout
    visual_notice = main_output not in (sys.stdout, sys.stderr)

    having_groups = problem.groupof
    having_real_scores = problem.punctaje

    # Column layout depends on whether the problem has real scores and groups
    columns = ['id'] + \
        (having_real_scores and ['real_score'] or []) + \
        (having_groups and ['group_score'] or []) + \
        ['time', 'memory', 'verdict']

    # Output-only problems get no 'time'/'memory' columns (the two before last)
    if problem.opts['type'] == 'output-only':
        del columns[-3:-1]

    # evaluate() will automatically raise an exception if there are no tests

    eval_status = None
    while 1:
        # grab an evaluation message for a test
        Message = yield
        # we've been signaled end of input
        if Message is None: break
        eval_status, test = Message

        # Progress notice on the terminal while the table goes elsewhere
        if visual_notice:
            sys.stdout.write('%d ' % test['id'])
            sys.stdout.flush()

        content = {}

        if having_groups:
            # Fall back to a down-arrow (U+2193) when no group score is set yet
            content['group_score'] = test.get('group_score', unichr(8595))

        if display_table:
            # 'verdict' moved here as it is adapted in evalcore now (0.64)
            for i in ('id', 'real_score', 'time', 'memory', 'verdict'):
                content[i] = test.get(i)

            # Print the table header, if we are at the first test
            if eval_status['tests_done'] == 0:
                line = [__headers[i].ljust(__lens[i]) for i in columns]
                main_print(' '.join(line))
                line = ['-'*__lens[i] for i in columns]
                main_print(' '.join(line))

            line = [unicode(content[i]).ljust(__lens[i]) for i in columns]
            # Colorization. still a tad Hacky!
            if main_output.isatty():
                # Assign the appropriate color:
                # red for 0, bright green for 100, dim green otherwise
                highlight = test['score'] == 0 and 'red' or \
                        test['score'] == 100 and 'green' or \
                        'green_i'

                line[-1] = _colorize(line[-1], highlight)
                if having_real_scores:
                    line[1] = highlight == 'green' and _colorize(str(line[1]), 'whiter') or str(line[1])

            main_print(' '.join(line))

            # Save debug diff as "<problem>-debug/<test id>.diff"
            if 'debug_data' in test:
                path1 = problem.name + '-debug'
                path2 = '%d.diff' % test['id']
                try:
                    if not os.path.isdir(path1):
                        os.mkdir(path1)
                    open(join(path1, path2), 'wt').write(''.join(test['debug_data']))
                except (OSError, IOError), e:
                    # Best-effort: a failed debug dump only logs a warning
                    log_print("Couldn't write debug data: " + str(e), 1)

    # If no tests succeeded (an exception occurred in evalcore), we should end
    # this here so that exceptions from evalcore propagate
    if eval_status is None:
        return

    # Finish the progress line of test ids
    if visual_notice and sys.stdout.isatty():
        print

    # Total: the accumulated real score if any, otherwise the average over all tests
    puncte = eval_status['real_score'] or \
        int(round(eval_status['score']/eval_status['tests_total']))

    # Table "ending"
    main_print(' '.join('-'*__lens[i] for i in columns))

    # Display the total score inside the table as well
    content = {'id': 'Total'}
    if having_real_scores:
        if having_groups:
            content['group_score'] = puncte
        else:
            content['real_score'] = puncte
        line = [str(content.get(i, '')).ljust(__lens[i]) for i in columns]
        main_print(' '.join(line))
    else:
        main_print('Total: %d' % puncte)


    if visual_notice and __name__ == "__main__":
        print puncte

    # Return the score to pass it on
    yield puncte
    # To hide StopIteration error!
    yield


def configure_output(suppress = None, verbosity = None, log_output = None, output = None):
    '''Configure the output mechanism of evalcore (log_output, verbosity)
    and consoleapp (output).

    `output` / `log_output` are file names to open for writing; the special
    value "." makes one stream share the other's file.  `suppress` drops the
    table only when it would otherwise go to stdout.
    Raises IOError if a requested output file cannot be opened.
    '''

    global main_output

    def configure_output_file(output_to, dest):
        'Open `output_to` for writing, or return the already set up `dest` when no name is given.'

        if not output_to:
            # Return the already set up output
            return dest
        else:
            try:
                return open(output_to, 'wt')
            except IOError:
                # This gets to stderr if log_output file can't be opened. That's okay.
                log_print("Can't open desired output file: %s" % output_to, 1)
                # Bare raise keeps the original traceback (the old `raise e` reset it)
                raise

    # Really NASTY output magic

    if log_output == output == '.':
        _die("Kindly specify a file name for at least one of the outputs, don't set them both to '.'")

    if log_output == '.':
        # Log messages share the table's file
        out = log_out = configure_output_file(output, main_output)
    elif output == '.':
        # The table shares the log's file
        out = log_out = configure_output_file(log_output, evalcore.log_output)
    else:
        log_out = configure_output_file(log_output, evalcore.log_output)
        out = configure_output_file(output, main_output)

    main_output = out

    # verbosity, log_output_to. That's all it needs
    evalcore.setup_output(verbosity = verbosity, log_output_to = log_out)

    # If and only if we haven't declared some other file (or object) to output the table to
    # We may suppress the table output
    if main_output is sys.stdout and suppress:
        main_output = None


def main(argv = None):
    '''Parse parameters and evaluate one single executable.
    `argv` has the shape of sys.argv (argv[0] is skipped as the program name);
    when omitted it defaults to ['.'], which triggers the usage error.'''

    # Avoid a mutable default argument; ['.'] keeps the historical behaviour
    if argv is None:
        argv = ['.']

    params, setup_params, options = parse_parameters(argv)
    subject = params['subject']

    # If only the executable was given, derive the problem name from it
    if not options.get('path') and not options.get('name'):
        options['name'] = os.path.basename(subject)

    # Shortcut. Don't initialize object if it's just a one-time failed run
    # Problem.evaluate() makes this check too
    if not isfile(subject):
        _die('The executable to be evaluated does not exist:\n%s' % os.path.abspath(subject))

    configure_output(**setup_params)

    problem = evalcore.Problem(configure = True, **options)
    print_results(problem, **params)


def main_handled(argv):
    try:
        return main(argv)
    except evalcore.EvalError, e:
        log_print(e, 1)
    except IOError, e:
        log_print('IOError ' + str(e), 1)
    except KeyboardInterrupt:
        print >>sys.stderr, "\nWe've been interrupted, cleaning up"
    return 1

# Script entry point: evaluate using the command-line arguments.
if __name__ == "__main__":
    sys.exit(main_handled(sys.argv))
